Columns: content (string, length 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
import rlp


def get_devp2p_cmd_id(msg: bytes) -> int:
    """Return the cmd_id for the given devp2p msg.

    The cmd_id, also known as the payload type, is always the first entry of
    the RLP, interpreted as an integer.
    """
    return rlp.decode(msg[:1], sedes=rlp.sedes.big_endian_int)
bd930be7205871183ac9cb4814ae793f5524964d
26,926
def custom_cached(name, *old_method, **options):
    """
    decorator to convert a method or function into a lazy one.

    note that this cache type supports expire time and will consider method
    inputs in caching. the result will be calculated once and then it will
    be cached. each result will be cached using a tuple of class type,
    method name, inputs, current user and component key as a key in the cache.

    note that this decorator could be used on both instance or class level
    methods and properties or stand-alone functions.

    :param str name: the cache name to be used.
                     for example: `redis`, `memcached`, `complex` or ...

    :param function | property old_method: the original decorated method or function.

    :keyword bool consider_user: specifies that current user must be included in
                                 key generation. if not provided, it will be read
                                 from `caching` config store.

    :keyword int expire: expire time for given key in milliseconds.
                         if not provided, it will be read from `caching`
                         config store.

    :keyword bool refreshable: specifies that cached item's expire time must be
                               extended on each hit. if not provided, it will be
                               read from `caching` config store.

    :returns: method or function result.
    """

    def decorator(method):
        """
        decorates the given method or function and makes it a lazy one.

        :param function | property method: decorated method or function.

        :returns: method or function result.
        """

        def wrapper(*args, **kwargs):
            """
            wraps the given method or function and returns the cached result
            when available, computing and caching it otherwise.

            :param object args: function positional arguments.
            :param object kwargs: function keyword arguments.

            :returns: method or function result.
            """
            result = caching_services.try_get(name, method, args, kwargs, **options)
            if result is not None:
                return result

            result = method(*args, **kwargs)
            caching_services.try_set(name, result, method, args, kwargs, **options)
            return result

        return update_wrapper(wrapper, method)

    if len(old_method) > 0:
        return decorator(old_method[0])

    return decorator
a6732fee6cd484068d3171079bf4989d5367adbc
26,927
def _full_url(url):
    """
    Assemble the full url for a url.
    """
    url = url.strip()
    for x in ['http', 'https']:
        if url.startswith('%s://' % x):
            return url
    return 'http://%s' % url
cfb56cf98d3c1dd5ee2b58f53a7792e927c1823f
26,928
from typing import Callable, Optional, Union

from rx.core import Observable, ConnectableObservable, typing
from rx.core.typing import Mapper
from rx.core.operators.replay import _replay

# NOTE: the rx.core imports above are an assumption; the flattened snippet
# only showed stdlib `typing` imports, but `typing.RelativeTime` and
# `typing.Scheduler` resolve in RxPY's `rx.core.typing`, not the stdlib module.


def replay(mapper: Optional[Mapper] = None,
           buffer_size: Optional[int] = None,
           window: Optional[typing.RelativeTime] = None,
           scheduler: Optional[typing.Scheduler] = None
           ) -> Callable[[Observable], Union[Observable, ConnectableObservable]]:
    """The `replay` operator.

    Returns an observable sequence that is the result of invoking the mapper
    on a connectable observable sequence that shares a single subscription to
    the underlying sequence replaying notifications subject to a maximum time
    length for the replay buffer.

    This operator is a specialization of Multicast using a ReplaySubject.

    Examples:
        >>> res = replay(buffer_size=3)
        >>> res = replay(buffer_size=3, window=0.5)
        >>> res = replay(None, 3, 0.5)
        >>> res = replay(lambda x: x.take(6).repeat(), 3, 0.5)

    Args:
        mapper: [Optional] Selector function which can use the multicasted
            source sequence as many times as needed, without causing
            multiple subscriptions to the source sequence. Subscribers to
            the given source will receive all the notifications of the
            source subject to the specified replay buffer trimming policy.
        buffer_size: [Optional] Maximum element count of the replay buffer.
        window: [Optional] Maximum time length of the replay buffer.
        scheduler: [Optional] Scheduler the observers are invoked on.

    Returns:
        An operator function that takes an observable source and returns an
        observable sequence that contains the elements of a sequence produced
        by multicasting the source sequence within a mapper function.
    """
    return _replay(mapper, buffer_size, window, scheduler=scheduler)
8a5ff1cbbc5c12d63e0773f86d02550bf5be65c4
26,929
import io

import torch


def read_from_mc(path: str, flush=False) -> object:
    """
    Overview:
        read file from memcache, file must be saved by `torch.save()`
    Arguments:
        - path (:obj:`str`): file path in local system
    Returns:
        - (:obj:`data`): deserialized data
    """
    global mclient
    _ensure_memcached()
    value = mc.pyvector()
    if flush:
        mclient.Get(path, value, mc.MC_READ_THROUGH)
        return
    else:
        mclient.Get(path, value)
    value_buf = mc.ConvertBuffer(value)
    value_str = io.BytesIO(value_buf)
    value_str = torch.load(value_str, map_location='cpu')
    return value_str
c606b131ba3d65c6b3dd320ae6a71983a79420c8
26,930
import wave
import struct


def write_wav(file, samples, nframes=-1, nchannels=2, sampwidth=2, framerate=44100, bufsize=2048):
    """
    Writes the samples to a wav file.

    :param file: can be a filename, or a file object.
    :param samples: the samples
    :param nframes: the number of frames
    :param nchannels: the number of channels
    :param sampwidth: the width of the sample in bytes
    :param framerate: the frame rate
    :param bufsize: the size of the buffer to write into the file
    :return: file
    """
    w = wave.open(file, 'wb')
    w.setparams((nchannels, sampwidth, framerate, nframes, 'NONE', 'not compressed'))

    max_amplitude = float(int((2 ** (sampwidth * 8)) / 2) - 1)

    # split the samples into chunks (to reduce memory consumption and improve performance)
    for chunk in grouper(bufsize, samples):
        frames = b''.join(
            b''.join(struct.pack('h', int(max_amplitude * sample)) for sample in channels)
            for channels in chunk if channels is not None)
        w.writeframesraw(frames)

    w.close()
    return file
ec38069d59dde8dafd5aa98a826ee699ded15b29
26,931
async def check_login(self) -> dict:
    """Check login and return user credentials."""
    session = await get_session(self.request)
    loggedin = UserAdapter().isloggedin(session)
    if not loggedin:
        informasjon = "Logg inn for å se denne siden"
        return web.HTTPSeeOther(location=f"/login?informasjon={informasjon}")  # type: ignore

    return {"name": session["username"], "token": session["token"]}
48dd910c143e4ca8f90d8d3da2be2ce6ed275b1b
26,932
from typing import Callable

import numpy as np
from fastdist import fastdist  # assumed import; the snippet uses `fastdist.euclidean`


def get_knn_func_data_points(
    data_points: np.ndarray,
    pairwise_distances: np.ndarray = None,
    approx_nn: ApproxNN = None,
    metric: Callable = fastdist.euclidean,
    metric_name: str = "euclidean",
) -> KnnFunc:
    """
    Gets a K-nearest neighbour callable for data points, used in `compute_gad`.

    Parameters
    ----------
    data_points : np.ndarray
        Data points.
    pairwise_distances : np.ndarray, optional
        Pairwise distances of data points (defaults to None).
    approx_nn : ApproxNN, optional
        ApproxNN instance.
    metric : Callable, optional
        fastdist metric; only required if `pairwise_distances` and `approx_nn`
        are None (defaults to fastdist.euclidean).
    metric_name : str, optional
        String name of the `metric` callable (defaults to "euclidean").

    Returns
    -------
    knn_func : KnnFunc
        K-nearest neighbour callable for data points.
    """
    if approx_nn is not None:
        return lambda point_idx, k_neighbours: approx_nn.search(
            query_vector=data_points[point_idx],
            k_neighbours=k_neighbours,
            excluded_neighbour_indices=[point_idx],
            return_distances=True,
        )
    elif pairwise_distances is not None:
        return lambda point_idx, k_neighbours: get_nearest_neighbours(
            distances=pairwise_distances[point_idx],
            k_neighbours=k_neighbours,
        )
    else:
        return lambda point_idx, k_neighbours: get_nearest_neighbours(
            distances=fastdist.vector_to_matrix_distance(
                u=data_points[point_idx],
                m=data_points,
                metric=metric,
                metric_name=metric_name,
            ),
            k_neighbours=k_neighbours,
        )
897289271aef24610dc949fefd14761a3bea4322
26,933
from aiida import orm


def get_database_nodecount():
    """Return the total number of nodes stored in the database."""
    query = orm.QueryBuilder()
    query.append(orm.Node)
    return query.count()
dcb71a022d36c2602125cbba4dccd1c7ccb16281
26,934
import numpy as np
from scipy import special


def shower_profile(xdat, alpha, beta, x0):
    """Function that represents the shower profile.

    Takes in the event and predicts total gamma energy using alpha and beta
    to fit. Described in source in README.
    shower_optimize() fits for alpha and beta.
    """
    # measured_energy = event.measured_energy
    # hits = event.hits
    # measured_energy, x, y, z = xdat
    # pos = np.array((x, y, z))
    gamma_energy, distance = xdat
    # start_pos = hits[0]
    # end_pos = hits[-1]
    # distance = np.linalg.norm(start_pos - end_pos)
    gamma = special.gamma(alpha)
    numerator = (beta * distance)**(alpha - 1) * beta * np.exp(-1 * beta * distance * x0)
    return gamma_energy * (numerator / gamma)
99c94604a742ffd44e21b4c5cd2061f6293a4d72
26,935
def has_prefix(s, sub_index):
    """
    Check whether the current string (recorded as indices into s) is a prefix
    of any word in the dictionary; return False otherwise so the search can
    stop early.

    :param s: string, the user input word
    :param sub_index: list, current list (recorded in the index type)
    :return: (bool) True if there is any word with the prefix stored in the
             current list (recorded as indices)
    """
    current_str = ''
    for digit in sub_index:
        current_str += s[digit]
    for word in dictionary:
        if word.startswith(current_str):
            return True
    return False
a33ce5d13b473f264636bfb430d0191450103020
26,936
def get_experiment_tag(name):
    """Interfaces to callables that add a tag to the matplotlib axis.

    This is a light-weight approach to a watermarking of a plot in a way that
    is common in particle physics experiments and groups.

    `name` can be an identifier for one of the styles provided here.
    Alternatively, a custom callable can be defined. By using this function
    we have a common interface for both cases.
    """
    if name in provided_experiment_tags:
        return provided_experiment_tags[name]
    elif callable(name):
        # This option allows providing your own tags.
        return name
    else:
        valid_keys = ", ".join(provided_experiment_tags)
        print(
            f"Ignored invalid experiment tag: {name}. "
            f"Choose one of: {valid_keys}."
        )
        return do_nothing
9dc11a6caac2010aa99e288897a6f60273d1a372
26,937
def _add_layer1(query, original_data):
    """Add data from successful layer1 MIB query to original data provided.

    Args:
        query: MIB query object
        original_data: Two keyed dict of data

    Returns:
        new_data: Aggregated data

    """
    # Process query
    result = query.layer1()
    new_data = _add_data(result, original_data)

    # Return
    return new_data
ab2d3ad95435dd2fcc6b99745f115cab08aa6699
26,938
def encodeUcs2(text):
    """ UCS2 text encoding algorithm

    Encodes the specified text string into UCS2-encoded bytes.

    @param text: the text string to encode

    @return: A bytearray containing the string encoded in UCS2 encoding
    @rtype: bytearray
    """
    result = bytearray()
    for b in map(ord, text):
        result.append(b >> 8)
        result.append(b & 0xFF)
    return result
da2243ffc959db64a196a312522f967dce1da9d1
26,939
def w_kvtype(stype: str, ctx: dict) -> dict:
    """
    Make definition from ktype or vtype option
    """
    stypes = {'Boolean': 'boolean', 'Integer': 'integer', 'Number': 'number', 'String': 'string'}
    if stype in stypes:
        return {'type': stypes[stype]}
    if stype[0] in (OPTION_ID['enum'], OPTION_ID['pointer']):
        tdef = ctx['type_defs'][stype[1:]]
        topts = topts_s2d(tdef[TypeOptions])
        fields = get_enum_items(tdef, topts, ctx['type_defs'])
        idopt = 'id' in topts
        return w_enum(fields, FieldID if idopt else FieldName, FieldDesc, idopt, ctx)
    return {'$ref': f'#/definitions/{stype}'}
e09bc0ceaed2edc2927ddbc4b6bc38bcec5a345d
26,940
from typing import Optional


def create_article_number_sequence(
    shop_id: ShopID, prefix: str, *, value: Optional[int] = None
) -> ArticleNumberSequence:
    """Create an article number sequence."""
    sequence = DbArticleNumberSequence(shop_id, prefix, value=value)
    db.session.add(sequence)

    try:
        db.session.commit()
    except IntegrityError as exc:
        db.session.rollback()
        raise ArticleNumberSequenceCreationFailed(
            f'Could not create sequence with prefix "{prefix}"'
        ) from exc

    return _db_entity_to_article_number_sequence(sequence)
3be2f0399fee0c01a117ffef0f790bae80750db0
26,941
import math

import numpy as np


def transform_side(side, theta):
    """Transform the coordinates of the side onto the perpendicular plane
    using the Euler-Rodrigues formula.

    Input: side coordinates, plane
    Output: new coordinates
    """
    new_side = list()
    # calculating axis of rotation
    axis = side[len(side)-1][0] - side[0][0], 0, 0
    # converting theta to radians
    rad = math.radians(theta)
    for i in side:
        # calculating vector for each point in side
        side_vector = i[0], i[1], 0
        # Euler-Rodrigues formula to rotate vectors
        axis = np.asarray(axis)
        theta = np.asarray(rad)
        axis = axis / math.sqrt(np.dot(axis, axis))
        a = math.cos(theta / 2)
        b, c, d = -axis * math.sin(theta / 2)
        aa, bb, cc, dd = a*a, b*b, c*c, d*d
        bc, ad, ac, ab, bd, cd = b*c, a*d, a*c, a*b, b*d, c*d
        multiplier = np.array([[aa+bb-cc-dd, 2*(bc+ad), 2*(bd-ac)],
                               [2*(bc-ad), aa+cc-bb-dd, 2*(cd+ab)],
                               [2*(bd+ac), 2*(cd-ab), aa+dd-bb-cc]])
        transform_vector = (np.dot(multiplier, side_vector))
        # round points to nearest whole number, add to list of transformed side coordinates
        folded_vector = round(transform_vector[0]), round(transform_vector[1]), round(transform_vector[2])
        new_side.append(folded_vector)
    return new_side
    # moved_side = move_to_actual_coord(new_side, actual_coordinates)
    # return moved_side
41e71676ee138cc355ae3990e74aeae6176d4f94
26,945
def get_approved_listings():
    """
    Gets approved listings, optionally filtered by user.

    :param user_id: optional query-string parameter
    :return: JSON list of approved listings
    """
    user_id = request.args.get('user_id')
    approved_listings = []
    if user_id:
        approved_listings = Listing.query.filter_by(approved=True, created_by=user_id)
    else:
        approved_listings = Listing.query.filter_by(approved=True)
    return jsonify({
        "listings": [listing.serialize for listing in approved_listings]
    })
1d20094c8b3ca10a23a49aa6c83020ae3cdf65e3
26,946
def edit_profile(request):
    """
    Edit company information.

    :param request:
    :return:
    """
    user_id = request.session.get("user_id")
    email = request.session.get("email")
    # username = request.session.get("username")
    if request.session.get("is_superuser"):
        # Administrators get information for all companies.
        data = models.IotProfile.objects.all().values()
        # still to test: time formatting, pagination, conversion
        # values() makes the queryset easy to convert to JSON
    else:
        data = models.Message.objects.filter(user_id=user_id).values()
    # list() is needed because a values() queryset is not JSON serializable as-is
    return JsonResponse({"result": 0, "message": "success", "data": list(data)})
f106b3f6f35497bf5db0a71b81b520ac023c2b37
26,947
from typing import Optional


def get_vault(vault_id: Optional[str] = None,
              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVaultResult:
    """
    This data source provides details about a specific Vault resource in Oracle Cloud Infrastructure Kms service.

    Gets the specified vault's configuration information.

    As a provisioning operation, this call is subject to a Key Management limit that applies to
    the total number of requests across all provisioning read operations. Key Management might
    throttle this call to reject an otherwise valid request when the total rate of provisioning
    read operations exceeds 10 requests per second for a given tenancy.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_oci as oci

    test_vault = oci.kms.get_vault(vault_id=oci_kms_vault["test_vault"]["id"])
    ```


    :param str vault_id: The OCID of the vault.
    """
    __args__ = dict()
    __args__['vaultId'] = vault_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('oci:kms/getVault:getVault', __args__, opts=opts, typ=GetVaultResult).value

    return AwaitableGetVaultResult(
        compartment_id=__ret__.compartment_id,
        crypto_endpoint=__ret__.crypto_endpoint,
        defined_tags=__ret__.defined_tags,
        display_name=__ret__.display_name,
        freeform_tags=__ret__.freeform_tags,
        id=__ret__.id,
        is_primary=__ret__.is_primary,
        management_endpoint=__ret__.management_endpoint,
        replica_details=__ret__.replica_details,
        restore_from_file=__ret__.restore_from_file,
        restore_from_object_store=__ret__.restore_from_object_store,
        restore_trigger=__ret__.restore_trigger,
        restored_from_vault_id=__ret__.restored_from_vault_id,
        state=__ret__.state,
        time_created=__ret__.time_created,
        time_of_deletion=__ret__.time_of_deletion,
        vault_id=__ret__.vault_id,
        vault_type=__ret__.vault_type)
586bc794d066cd3e4040aff04a39a81762016c41
26,948
from copy import deepcopy

import numpy as np
import xarray as xr


def _make_dist_mat_sa_utils():
    """Generate a sample distance matrix to test spatial_analysis_utils

    Returns:
        xarray.DataArray:
            a sample distance matrix to use for testing spatial_analysis_utils
    """
    dist_mat = np.zeros((10, 10))
    np.fill_diagonal(dist_mat, 0)

    # Create distance matrix where cells positive for marker 1 and 2 are within the dist_lim of
    # each other, but not the other groups. This is repeated for cells positive for marker 3 and 4,
    # and for cells positive for marker 5.
    dist_mat[1:4, 0] = 50
    dist_mat[0, 1:4] = 50
    dist_mat[4:9, 0] = 200
    dist_mat[0, 4:9] = 200
    dist_mat[9, 0] = 500
    dist_mat[0, 9] = 500
    dist_mat[2:4, 1] = 50
    dist_mat[1, 2:4] = 50
    dist_mat[4:9, 1] = 150
    dist_mat[1, 4:9] = 150
    dist_mat[9, 1:9] = 200
    dist_mat[1:9, 9] = 200
    dist_mat[3, 2] = 50
    dist_mat[2, 3] = 50
    dist_mat[4:9, 2] = 150
    dist_mat[2, 4:9] = 150
    dist_mat[4:9, 3] = 150
    dist_mat[3, 4:9] = 150
    dist_mat[5:9, 4] = 50
    dist_mat[4, 5:9] = 50
    dist_mat[6:9, 5] = 50
    dist_mat[5, 6:9] = 50
    dist_mat[7:9, 6] = 50
    dist_mat[6, 7:9] = 50
    dist_mat[8, 7] = 50
    dist_mat[7, 8] = 50

    # add some randomization to the ordering
    coords_in_order = np.arange(dist_mat.shape[0])
    coords_permuted = deepcopy(coords_in_order)
    np.random.shuffle(coords_permuted)
    dist_mat = dist_mat[np.ix_(coords_permuted, coords_permuted)]

    # we have to 1-index coords because people will be labeling their cells 1-indexed
    coords_dist_mat = [coords_permuted + 1, coords_permuted + 1]
    dist_mat = xr.DataArray(dist_mat, coords=coords_dist_mat)

    return dist_mat
706ce73e1a5e66cdf521df7d0e1bf2c43bd09d02
26,950
def _workSE(args):
    """Worker function for batch source extraction."""
    imageKey, imagePath, weightPath, weightType, psfPath, configs, \
        checkImages, catPostfix, workDir, defaultsPath = args
    catalogName = "_".join((str(imageKey), catPostfix))
    se = SourceExtractor(imagePath, catalogName, weightPath=weightPath,
                         weightType=weightType, psfPath=psfPath,
                         configs=configs, workDir=workDir,
                         defaultsPath=defaultsPath)
    if checkImages is not None:
        se.set_check_images(checkImages, workDir)
    se.run()
    return imageKey, se
be98194a91108268bb7873fb275416a394aee3c1
26,951
def split_match(date, time, station):
    """
    Function to find and extract the measurement from Jack Walpole's splitting
    data for the same event. This matching is done by finding an entry with the
    same date stamp. Initial testing has shown this to be a unique identifier.

    station MUST be a string of a station code
    date MUST be an int/float with the format yyyyjjj where j is julian day
    """
    # -------
    # First we need to read in the splitting observations made by Jack Walpole.
    # We also slice out splitting observations just from the station of interest
    # and then reset the indexing so WL_split's indices start from [0]
    # -------
    WL_split = Jacks_SKS_RAW(station)
    # -------
    # Using a Pandas DataFrame we can slice out any rows that match our date stamp.
    # The iloc function is used to extract the requisite values (here this is
    # trivial as match should be a single row dataframe, but the values still
    # need to be extracted this way)
    #
    match = WL_split[(WL_split['DATE'] == date)]  # slices rows in WL_split that have the same datestamp as date. In theory this should return a single row DataFrame
    if len(match) == 1:
        (fast, dfast, tlag, dtlag, wbeg, wend) = (match.iloc[0]['FAST'], match.iloc[0]['DFAST'],
                                                  match.iloc[0]['TLAG'], match.iloc[0]['DTLAG'],
                                                  match.iloc[0]['WBEG'], match.iloc[0]['WEND'])
    elif len(match) == 0:
        print("The provided datestamp {} does not match any observations made by JW".format(date))
        (fast, dfast, tlag, dtlag, wbeg, wend) = ('NaN', 'NaN', 'NaN', 'NaN', '40', '80')
    else:
        print("There has been more than one match, now testing by timestamp also!\n")
        time_test = int(str(time).zfill(6)[0:4])  # Jack's timestamps are only hhmm so I need to strip off the seconds from my timestamps. WARNING it is possible my timestamps are different to Jack's!!
        print('My timestamp {}, Jacks timestamp {}'.format(time_test, match.iloc[0]['TIME']))
        match2 = WL_split[(WL_split['DATE'] == date) & (WL_split['TIME'] == time_test)]
        # print(match2)
        (fast, dfast, tlag, dtlag, wbeg, wend) = (match.iloc[0]['FAST'], match.iloc[0]['DFAST'],
                                                  match.iloc[0]['TLAG'], match.iloc[0]['DTLAG'],
                                                  match.iloc[0]['WBEG'], match.iloc[0]['WEND'])
        if len(match2) == 0:  # If there is still no match
            (fast, dfast, tlag, dtlag, wbeg, wend) = ('NaN', 'NaN', 'NaN', 'NaN', 'NaN', 'NaN')
            print("No match found")
    return fast, dfast, tlag, dtlag, wbeg, wend
b1a7a1b4265719c8d79274169593754a7f683bc2
26,952
import numpy
# exp/log are assumed to come from numpy so the expression works on arrays;
# the flattened snippet did not show these imports. K (the Boltzmann constant)
# is assumed to be defined at module level.
from numpy import exp, log


def schoolf_eq(temps, B0, E, E_D, T_pk):
    """Schoolfield model, used for calculating trait values at a given temperature"""
    function = B0 * exp(-E * ((1 / (K * temps)) - (1 / (K * 283.15)))) / \
        (1 + (E / (E_D - E)) * exp((E_D / K) * (1 / T_pk - 1 / temps)))
    # list() is needed under Python 3, where map() returns an iterator that
    # numpy.array() would not expand.
    return numpy.array(list(map(log, function)), dtype=numpy.float64)
67969317bee63d759071c86840e4ae9ecfb924b4
26,953
from typing import Any, Iterable, Type, Union


def heat_type_of(
    obj: Union[str, Type[datatype], Any, Iterable[Union[str, Type[datatype], Any]]]
) -> Type[datatype]:
    """
    Returns the corresponding HeAT data type of given object, i.e. scalar, array
    or iterable. Attempts to determine the canonical data type based on the
    following priority list:
        1. dtype property
        2. type(obj)
        3. type(obj[0])

    Parameters
    ----------
    obj : scalar or DNDarray or iterable
        The object for which to infer the type.

    Raises
    -------
    TypeError
        If the object's type cannot be inferred.
    """
    # attempt to access the dtype property
    try:
        return canonical_heat_type(obj.dtype)
    except (AttributeError, TypeError):
        pass

    # attempt type of object itself
    try:
        return canonical_heat_type(type(obj))
    except TypeError:
        pass

    # last resort, type of the object at first position
    try:
        return canonical_heat_type(type(obj[0]))
    except (KeyError, IndexError, TypeError):
        raise TypeError("data type of {} is not understood".format(obj))
2637d7559bb1ff3d6a1b07d9cedd10f1eb57e564
26,955
from typing import Any, Dict, Tuple, Union


def get_field_from_acc_out_ty(
    acc_out_ty_or_dict: Union[Tuple, Dict[str, Any]], field: str
):
    """
    After tracing NamedTuple inputs are converted to standard tuples, so we cannot
    access them by name directly. Use this helper instead.
    """
    if isinstance(acc_out_ty_or_dict, dict):
        acc_out_ty = acc_out_ty_or_dict["acc_out_ty"]
    else:
        acc_out_ty = acc_out_ty_or_dict
    return acc_out_ty[TensorMetadata._fields.index(field)]
44b0cac3737823c6ea7aa4b924683d17184711a6
26,956
def bhc(data, alpha, beta=None):
    """
    This function performs Bayesian hierarchical clustering.

    Alpha: Hyperparameter
    Beta: Hyperparameter

    If beta is not given, it uses the Multinomial-Dirichlet.
    Otherwise it uses Bernoulli-Beta.
    """
    n_cluster = data.shape[0]
    nodekey = n_cluster
    list_clusters = [i for i in range(n_cluster)]
    clusters = dict()
    clusters["n_cluster"] = n_cluster
    clusters[n_cluster] = (1, [str(i + 1) for i in range(n_cluster)])
    tree = {str(i + 1): Node(key=i + 1, data=np.array([data[i, :]]), alpha=alpha,
                             beta=beta, left=None, right=None, parent=None)
            for i in range(n_cluster)}
    while n_cluster > 1:
        # Find the pair with the highest probability of the merged hypothesis
        r_k_max = -1000000
        for left, right in list(it.combinations(tree.keys(), 2)):
            nodekey += 1
            aux_data = np.vstack((tree[left].data, tree[right].data))
            aux_node = Node(nodekey, aux_data, alpha, beta=beta,
                            left=tree[left], right=tree[right])
            r_k = posterior(aux_node)
            # print(r_k)
            if r_k > r_k_max:
                r_k_max = r_k
                merged_left = left
                merged_right = right
                merged_node = aux_node
        merged_node.r_k = r_k_max
        merged_node.left.parent = merged_node
        merged_node.right.parent = merged_node
        newkey = merged_left + ',' + merged_right
        del tree[merged_left]
        del tree[merged_right]
        tree[newkey] = merged_node
        n_cluster -= 1
        clusters[n_cluster] = (r_k_max, list(tree.keys()))
        nodekey += 1
    return clusters, merged_node
a0c6cb588a66dce92a2041a02eb60b66a12422ae
26,958
import logging


def get_sql_value(conn_id, sql):
    """
    get_sql_value executes a sql query given proper connection parameters.
    The result of the sql query should be one and only one numeric value.
    """
    hook = _get_hook(conn_id)
    result = hook.get_records(sql)
    if len(result) > 1:
        logging.info("Result: %s contains more than 1 entry", str(result))
        raise ValueError("Result from sql query contains more than 1 entry")

    if len(result) < 1:
        raise ValueError("No result returned from sql query")

    if len(result[0]) != 1:
        logging.info("Result: %s does not contain exactly 1 column", str(result[0]))
        raise ValueError("Result from sql query does not contain exactly 1 column")

    return result[0][0]
24cc8c633f855b5b07c602d247a543268972d615
26,959
def get_rendered_config(path: str) -> str:
    """Return a config as a string with placeholders replaced by values of
    the corresponding environment variables."""
    with open(path) as f:
        txt = f.read()
    matches = pattern.findall(txt)
    for match in matches:
        txt = txt.replace("[" + match + "]", _get_env_var(match))
    return txt
93445db04960fd66cc88673f397eb959d2e982ec
26,960
def units_to_msec(units, resolution):
    """Convert BLE specific units to milliseconds."""
    time_ms = units * float(resolution) / 1000
    return time_ms
49588d7961593b2ba2e57e1481d6e1430b4a3671
26,961
def IR(numOfLayer, useIntraGCN, useInterGCN, useRandomMatrix, useAllOneMatrix, useCov, useCluster, class_num):
    """Constructs a ir-18/ir-50 model."""
    model = Backbone(numOfLayer, useIntraGCN, useInterGCN, useRandomMatrix,
                     useAllOneMatrix, useCov, useCluster, class_num)
    return model
dbe638d3cd38c66387c67e0854b07ea7f800909f
26,962
import re


def extractNextPageToken(resultString):
    """
    Calling GASearchVariantsResponse.fromJsonString() can be slower than
    doing the variant search in the first place; instead we use a regexp to
    extract the next page token.
    """
    m = re.search('(?<=nextPageToken": )(?:")?([0-9]*?:[0-9]*)|null', resultString)
    if m is not None:
        return m.group(1)
    return None
151a5697561b687aeff8af51c4ec2f73d47c441d
26,963
def underscore(msg):
    """ return underlined msg """
    return __apply_style(__format['underscore'], msg)
ded741e58d1f6e46fc4b9f56d57947903a8a2587
26,964
def get_slice(dimspins, y):
    """
    Get slice of variable `y` inquiring the spinboxes `dimspins`.

    Parameters
    ----------
    dimspins : list
        List of tk.Spinbox widgets of dimensions
    y : ndarray or netCDF4._netCDF4.Variable
        Input array or netcdf variable

    Returns
    -------
    ndarray
        Slice of `y` chosen with spinboxes.

    Examples
    --------
    >>> vy = vardim2var(y)
    >>> yy = self.fi.variables[vy]
    >>> miss = get_miss(self, yy)
    >>> yy = get_slice_y(self.yd, yy).squeeze()
    >>> yy = set_miss(miss, yy)
    """
    methods = ['all']
    methods.extend(DIMMETHODS)
    dd = []
    ss = []
    for i in range(y.ndim):
        dim = dimspins[i].get()
        if dim in methods:
            s = slice(0, y.shape[i])
        else:
            idim = int(dim)
            s = slice(idim, idim + 1)
        dd.append(dim)
        ss.append(s)
    if len(ss) > 0:
        imeth = list_intersection(dd, DIMMETHODS)
        if len(imeth) > 0:
            yout = y[tuple(ss)]
            ii = [i for i, d in enumerate(dd) if d in imeth]
            ii.reverse()  # last axis first
            for i in ii:
                if dd[i] == 'mean':
                    yout = np.ma.mean(yout, axis=i)
                elif dd[i] == 'std':
                    yout = np.ma.std(yout, axis=i)
                elif dd[i] == 'min':
                    yout = np.ma.min(yout, axis=i)
                elif dd[i] == 'max':
                    yout = np.ma.max(yout, axis=i)
                elif dd[i] == 'ptp':
                    yout = np.ma.ptp(yout, axis=i)
                elif dd[i] == 'sum':
                    yout = np.ma.sum(yout, axis=i)
                elif dd[i] == 'median':
                    yout = np.ma.median(yout, axis=i)
                elif dd[i] == 'var':
                    yout = np.ma.var(yout, axis=i)
            return yout
        else:
            return y[tuple(ss)]
    else:
        return np.array([], dtype=y.dtype)
3545391babb06c7cae5dc8fc6f413d34e40da57c
26,965
import connexion
import requests
import json


def query_real_confs(body=None):  # noqa: E501
    """
    query the real configuration value in the current hostId node

    query the real configuration value in the current hostId node # noqa: E501

    :param body:
    :type body: dict | bytes

    :rtype: List[RealConfInfo]
    """
    if connexion.request.is_json:
        body = ConfHost.from_dict(connexion.request.get_json())  # noqa: E501

    domain = body.domain_name
    host_list = body.host_ids

    check_res = Format.domainCheck(domain)
    if not check_res:
        num = 400
        base_rsp = BaseResponse(num, "Failed to verify the input parameter, please check the input parameters.")
        return base_rsp, num

    # check the domain is Exist
    is_exist = Format.isDomainExist(domain)
    if not is_exist:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The current domain does not exist, please create the domain first.")
        return base_rsp, code_num

    # check whether the host is configured in the domain
    is_host_list_exist = Format.isHostInDomain(domain)
    print("is_host_list_exist is : {}".format(is_host_list_exist))
    if not is_host_list_exist:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The host information is not set in the current domain. " +
                                "Please add the host information first")
        return base_rsp, code_num

    # get all hosts managed by the current domain.
    # If host_list is empty, query all hosts in the current domain.
    # If host_list is not empty, the actual contents of the currently given host are queried.
    conf_tools = ConfTools()
    port = conf_tools.load_port_by_conf()
    exist_host = []
    failed_host = []
    if len(host_list) > 0:
        host_tool = HostTools()
        exist_host, failed_host = host_tool.getHostExistStatus(domain, host_list)
    else:
        print("############## get the host in domain ##############")
        url = "http://0.0.0.0:" + port + "/host/getHost"
        headers = {"Content-Type": "application/json"}
        get_man_host = DomainName(domain_name=domain)
        response = requests.post(url, data=json.dumps(get_man_host), headers=headers)  # post request
        print("host/getHost response is : {}".format(response.text))
        res_code = response.status_code
        res_text = json.loads(response.text)
        print("host/getHost return code is : {}".format(response.status_code))

    if len(exist_host) == 0 or len(failed_host) == len(host_list):
        code_num = 400
        base_rsp = BaseResponse(code_num, "The host information is not set in the current domain. " +
                                "Please add the host information first")
        return base_rsp, code_num

    # get the management conf in domain
    print("############## get the management conf in domain ##############")
    url = "http://0.0.0.0:" + port + "/management/getManagementConf"
    headers = {"Content-Type": "application/json"}
    get_man_conf_body = DomainName(domain_name=domain)
    print("body is : {}".format(get_man_conf_body))
    response = requests.post(url, data=json.dumps(get_man_conf_body), headers=headers)  # post request
    print("response is : {}".format(response.text))
    res_code = response.status_code
    res_text = json.loads(response.text)
    print("return code is : {}".format(response.status_code))
    if res_code != 200:
        code_num = res_code
        base_rsp = BaseResponse(code_num, "Failed to query the configuration items managed in the current domain. " +
                                "The failure reason is:" + res_text)
        return base_rsp, code_num

    conf_files = res_text.get("confFiles")
    if len(conf_files) == 0:
        code_num = 400
        base_rsp = BaseResponse(code_num, "The configuration is not set in the current domain. " +
                                "Please add the configuration information first")
        return base_rsp, code_num

    res = []

    # get the real conf in host
    conf_list = []
    for d_conf in conf_files:
        file_path = d_conf.get("filePath").split(":")[-1]
        conf_list.append(file_path)
    print("############## get the real conf in host ##############")

    get_real_conf_body = {}
    get_real_conf_body_info = []
    for d_host in exist_host:
        get_real_conf_body_infos = {}
        get_real_conf_body_infos["host_id"] = d_host
        get_real_conf_body_infos["config_list"] = conf_list
        get_real_conf_body_info.append(get_real_conf_body_infos)
    get_real_conf_body["infos"] = get_real_conf_body_info
    url = conf_tools.load_url_by_conf().get("collect_url")
    headers = {"Content-Type": "application/json"}
    response = requests.post(url, data=json.dumps(get_real_conf_body), headers=headers)  # post request

    resp = json.loads(response.text).get("resp")
    resp_code = json.loads(response.text).get("code")
    if (resp_code != 200) and (resp_code != 206):
        code_num = 404
        code_string = "Failed to obtain the actual configuration, please check the file exists."
        base_rsp = BaseResponse(code_num, code_string)
        return base_rsp, code_num

    if not resp or len(resp) == 0:
        code_num = 500
        code_string = "Failed to obtain the actual configuration, please check the host info for conf/collect."
        base_rsp = BaseResponse(code_num, code_string)
        return base_rsp, code_num

    success_lists = {}
    failed_lists = {}
    for d_res in resp:
        d_host_id = d_res.get("host_id")
        fail_files = d_res.get("fail_files")
        if len(fail_files) > 0:
            failed_lists["host_id"] = d_host_id
            failed_lists_conf = []
            for d_failed in fail_files:
                failed_lists_conf.append(d_failed)
            failed_lists["failed_conf"] = failed_lists_conf
            failed_lists["success_conf"] = []
        else:
            success_lists["host_id"] = d_host_id
            success_lists["success_conf"] = []
            success_lists["failed_conf"] = []

        read_conf_info = RealConfInfo(domain_name=domain,
                                      host_id=d_host_id,
                                      conf_base_infos=[])
        d_res_infos = d_res.get("infos")
        for d_file in d_res_infos:
            file_path = d_file.get("path")
            content = d_file.get("content")
            object_parse = ObjectParse()
            content_string = object_parse.parse_conf_to_json(file_path, content)
            file_atrr = d_file.get("file_attr").get("mode")
            file_owner = "({}, {})".format(d_file.get("file_attr").get("group"),
                                           d_file.get("file_attr").get("owner"))
            real_conf_base_info = RealconfBaseInfo(file_path=file_path,
                                                   file_attr=file_atrr,
                                                   file_owner=file_owner,
                                                   conf_contens=content_string)
            read_conf_info.conf_base_infos.append(real_conf_base_info)
            if len(fail_files) > 0:
                failed_lists.get("success_conf").append(file_path)
            else:
                success_lists.get("success_conf").append(file_path)
        res.append(read_conf_info)

    print("***************************************")
    print("success_lists is : {}".format(success_lists))
    print("failed_lists is : {}".format(failed_lists))

    if len(res) == 0:
        code_num = 400
        res_text = "The real configuration was not found."
        base_rsp = BaseResponse(code_num, "Real configuration query failed. " +
                                "The failure reason is : " + res_text)
        return base_rsp, code_num
    return res
1313792649942f402694713df0d413fe39b8a77c
26,966
def is_data(data):
    """
    Check if a packet is a data packet.
    """
    return len(data) > 26 and ord(data[25]) == 0x08 and ord(data[26]) in [0x42, 0x62]
edb2a6b69fde42aef75923a2afbd5736d1aca660
26,968
def _bool_value(ctx, define_name, default, *, config_vars = None):
    """Looks up a define on ctx for a boolean value.

    Will also report an error if the value is not a supported value.

    Args:
      ctx: A Starlark context. Deprecated.
      define_name: The name of the define to look up.
      default: The value to return if the define isn't found.
      config_vars: A dictionary (String to String) of configuration
          variables. Can be from ctx.var.

    Returns:
      True/False or the default value if the define wasn't found.
    """
    if not config_vars:
        config_vars = ctx.var
    value = config_vars.get(define_name, None)
    if value != None:
        if value.lower() in ("true", "yes", "1"):
            return True
        if value.lower() in ("false", "no", "0"):
            return False
        fail("Valid values for --define={} are: true|yes|1 or false|no|0.".format(
            define_name,
        ))
    return default
c60799e3019c6acefd74115ca02b76feb9c72237
26,969
def get_tf_metric(text):
    """
    Computes the tf metric

    Params:
        text (tuple): tuple of words

    Returns:
        tf_text: format: ((word1, word2, ...), (tf1, tf2, ...))
    """
    counts = [text.count(word) for word in text]
    max_count = max(counts)
    tf = [counts[i] / max_count for i in range(0, len(counts))]
    return text, tf
6397e150fa55a056358f4b28cdf8a74abdc7fdb6
26,970
import torch


def R_transform_th(R_src, R_delta, rot_coord="CAMERA"):
    """transform R_src use R_delta.

    :param R_src: matrix
    :param R_delta:
    :param rot_coord:
    :return:
    """
    if rot_coord.lower() == "model":
        R_output = torch.matmul(R_src, R_delta)
    elif rot_coord.lower() == "camera" or rot_coord.lower() == "naive" or rot_coord.lower() == "camera_new":
        # dR_m2c x R_src_m2c
        R_output = torch.matmul(R_delta, R_src)
    else:
        raise Exception("Unknown rot_coord in R_transform: {}".format(rot_coord))
    return R_output
67d4b94bcc9382fae93cc926246fb2436eac7173
26,971
def calculate_UMI_with_mismatch(UMIs):
    """
    Correct the mismatches in UMIs

    input: UMI sequences and their counts
    return: corrected unique UMI sequences
    """
    if len(UMIs.keys()) == 1:
        return [x for x in UMIs if UMIs[x] > 0]
    UMIs = sorted(UMIs.items(), key=lambda k: k[1], reverse=True)
    UMI_info = {x[0]: x[1] for x in UMIs}
    umi_num = len(UMIs)
    if umi_num <= 10:
        for idx1 in range(0, umi_num - 1):
            for idx2 in range(idx1 + 1, umi_num):
                umi_1 = UMIs[idx1][0]
                umi_2 = UMIs[idx2][0]
                if HammingDistance(umi_1, umi_2) <= 1:
                    UMI_info[umi_1] += UMI_info[umi_2]
                    UMI_info[umi_2] = 0
    return [x for x in UMI_info if UMI_info[x] > 0]
c0e24bf7043b3041043187ca78c8b8f5cafae7cc
26,972
def create_import_data(properties):
    """
    This function collects and creates all the asset data needed for the import process.

    :param object properties: The property group that contains variables that maintain the addon's correct state.
    :return list: A list of dictionaries containing both the mesh and action import data.
    """
    # if using ue2rigify un-hide the source rig
    if properties.use_ue2rigify:
        set_source_rig_hide_value(False)

    # get the mesh and rig objects from their collections
    mesh_objects = utilities.get_from_collection(properties.mesh_collection_name, 'MESH', properties)
    rig_objects = utilities.get_from_collection(properties.rig_collection_name, 'ARMATURE', properties)

    # if the combine meshes option is on, get only meshes with unique armature parents
    mesh_objects = utilities.get_unique_parent_mesh_objects(rig_objects, mesh_objects, properties)

    # get the asset data for all the mesh objects
    mesh_data = create_mesh_data(mesh_objects, rig_objects, properties)

    # get the asset data for all the actions on the rig objects
    action_data = create_action_data(rig_objects, properties)

    # if using ue2rigify re-hide the source rig
    if properties.use_ue2rigify:
        set_source_rig_hide_value(True)

    return mesh_data + action_data
b8b28ac4a1d753214dbcd1361b1ababf7f366b55
26,973
def _parse_vertex_tuple(s):
    """Parse vertex indices in '/' separated form (like 'i/j/k', 'i//k', ...)."""
    vt = [0, 0, 0]
    for i, c in enumerate(s.split("/")):
        if c:
            vt[i] = int(c)
    return tuple(vt)
37e53236ef7a96f55aed36e929abe4472911b9ea
26,975
def getKeyFromValue(dictionary, value):
    """
    Search the dictionary for keys holding the specified value and return
    the first matching key.
    """
    keys = [key for key, val in dictionary.items() if val == value]
    if len(keys) > 0:
        return keys[0]
    return None
d2bb42938a809677f4a96e869e9e03c194a28561
26,976
def withdraw(dest):
    """
    This function defines all the FlowSpec rules to be withdrawn via the iBGP Update.

    ////***update*** Add port-range feature similar to announce() - ADDED in TBowlby's code.

    Args:
        dest (str): IP Address of the Victim host.

    Calls:
        send_requests(messages): Calls a function to execute requests API
        commands to be sent to the Flask Server.

    Returns:
        Returns the string 'route_withdrawn' to confirm the withdrawal of
        routes so the entry can be deleted from the MySQL database.
    """
    messages = [
        'withdraw flow route { match { destination %s/32; source-port =53; protocol udp; } then { rate-limit DNS_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =123; protocol udp; packet-length =468; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol icmp; } then { rate-limit ICMP_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =17 =19 =69 =111 =137 =138 =161 =162 =389 =520 =1434 =1701 =1900 =5353 =11211; protocol udp; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; source-port =53; destination-port =4444; protocol udp; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol udp; fragment is-fragment; } then { discard; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; protocol tcp; tcp-flags [ syn ]; } then { rate-limit SYN_RATE_LIMIT; community [ COMMUNITY ]; } }' % dest,
        'sleep',
        'withdraw flow route { match { destination %s/32; } then { rate-limit MAX_SPEED; community [ COMMUNITY ]; } }' % dest,
    ]
    send_requests(messages)
    return 'route_withdrawn'
2e0767630c72d69a914175e6bcc808d9b088b247
26,977
def get_prefix(node):
    """
    Strips off the name in the URI to give the prefix...

    :param node: The full URI string
    :return: the prefix as a string
    """
    if '#' in node:
        name = node.split("#")[-1]
    else:
        # there must be no # in the prefix e.g. schema.org/
        name = node.split("/")[-1]
    return node[:-len(name)]
5d005548da722751cdd0ae022994de5f39f9ac56
26,978
def sY(qubit: Qubit, coefficient: complex = 1.0) -> Pauli:
    """Return the Pauli sigma_Y operator acting on the given qubit"""
    return Pauli.sigma(qubit, 'Y', coefficient)
8d2444f4e9a4b9e3734a1d7ec1e686f06ded0c89
26,979
import numpy as np
import pandas as pd


def load_data_and_labels(filename):
    """Load sentences and labels"""
    df = pd.read_csv(filename, compression='zip', dtype={'faits': object}, encoding='utf8')
    selected = [ATTRIBUTE_TO_PREDICT, 'faits']
    non_selected = list(set(df.columns) - set(selected))

    df = df.drop(non_selected, axis=1)  # Drop non selected columns
    df = df.dropna(axis=0, how='any', subset=selected)  # Drop null rows
    df = df.reindex(np.random.permutation(df.index))  # Shuffle the dataframe

    # Map the actual labels to one hot labels
    labels = sorted(list(set(df[selected[0]].tolist())))
    one_hot = np.zeros((len(labels), len(labels)), int)
    np.fill_diagonal(one_hot, 1)
    label_dict = dict(zip(labels, one_hot))
    chk_count['n'] = len(df[selected[1]])

    x_raw = df[selected[1]].apply(lambda x: clean_str(x)).tolist()
    y_raw = df[selected[0]].apply(lambda y: label_dict[y]).tolist()
    return x_raw, y_raw, df, labels
651b156801dcdd5b847ab1eb3330afe569c6b63e
26,980
def ovc_search(request):
    """Method to do ovc search."""
    # initialize results so the except branch cannot raise a NameError
    results = []
    try:
        results = search_master(request)
    except Exception as e:
        print('error with search - %s' % (str(e)))
        return JsonResponse(results, content_type='application/json', safe=False)
    else:
        return JsonResponse(results, content_type='application/json', safe=False)
f571627dba30a3f0a1e958e484c528fa3338defa
26,981
import struct


def incdata(data, s):
    """
    add 's' to each byte.

    This is useful for finding the correct shift from an incorrectly
    shifted chunk.
    """
    return b"".join(struct.pack("<B", (_ + s) & 0xFF) for _ in data)
89633d232d655183bee7a20bd0e1c5a4a2cc7c05
26,982
from typing import Tuple

import tensorflow as tf


def nonsquare_hungarian_matching(
    weights: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
  """Hungarian matching with arbitrary shape.

  The matchers_ops.hungarian_matching supports only squared weight matrices.
  This function generalizes the hungarian matching to nonsquare cases by
  padding the weights to a square and running the square version matching.
  The property of hungarian matching ensures that the solutions are
  equivalent for the padded square problem and the original nonsquare
  problem.

  Args:
    weights: A [batch, shape1, shape2] float32 tf.Tensor.

  Returns:
    square_permutation: A [batch, max(shape1, shape2), max(shape1, shape2)]
      float32 tf.Tensor that is the permutation matrix that achieves the
      minimum total weight. Note that a permutation matrix contains only
      value 0.0 and 1.0, with each row and each column sums to 1.0.
    nonsquare_permutation: A [batch, shape1, shape2] float32 tf.Tensor. The
      nonsquare part of the permutation matrix.
  """
  _, height, width = weights.get_shape().as_list()
  max_height_width = max(height, width)
  # Padding a constant on one axis does not affect matching results.
  weights = tf.pad(weights,
                   [[0, 0],  # Do not pad the batch dimension.
                    [0, max_height_width - height],
                    [0, max_height_width - width]],
                   constant_values=_MATCHING_NEGATIVE_CONSTANT)
  square_permutation = matchers_ops.hungarian_matching(weights)

  square_permutation = tf.cast(square_permutation, tf.float32)
  return square_permutation, square_permutation[:, :height, :width]
02968da51da1d65020b544bb2467ecbe3ba4ab96
26,983
import string


def strip_non_printable(value):
    """
    Removes any non-printable characters and adds an indicator to the string
    when binary characters are found

    :param value: the value that you wish to strip
    """
    if value is None:
        return None

    # Filter all non-printable characters
    # (note that we must use join to account for the fact that Python 3
    # returns a generator)
    printable_value = ''.join(filter(lambda c: c in string.printable, value))

    if printable_value != value:
        if printable_value:
            printable_value += ' '
        printable_value += '(contains binary)'

    return printable_value
279ea769bd7d57ee3e4feb9faf10f2a3af3aa657
26,985
def flatten(name):
    """Get a flatten layer.

    Parameters
    ----------
    name : string
        the name of the flatten layer

    Returns
    -------
    flatten : keras.layers.core.Flatten
    """
    if LIB_TYPE == "keras":
        return Flatten(name=name)
b395162b7551d4292a89a2128b651305df294069
26,986
import math


def tangent_circle(dist, radius):
    """
    return tangent angle to a circle placed at (dist, 0.0) with radius=radius

    For non-existing tangent use 100 degrees.
    """
    if dist >= radius:
        return math.asin(radius / float(dist))
    return math.radians(100)
bcde88456a267239566f22bb6ea5cf00f64fa08e
26,987
import re


def find_backup_path(docsents, q, cand, k=40):
    """
    If no path is found create a dummy backup path

    :param docsents:
    :param q:
    :param cand:
    :param k:
    :return:
    """
    path_for_cand_dict = {"he_docidx": None, "he_locs": None,
                          "e1wh_loc": None, "e1_docidx": None, "e1_locs": None,
                          "cand_docidx": None, "cand_locs": None,
                          "he_words": ["BACKUP"], "e1wh": "BACKUP", "e1": "BACKUP",
                          "cand_words": ["BACKUP"]}
    ent_words = [qtok for qtok in q if qtok not in STOPWORDS]
    flag = 0
    for entw in ent_words:
        he = entw.lower()
        if len(he.split()) == 0:
            path_for_cand_dict['he_docidx'] = 0
            path_for_cand_dict['he_locs'] = [(-1, -1)]
        else:
            pat_he = re.compile('(^|\W)' + re.escape(he) + '\W')
            for docssidx, docss in enumerate(docsents):
                doc = ' '.join(' '.join(sum(docss, [])).split())
                doc = doc.lower()
                he_objs = []
                for x in pat_he.finditer(doc):
                    he_objs.append(x)
                if len(he_objs) > 0:
                    flag = 1
                    path_for_cand_dict['he_docidx'] = docssidx
                    path_for_cand_dict['he_locs'] = get_locs_given_objs(doc, he, he_objs)[:k]
                    break
        if flag == 1:
            break

    cand_toks = cand.split()
    cand_words = [candtok for candtok in cand_toks if candtok not in STOPWORDS]
    flag = 0
    for cand in cand_words:
        cand = cand.lower()
        pat_cand = re.compile('(^|\W)' + re.escape(cand) + '\W')
        for docssidx, docss in enumerate(docsents):
            doc = ' '.join(' '.join(sum(docss, [])).split())
            doc = doc.lower()
            ca_objs = []
            for x in pat_cand.finditer(doc):
                ca_objs.append(x)
            if len(ca_objs) > 0:
                flag = 1
                path_for_cand_dict['cand_docidx'] = docssidx
                path_for_cand_dict['cand_locs'] = get_locs_given_objs(doc, cand, ca_objs)[:k]
                break
        if flag == 1:
            break

    if path_for_cand_dict['he_docidx'] is None or path_for_cand_dict['he_locs'] is None:
        path_for_cand_dict['he_docidx'] = 0
        path_for_cand_dict['he_locs'] = [(-1, -1)]
    if path_for_cand_dict['cand_docidx'] is None or path_for_cand_dict['cand_locs'] is None:
        path_for_cand_dict['cand_docidx'] = 0
        path_for_cand_dict['cand_locs'] = [(0, 0)]
    return path_for_cand_dict
10a71c623da6c185a1e1cc1242b2e0402208837c
26,988
def state_transitions():
    """Simplified state transition dictionary"""
    return {
        "E": {"A": {"(0, 9)": 1}},
        "A": {"I": {"(0, 9)": 1}},
        "I": {"H": {"(0, 9)": 1}},
        "H": {"R": {"(0, 9)": 1}}
    }
f8c79f8071f2b61ceacaacf3406a198b2c54c917
26,989
import json


def send(socket, action, opts=None, request_response=True, return_type='auto'):
    """Send a request to an RPC server.

    Parameters
    ----------
    socket : zmq socket
        The ZeroMQ socket that is connected to the server.
    action : str
        Name of action server should perform. See :func:`RPCClient.send()`
        for a list of actions and their associated options.
    opts : dict or None
        An optional dict of options specifying the behavior of the action.
    request_response : bool
        If True, then the server is asked to send a response.
    return_type : str
        'proxy' to force the server to send return values by proxy, or 'auto'
        to allow the server to decide whether to return by proxy or by value.
    """
    global next_req_id

    # If we want the server to send a response, then we must supply a unique ID
    # for the request. Otherwise, send -1 as the request ID to indicate that
    # the server should not send a reply.
    if request_response:
        req_id = next_req_id
        next_req_id += 1
    else:
        req_id = -1

    # Serialize opts if it was specified, otherwise send an empty string.
    if opts is None:
        opts_str = b''
    else:
        opts_str = json.dumps(opts).encode()

    # Tell the server which serializer we are using
    ser_type = b'json'

    # Send the request as a multipart message
    msg = [str(req_id).encode(), action.encode(), return_type.encode(), ser_type, opts_str]
    socket.send_multipart(msg)

    # Print so we can see what the final json-encoded message looks like
    msg = '\n'.join([' ' + m.decode() for m in msg])
    print("\n>>> send to %s:\n%s" % (socket.last_endpoint.decode(), msg))

    # Return the request ID we can use to listen for a response later.
    return req_id
9a2dcf2fb78c1458c0dead23c4bcc451f1316731
26,990
def brighter(data, data_mean=None):
    """
    Brighter set of parameters for density remap.

    Parameters
    ----------
    data : numpy.ndarray
    data_mean : None|float|int

    Returns
    -------
    numpy.ndarray
    """
    return clip_cast(amplitude_to_density(data, dmin=60, mmult=40, data_mean=data_mean))
d00688ac99fb509ad0fe0e2f35cc5c03f598bfee
26,991
import logging
import sqlite3
from collections import defaultdict

import pandas as pd


def get_lensed_host_fluxes(host_truth_db_file, image_dir, bands='ugrizy',
                           components=('bulge', 'disk'),
                           host_types=('agn', 'sne'), verbose=False):
    """
    Loop over entries in `agn_hosts` and `sne_hosts` tables in the
    host_truth_db_file and compute fluxes (with and without MW extinction)
    in each band. Return dicts of fluxes and coordinates keyed by object ids.

    Parameters
    ----------
    host_truth_db_file: str
        File containing model parameters for lensed host of AGNs and SNe.
    image_dir: str
        Directory containing the FITS stamps.
    bands: str or list-like ['ugrizy']
        Bands for which to return magnorms.
    components: list-like [('bulge', 'disk')]
        Galaxy components of lensed hosts.
    host_types: list-like [('agn', 'sne')]
        Types of hosted objects.
    verbose: bool [False]
        Verbose flag.

    Returns
    -------
    (dict, dict, dict, dict): dicts of fluxes with MW extinction, w/out MW
        extinction, numbers of photons, and a dict of (ra, dec, redshift)
        tuples, all keyed by object id.
    """
    logger = logging.getLogger('get_lensed_host_fluxes')
    if verbose:
        logger.setLevel(logging.INFO)
    logger.info('processing %s', host_truth_db_file)
    band_fluxes = lambda: {band: 0 for band in bands}
    fluxes = defaultdict(band_fluxes)
    fluxes_noMW = defaultdict(band_fluxes)
    num_photons = defaultdict(band_fluxes)
    coords = dict()
    mag_norms = dict()
    with sqlite3.connect(host_truth_db_file) as conn:
        for host_type in host_types:
            df = pd.read_sql(f'select * from {host_type}_hosts', conn)
            for component in components:
                mag_norms[component] = get_mag_norms(host_type, component, image_dir)
            for iloc in range(len(df)):
                logger.info('%s %d %d', host_type, iloc, len(df))
                row = df.iloc[iloc]
                ra = row['ra_lens']
                dec = row['dec_lens']
                redshift = row['redshift']
                unique_id = str(row['unique_id'])
                coords[unique_id] = [ra, dec, redshift]
                gAv = row['av_mw']
                gRv = row['rv_mw']
                for component in components:
                    if unique_id not in mag_norms[component]:
                        continue
                    sed_file = find_sed_file(
                        row[f'sed_{component}_host'].lstrip('b').strip("'"))
                    iAv = row[f'av_internal_{component}']
                    iRv = row[f'rv_internal_{component}']
                    for band in bands:
                        mag_norm = mag_norms[component][unique_id][band]
                        synth_phot = SyntheticPhotometry(sed_file, mag_norm,
                                                         redshift=redshift,
                                                         iAv=iAv, iRv=iRv)
                        fluxes_noMW[unique_id][band] += synth_phot.calcFlux(band)
                        synth_phot.add_dust(gAv, gRv, 'Galactic')
                        fluxes[unique_id][band] += synth_phot.calcFlux(band)
                        bp = synth_phot.bp_dict[band]
                        photpars = PhotometricParameters(nexp=1, exptime=30,
                                                         gain=1, bandpass=band)
                        num_photons[unique_id][band] += synth_phot.sed.calcADU(bp, photpars)
    return dict(fluxes), dict(fluxes_noMW), dict(num_photons), coords
1b28c58ed824b988e02d40091ac633bc8187d27a
26,994
from importlib import import_module
from pathlib import Path


def load_challenges() -> list[Challenge]:
    """
    Loads all challenges.

    Returns
    -------
    list[Challenge]
        All loaded challenges.
    """
    __challenges.clear()
    modules = []
    for lib in (Path(__file__).parent / "saves/challenges").iterdir():
        if not lib.name.endswith(".py") or lib.name.startswith("_"):
            continue
        modules.append(lib.name.removesuffix(".py"))
    for module in sorted(modules):
        __challenges.append(
            import_module(".saves.challenges." + module, __package__).challenge  # noqa
        )
    return __challenges
d6cd5d65d572ba081d5f2ba0bca67c755bc57d2d
26,995
def get_new_user_data(GET_params):
    """Return the data necessary to create a new OLD user or update an existing one.

    :param GET_params: the ``request.GET`` dictionary-like object generated by
        Pylons which contains the query string parameters of the request.
    :returns: A dictionary whose values are lists of objects needed to create
        or update user.

    If ``GET_params`` has no keys, then return all data.  If ``GET_params``
    does have keys, then for each key whose value is a non-empty string (and
    not a valid ISO 8601 datetime) add the appropriate list of objects to the
    return dictionary.  If the value of a key is a valid ISO 8601 datetime
    string, add the corresponding list of objects *only* if the datetime does
    *not* match the most recent ``datetime_modified`` value of the resource.
    That is, a non-matching datetime indicates that the requester has
    out-of-date data.
    """
    # model_name_map maps param names to the OLD model objects from which they
    # are derived.
    model_name_map = {'orthographies': 'Orthography'}

    # getter_map maps param names to getter functions that retrieve the
    # appropriate data from the db.
    getter_map = {'orthographies': h.get_mini_dicts_getter('Orthography')}

    # result is initialized as a dict with empty list values.
    result = dict([(key, []) for key in getter_map])
    result['roles'] = h.user_roles
    result['markup_languages'] = h.markup_languages

    # There are GET params, so we are selective in what we return.
    if GET_params:
        for key in getter_map:
            val = GET_params.get(key)
            # Proceed so long as val is not an empty string.
            if val:
                val_as_datetime_obj = h.datetime_string2datetime(val)
                if val_as_datetime_obj:
                    # Value of param is an ISO 8601 datetime string that
                    # does not match the most recent datetime_modified of the
                    # relevant model in the db: therefore we return a list
                    # of objects/dicts.  If the datetimes do match, this
                    # indicates that the requester's own stores are
                    # up-to-date so we return nothing.
                    if val_as_datetime_obj != h.get_most_recent_modification_datetime(
                            model_name_map[key]):
                        result[key] = getter_map[key]()
                else:
                    result[key] = getter_map[key]()

    # There are no GET params, so we get everything from the db and return it.
    else:
        for key in getter_map:
            result[key] = getter_map[key]()

    return result
61ee952088bb37a2f5171f0bdd0ed9a59d66bed7
26,996
import six
import tensorflow as tf


def add_heatmap_summary(feature_query, feature_map, name):
    """Plots dot product of feature_query on feature_map.

    Args:
        feature_query: Batch x embedding size tensor of goal embeddings
        feature_map: Batch x h x w x embedding size of pregrasp scene embeddings
        name: string to name tensorflow summaries

    Returns:
        Batch x h x w x 1 heatmap
    """
    batch, dim = feature_query.shape
    reshaped_query = tf.reshape(feature_query, (int(batch), 1, 1, int(dim)))
    heatmaps = tf.reduce_sum(
        tf.multiply(feature_map, reshaped_query), axis=3, keep_dims=True)
    tf.summary.image(name, heatmaps)
    shape = tf.shape(heatmaps)
    softmaxheatmaps = tf.nn.softmax(tf.reshape(heatmaps, (int(batch), -1)))
    tf.summary.image(
        six.ensure_str(name) + 'softmax', tf.reshape(softmaxheatmaps, shape))
    return heatmaps
d7807942a2e3d92b4653d822b24686b649a6df88
26,997
def ParseVecFile(filename):
    """Parse a vector art file and return an Art object for it.

    Right now, handled file types are: EPS, Adobe Illustrator, PDF

    Args:
        filename: string - name of the file to read and parse
    Returns:
        geom.Art: object containing paths drawn in the file.
        Return None if there was a major problem reading the file.
    """
    (major, minor) = ClassifyFile(filename)
    if (major == "error"):
        print("Couldn't get Art:", minor)
        return None
    if major == "pdf" or (major == "ai" and minor == "pdf"):
        contents = pdf.ReadPDFPageOneContents(filename)
        if contents:
            toks = TokenizeAIEPS(contents)
            return ParsePS(toks, major, minor)
        else:
            return None
    elif major == "eps" or (major == "ai" and minor == "eps"):
        toks = TokenizeAIEPSFile(filename)
        return ParsePS(toks, major, minor)
    elif major == "svg":
        return svg.ParseSVGFile(filename)
    else:
        return None
accf0446a4600de77cf41fdc5a586f930156dfd6
26,998
import torch


def batchnorm_to_float(module):
    """Converts batch norm to FP32"""
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.float()
    for child in module.children():
        batchnorm_to_float(child)
    return module
bf9ad7cbda5984465f5dcb5f693ba71c8a0ab583
26,999
import numpy as np


def get_mat_2d(sequence, rnn=False):
    """Uses aa_to_map to turn a sequence into an array representation of the
    protein: a 2D (length x 36) array of flattened maps when rnn=True,
    otherwise a 3D (2*length x 8 x 6) array (odd rows left as zeros).
    """
    if rnn:
        mat = np.zeros((len(sequence), 36))
        for i, aa in enumerate(sequence):
            mat[i] = aa_to_map(aa)[:, :6, :].flatten()
    else:
        mat = np.zeros((2 * len(sequence), 8, 6))
        for i, aa in enumerate(sequence):
            mat[2 * i] = aa_to_map(aa)
    return mat
122eb7f890995eb4d95226251bb5f2c9a4ba38df
27,000
from datetime import datetime


def TimeSec():
    """Takes the current time and converts it into seconds since midnight.

    Returns:
        int: Time in seconds
    """
    now = datetime.now()
    return now.second + (now.minute * 60) + (now.hour * 60 * 60)
58892b89feb05a56c27d4fd62ba174f9d1c09591
27,001
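A quick usage check for TimeSec, with the arithmetic spelled out (example time is made up):

# e.g. at 13:05:30 -> 30 + 5*60 + 13*3600 == 47130
print(TimeSec(), "seconds since midnight")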
def partition_variable(variable, partition_dict):
    """
    As partition_shape() but takes a mapping of dimension-name to number of
    partitions as its second argument.

    <variable> is a VariableWrapper instance.
    """
    partitions = []
    for dim in variable.dimensions:
        if dim.name in partition_dict:
            partitions.append(partition_dict[dim.name])
        else:
            partitions.append(1)
    return partition_shape(variable.shape, partitions)
7ffd4075bbb6bbd156f76c9271101003c5db8c1e
27,002
def _get_object_properties(agent,
                           properties,
                           obj_type,
                           obj_property_name,
                           obj_property_value,
                           include_mors=False):
    """
    Helper method to simplify retrieval of properties

    This method is used by the '*.get' vPoller Worker methods and is
    meant for collecting properties for a single managed object.

    We first search for the object with property name and value,
    then create a list view for this object and finally collect its
    properties.

    Args:
        agent          (VConnector): A VConnector instance
        properties           (list): List of properties to be collected
        obj_type   (pyVmomi.vim.*): Type of vSphere managed object
        obj_property_name     (str): Property name used for searching for the object
        obj_property_value    (str): Property value identifying the object in question

    Returns:
        The collected properties for this managed object in JSON format

    """
    logger.info(
        '[%s] Retrieving properties for %s managed object of type %s',
        agent.host,
        obj_property_value,
        obj_type.__name__
    )

    # Find the Managed Object reference for the requested object
    try:
        obj = agent.get_object_by_property(
            property_name=obj_property_name,
            property_value=obj_property_value,
            obj_type=obj_type
        )
    except Exception as e:
        return {'success': 1, 'msg': 'Cannot collect properties: {}'.format(e)}

    if not obj:
        return {
            'success': 1,
            'msg': 'Cannot find object {}'.format(obj_property_value)
        }

    # Create a list view for this object and collect properties
    view_ref = agent.get_list_view(obj=[obj])
    try:
        data = agent.collect_properties(
            view_ref=view_ref,
            obj_type=obj_type,
            path_set=properties,
            include_mors=include_mors
        )
    except Exception as e:
        return {'success': 1, 'msg': 'Cannot collect properties: {}'.format(e)}

    view_ref.DestroyView()

    result = {
        'success': 0,
        'msg': 'Successfully retrieved object properties',
        'result': data,
    }

    return result
d2e43bcc1700e76a7ca117eaf419d1c5ef941975
27,003
def get_crypto_currency_pairs(info=None):
    """Gets a list of all the crypto currencies that you can trade.

    :param info: Will filter the results to have a list of the values that correspond to key that matches info.
    :type info: Optional[str]
    :returns: If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \
    Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info.

    """
    url = urls.crypto_currency_pairs()
    data = helper.request_get(url, 'results')
    return helper.filter(data, info)
b44a013d6bbf348321c4f00006262e1ab02e0459
27,004
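Usage sketch, assuming an authenticated robin_stocks-style session is already set up (the 'symbol' key and example output are assumptions, not from the source):

pairs = get_crypto_currency_pairs()                 # full dicts per pair
symbols = get_crypto_currency_pairs(info='symbol')  # just one field
print(symbols)  # e.g. ['BTC-USD', 'ETH-USD', ...]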
import tensorflow as tf


def sparse_graph_convolution_layers(name, inputs, units, reuse=True):
    """
    Used by the Joint_SMRGCN model; a crude prototypical operation.
    """
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE if reuse else False):
        # adj_tensor: list (size nb_bonds) of [length, length] matrices
        adj_tensor, hidden_tensor, node_tensor = inputs
        annotations = hidden_tensor if hidden_tensor is not None else node_tensor
        input_dim = annotations.get_shape().as_list()[-1]

        nb_bonds = len(adj_tensor)
        output = []
        for i in range(nb_bonds):
            msg_bond = linear('lt_bond_%d' % (i + 1), input_dim, units, annotations,
                              biases=False, variables_on_cpu=False)
            output.append(tf.sparse_tensor_dense_matmul(adj_tensor[i], msg_bond))
        output = tf.add_n(output) / nb_bonds

        # self-connection \approx residual connection
        output = output + linear('self-connect', input_dim, units, annotations,
                                 variables_on_cpu=False)
        return output
c5c16242fb175e851a78a34249c2f902bd2e9cb4
27,005
def update_or_create_tags(observer, repo, tag=None, type_to_update=None): """Create or update tags.""" observer.update_state( state='PROGRESS', meta='Retrieving data and media from Github' ) git = GithubAPI(repo) if tag: data, media = git.get_data(tag) if type_to_update == "ssot": populate_media(observer, media, tag) populate_data(observer, data, tag) populate_index(observer, tag, type_to_update) observer.update_state( state='SUCCESS', meta='All tasks complete' ) return True
3f478c66cda9648cb72325e9668ea08b52147fbf
27,007
def confirm_api_access_changes(request): """Renders the confirmation page to confirm the successful changes made to the API access settings for the superuser's group. Parameters: request - The request object sent with the call to the confirm page if the requested changes were successfully made to the API access settings. """ product_name = request.user.get_profile().api_access_data.product_name return render_to_response('confirm_api_access_changes.html', {'product_name': product_name}, context_instance=RequestContext(request))
b31ce15ec72607200edd66e72df99b5ad9cb4afc
27,008
def mpl_hill_shade(data, terrain=None,
                   cmap=DEF_CMAP, vmin=None, vmax=None, norm=None,
                   blend_function=rgb_blending,
                   azimuth=DEF_AZIMUTH, elevation=DEF_ELEVATION):
    """ Hill shading that uses the matplotlib intensities.

        Intended only for comparing blending methods where the matplotlib
        hill shading must be included. For all other plots we can use the
        combined_intensities function that is used in the regular hill_shade().
    """
    if terrain is None:
        terrain = data

    assert data.ndim == 2, "data must be 2 dimensional"
    assert terrain.shape == data.shape, "{} != {}".format(terrain.shape, data.shape)

    norm_intensities = mpl_surface_intensity(terrain, azimuth=azimuth, elevation=elevation)
    rgba = color_data(data, cmap=cmap, vmin=vmin, vmax=vmax, norm=norm)
    return blend_function(rgba, norm_intensities)
6a881794b486e581f817bf073c7cbef465d8d504
27,009
from configparser import ConfigParser


def img_docs(filename='paths.ini', section='PATHS'):
    """ Serve the path to the img docs directory, read from a config file. """
    parser = ConfigParser()
    parser.read(filename)
    docs = {}
    if parser.has_section(section):
        params = parser.items(section)
        for param in params:
            docs[param[0]] = param[1]
    else:
        raise Exception('Section {0} not found in the {1} file'.format(section, filename))
    return docs['path_to_img_db']
c76cc9ae17fb5dd8cfc6387349b6f47545fe01ad
27,010
def KGPhenio( directed = False, preprocess = "auto", load_nodes = True, load_node_types = True, load_edge_weights = True, auto_enable_tradeoffs = True, sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None, cache_sys_var = "GRAPH_CACHE_DIR", version = "current", **kwargs ) -> Graph: """Return kg-phenio graph Parameters ---------- directed = False preprocess = "auto" Preprocess for optimal load time & memory peak. Will preprocess in Linux/macOS but not Windows. load_nodes = True Load node names or use numeric range auto_enable_tradeoffs = True Enable when graph has < 50M edges cache_path = None Path to store graphs Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs` cache_sys_var = "GRAPH_CACHE_DIR" version = "current" Version to retrieve The available versions are: - 20220304 - 20220414 - 20220428 - 20220429 - 20220504 - 20220506 - 20220511 - 20220513 - 20220516 - 20220525 - 20220601 - 20220606 - current """ return AutomaticallyRetrievedGraph( "KGPhenio", version, "kghub", directed, preprocess, load_nodes, load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs )()
e4a3647013c0b250e29007c0db08a6d6813c8976
27,011
def _x_mul(a, b, digit=0):
    """
    Grade school multiplication, ignoring the signs.
    Returns the absolute value of the product, or None if error.
    """
    size_a = a.numdigits()
    size_b = b.numdigits()

    if a is b:
        # Efficient squaring per HAC, Algorithm 14.16:
        # http://www.cacr.math.uwaterloo.ca/hac/about/chap14.pdf
        # Gives slightly less than a 2x speedup when a == b,
        # via exploiting that each entry in the multiplication
        # pyramid appears twice (except for the size_a squares).
        z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
        i = UDIGIT_TYPE(0)
        while i < size_a:
            f = a.widedigit(i)
            pz = i << 1
            pa = i + 1

            carry = z.widedigit(pz) + f * f
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            assert carry <= MASK

            # Now f is added in twice in each column of the
            # pyramid it appears. Same as adding f<<1 once.
            f <<= 1
            while pa < size_a:
                carry += z.widedigit(pz) + a.widedigit(pa) * f
                pa += 1
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                carry += z.widedigit(pz)
                z.setdigit(pz, carry)
                pz += 1
                carry >>= SHIFT
            if carry:
                z.setdigit(pz, z.widedigit(pz) + carry)
            assert (carry >> SHIFT) == 0
            i += 1
        z._normalize()
        return z

    elif digit:
        if digit & (digit - 1) == 0:
            return b.lqshift(ptwotable[digit])

        # Even if it's not power of two it can still be useful.
        return _muladd1(b, digit)

    # a is not b
    # use the following identity to reduce the number of operations
    # a * b = a_0*b_0 + sum_{i=1}^n(a_0*b_i + a_1*b_{i-1}) + a_1*b_n
    z = rbigint([NULLDIGIT] * (size_a + size_b), 1)
    i = UDIGIT_TYPE(0)
    size_a1 = UDIGIT_TYPE(size_a - 1)
    size_b1 = UDIGIT_TYPE(size_b - 1)
    while i < size_a1:
        f0 = a.widedigit(i)
        f1 = a.widedigit(i + 1)
        pz = i
        carry = z.widedigit(pz) + b.widedigit(0) * f0
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        j = UDIGIT_TYPE(0)
        while j < size_b1:
            # this operation does not overflow using
            # SHIFT = (LONG_BIT // 2) - 1 = B - 1; in fact before it
            # carry and z.widedigit(pz) are less than 2**(B - 1);
            # b.widedigit(j + 1) * f0 < (2**(B-1) - 1)**2; so
            # carry + z.widedigit(pz) + b.widedigit(j + 1) * f0 +
            # b.widedigit(j) * f1 < 2**(2*B - 1) - 2**B < 2**LONG_BIT - 1
            carry += z.widedigit(pz) + b.widedigit(j + 1) * f0 + \
                     b.widedigit(j) * f1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
            j += 1
        # carry < 2**(B + 1) - 2
        carry += z.widedigit(pz) + b.widedigit(size_b1) * f1
        z.setdigit(pz, carry)
        pz += 1
        carry >>= SHIFT
        # carry < 4
        if carry:
            z.setdigit(pz, carry)
        assert (carry >> SHIFT) == 0
        i += 2

    if size_a & 1:
        pz = size_a1
        f = a.widedigit(pz)
        pb = 0
        carry = _widen_digit(0)
        while pb < size_b:
            carry += z.widedigit(pz) + b.widedigit(pb) * f
            pb += 1
            z.setdigit(pz, carry)
            pz += 1
            carry >>= SHIFT
        if carry:
            z.setdigit(pz, z.widedigit(pz) + carry)
    z._normalize()
    return z
84dc948c03106ce26d9b4abf67c57b5d13438ef1
27,012
import re def server_version(headers): """Extract the firmware version from HTTP headers.""" version_re = re.compile(r"ServerTech-AWS/v(?P<version>\d+\.\d+\w+)") if headers.get("Server"): match = version_re.match(headers["Server"]) if match: return match.group("version")
24151f3898430f5395e69b4dd7c42bd678626381
27,013
import tensorflow as tf


def slice_constant(data, batch_size=32, name='constant_data', global_step=None):
    """Provide a slice based on the global_step.

    This is useful when the entire data array can be stored in memory because it
    allows you to feed the data very efficiently.

    Args:
        data: A numpy array or tensor.
        batch_size: The batch size for the produced data.
        name: An optional name for this data.
        global_step: A global step variable that is used to read the data. If
            None then the default prettytensor global_step is used.
    Returns:
        A tensor that produces the given data.
    """
    with tf.name_scope(name):
        all_data = tf.convert_to_tensor(data)
        global_step = global_step or bookkeeper.global_step()

        count = len(data) // batch_size  # integer division: number of full batches
        extra = len(data) - count * batch_size

        if extra:
            # A final partial batch exists: cycle through count + 1 offsets and
            # shrink the last slice to the leftover size.
            offset = tf.mod(global_step, count + 1)
            return tf.slice(all_data, offset * batch_size,
                            tf.where(tf.equal(offset, count), extra, batch_size))
        else:
            # Data divides evenly: every slice is a full batch.
            offset = tf.mod(global_step, count)
            return tf.slice(all_data, offset * batch_size, batch_size)
53ebf9a6216841a4a4db8c2d77bd9545328454ac
27,015
def es_subcadena(adn1, adn2):
    """
    (str, str) -> bool

    >>> es_subcadena('gatc', 'tta')
    False
    >>> es_subcadena('gtattt', 'atcgta')
    False

    :param adn1: str: first strand to compare
    :param adn2: str: second strand to compare
    :return: bool: whether adn2 is a substring of adn1
    """
    return adn2 in adn1
9c3605e74e1c9dbf227695a4f0f6431cc845a5f1
27,016
def get_labels_and_features(nested_embeddings):
    """
    Splits a 2D embeddings array into features (all but the last column)
    and labels (the last column).
    """
    x = nested_embeddings[:, :-1]
    y = nested_embeddings[:, -1]
    return x, y
302505bd3aa769570fa602760f7da1ddd017e940
27,017
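A minimal runnable example of the split above; the toy values are made up:

import numpy as np

# Each row: embedding values followed by the label in the last column.
nested = np.array([[0.1, 0.2, 0.3, 0],
                   [0.4, 0.5, 0.6, 1]])
x, y = get_labels_and_features(nested)
print(x.shape, y)  # (2, 3) [0. 1.]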
def all_(f : a >> bool, t : r(a)) -> bool:
    """
    all :: Foldable r => (a -> bool) -> r a -> bool

    Determines whether all elements of the structure satisfy the predicate.
    """
    return DL.all_(f, toList(t))
ec19ae23b282affd99580b9614edaec8a8a2fd44
27,018
import pymc3 as pm
import theano.tensor as tt


def log_binom_sum(lower, upper, obs_vote, n0_curr, n1_curr, b_1_curr, b_2_curr, prev):
    """
    Helper function for computing the log probability of a convolution of binomials
    """
    # votes_within_group_count is y_0i in Wakefield's notation, the count of votes from
    # given group for given candidate within precinct i (unobserved)
    votes_within_group_count = tt.arange(lower, upper)

    component_for_current_precinct = pm.math.logsumexp(
        pm.Binomial.dist(n0_curr, b_1_curr).logp(votes_within_group_count)
        + pm.Binomial.dist(n1_curr, b_2_curr).logp(obs_vote - votes_within_group_count)
    )[0]
    return prev + component_for_current_precinct
2a2d80671b594d2c56d6db5dc770833d5d8aa129
27,019
def parse_locator(src):
    """
    (src:str) -> [pathfile:str, label:either(str, None)]
    """
    pathfile_label = src.split('#')
    if len(pathfile_label) == 1:
        pathfile_label.append(None)
    if len(pathfile_label) != 2:
        raise ValueError('Malformed src: %s' % (src))
    return pathfile_label
970bc1e2e60eec4a54cd00fc5984d22ebc2b8c7a
27,020
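The three cases parse_locator handles, as a quick demo (paths are made up):

print(parse_locator('docs/guide.md#install'))  # ['docs/guide.md', 'install']
print(parse_locator('docs/guide.md'))          # ['docs/guide.md', None]
parse_locator('a#b#c')                         # raises ValueError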
def detect_seperator(path, encoding):
    """
    :param path: pathlib.Path object
    :param encoding: file encoding.
    :return: 1 character.
    """
    # After reviewing the logic in the CSV sniffer, I concluded that all it
    # really does is to look for a non-text character. As the separator is
    # determined by the first line, which almost always is a line of headers,
    # the text characters will be utf-8,16 or ascii letters plus white space.
    # This leaves the characters ,;:| and \t as potential separators, with one
    # exception: files that use whitespace as separator. My logic is therefore
    # to (1) find the set of characters that intersect with ',;:|\t' which in
    # practice is a single character, unless (2) it is empty whereby it must
    # be whitespace.
    text = ""
    for line in path.open('r', encoding=encoding):  # pick the first line only.
        text = line
        break
    seps = {',', '\t', ';', ':', '|'}.intersection(text)
    if not seps:
        if " " in text:
            return " "
        else:
            raise ValueError("separator not detected")
    if len(seps) == 1:
        return seps.pop()
    else:
        frq = [(text.count(i), i) for i in seps]
        frq.sort(reverse=True)  # most frequent first.
        return frq[0][-1]
8436359a602d2b8caf72a6dbdac4870c502d1bad
27,021
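A self-contained check of the detector; the sample file is hypothetical and written just for the demo:

from pathlib import Path

sample = Path('sample.csv')
sample.write_text('name;age;city\nada;36;london\n', encoding='utf-8')
print(detect_seperator(sample, 'utf-8'))  # ';'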
def pr_at_k(df, k):
    """
    Returns precision/recall for a specific result at a specific k.

    df: pandas df with columns 'space', 'time', 'y_true', and 'y_pred'
    k: the number of obs you'd like to label 1 at each time
    """
    # static traits of df
    universe = df['time'].nunique()
    p = df['y_true'].sum()

    # needs to be sorted by (time, y_pred)
    # group by time and find the number of true positives in the top k
    tp = df.groupby('time').pipe(tp_group, k)
    fp = (universe * k) - tp

    precision = tp / (tp + fp)
    recall = tp / p

    return precision, recall
79997405f360fa66c4e0cbe35d54a15976cc6e3b
27,022
import cv2
import numpy as np
from PIL import Image


def draw_segm(im, np_segms, np_label, np_score, labels, threshold=0.5, alpha=0.7):
    """
    Draw segmentation on image.
    """
    mask_color_id = 0
    w_ratio = .4
    color_list = get_color_map_list(len(labels))
    im = np.array(im).astype('float32')
    clsid2color = {}
    np_segms = np_segms.astype(np.uint8)
    for i in range(np_segms.shape[0]):
        mask, score, clsid = np_segms[i], np_score[i], np_label[i] + 1
        if score < threshold:
            continue

        if clsid not in clsid2color:
            clsid2color[clsid] = color_list[clsid]
        color_mask = clsid2color[clsid]
        for c in range(3):
            color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
        idx = np.nonzero(mask)
        color_mask = np.array(color_mask)
        im[idx[0], idx[1], :] *= 1.0 - alpha
        im[idx[0], idx[1], :] += alpha * color_mask
        sum_x = np.sum(mask, axis=0)
        x = np.where(sum_x > 0.5)[0]
        sum_y = np.sum(mask, axis=1)
        y = np.where(sum_y > 0.5)[0]
        x0, x1, y0, y1 = x[0], x[-1], y[0], y[-1]
        cv2.rectangle(im, (x0, y0), (x1, y1),
                      tuple(color_mask.astype('int32').tolist()), 1)
        bbox_text = '%s %.2f' % (labels[clsid], score)
        t_size = cv2.getTextSize(bbox_text, 0, 0.3, thickness=1)[0]
        cv2.rectangle(im, (x0, y0), (x0 + t_size[0], y0 - t_size[1] - 3),
                      tuple(color_mask.astype('int32').tolist()), -1)
        cv2.putText(
            im,
            bbox_text, (x0, y0 - 2),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.3, (0, 0, 0),
            1,
            lineType=cv2.LINE_AA)
    return Image.fromarray(im.astype('uint8'))
f2248256a0be01efb1e9402d1e51e04a5fde365d
27,023
def solve(board): """ solve a sudoku board using backtracking param board: 2d list of integers return: solution """ space_found = find_empty_space(board) if not space_found: return True else: row, col = space_found for i in range(1, 10): if valid_number(board, i, (row, col)): board[row][col] = i if solve(board): return True board[row][col] = 0 return False
5b94db3ba4873c0fc5e91355dab6c59ed0603fa0
27,024
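solve() above relies on two helpers that are not part of this record. A minimal sketch inferred from the call sites, assuming a standard 9x9 board of lists with 0 marking an empty cell:

def find_empty_space(board):
    # Return (row, col) of the first empty cell, or None when the board is full.
    for r in range(9):
        for c in range(9):
            if board[r][c] == 0:
                return (r, c)
    return None

def valid_number(board, num, pos):
    row, col = pos
    # num must not already appear in the row or column.
    if num in board[row]:
        return False
    if num in (board[r][col] for r in range(9)):
        return False
    # num must not appear in the 3x3 box containing pos.
    br, bc = 3 * (row // 3), 3 * (col // 3)
    for r in range(br, br + 3):
        for c in range(bc, bc + 3):
            if board[r][c] == num:
                return False
    return True

# board = [[5, 3, 0, ...], ...]  # 9x9 list of lists, 0 = empty
# if solve(board):
#     print(board)  # solved in place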
from typing import List from typing import Optional def check(s: str) -> None: """ Checks if the given input string of brackets are balanced or not Args: s (str): The input string """ stack: List[str] = [] def get_opening(char: str) -> Optional[str]: """ Gets the corresponding opening braces of the given input character. Args: char (str): The closing braces Returns: str: The corresponding open braces. """ if char == ")": return "(" if char == "]": return "[" if char == "}": return "{" return None # for every character in the given input string for char in s: # if the string is an opening brace, push to stack if char in ("(", "{", "["): stack.append(char) else: try: # if the top element of the stack is the same as # the corresponding opening bracket of the current # character, pop the element if get_opening(char) == stack[-1]: stack.pop() # else, the input string is unbalanced, break out of the # loop else: break except IndexError: break else: # if the loop terminated normally, and stack is empty, print success message if len(stack) == 0: print("Balanced.") # else print unsuccessful message else: print("Not balanced.") return # since at this point the loop terminated abnormally, # print unsuccessful message print("Not balanced.")
720018e5b39e070f48e18c502e8a842feef32840
27,025
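The three outcomes of check, traced through the for/else logic above (inputs are made up):

check('([]{})')  # prints "Balanced." -- loop ends normally, stack empty
check('([)]')    # prints "Not balanced." -- mismatch breaks the loop
check(')')       # prints "Not balanced." -- empty stack hits the IndexError branch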
def format_address(msisdn): """ Format a normalized MSISDN as a URI that ParlayX will accept. """ if not msisdn.startswith('+'): raise ValueError('Only international format addresses are supported') return 'tel:' + msisdn[1:]
f5a5cc9f8bcf77f1185003cfd523d7d6f1212bd8
27,026
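Usage of format_address with a hypothetical MSISDN:

print(format_address('+27831234567'))  # 'tel:27831234567'
format_address('0831234567')           # raises ValueError: not international format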
def get_nag_statistics(nag): """Return a report containing all NAG statistics""" report = """Constants: {0} Inputs: {1} NANDs: {2} Outputs: {3} Min. I/O distance: {4} Max. I/O distance: {5}""".format( nag.constant_number, nag.input_number, nag.nand_number, nag.output_number, nag.input_to_output_min_distance, nag.input_to_output_max_distance) return report
44d3f32bc0b05d8b1d81c3b32dc140af4fd20aa0
27,027
def create_backup(storage, remote, parent=None):
    """
    Create a new backup of provided remote and return its backup object.

    .. warning::
        Do not forget to add a label on returned backup to avoid its removal
        by the garbage collector.
    """
    # Default to no parent; only set parent_root when the parent backup
    # actually exists (otherwise it would be referenced while unbound).
    parent_ref = None
    parent_root = None
    if parent:
        parent_ref = storage.resolve(parent)
        parent_backup = storage.get_backup(parent_ref)
        if parent_backup:
            parent_root = storage.get_tree(parent_backup.root)

    backup = Backup(parent=parent_ref)
    with backup, remote:
        backup.errors, backup.stats, backup.root = walk_and_ingest_remote(remote, storage, parent=parent_root)
    ref, size, stored_size = storage.ingest(backup)
    return ref, backup
7c4f5b424d6c48474fce74396eb6e47d0935f559
27,029
import numpy as np


def get_pairwise_correlation(population_df, method="pearson"):
    """Given a population dataframe, calculate all pairwise correlations.

    Parameters
    ----------
    population_df : pandas.core.frame.DataFrame
        Includes metadata and observation features.
    method : str, default "pearson"
        Which correlation method to use.

    Returns
    -------
    data_cor_df : pandas.core.frame.DataFrame
        Symmetrical correlation matrix of the population features.
    pairwise_df : pandas.core.frame.DataFrame
        Long-format pairwise correlations with columns
        "pair_a", "pair_b", and "correlation".
    """
    # Check that the input method is supported
    method = check_correlation_method(method)

    # Get a symmetrical correlation matrix
    data_cor_df = population_df.corr(method=method)

    # Create a copy of the dataframe to generate upper triangle of zeros
    data_cor_natri_df = data_cor_df.copy()

    # Replace upper triangle in correlation matrix with NaN
    data_cor_natri_df = data_cor_natri_df.where(
        np.tril(np.ones(data_cor_natri_df.shape), k=-1).astype(bool)
    )

    # Acquire pairwise correlations in a long format
    # Note that we are using the NaN upper triangle DataFrame
    pairwise_df = data_cor_natri_df.stack().reset_index()
    pairwise_df.columns = ["pair_a", "pair_b", "correlation"]

    return data_cor_df, pairwise_df
85f1df4357f9996492bac6053a2f0852b2318f14
27,030
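A small sketch of the function above, assuming the module's check_correlation_method helper is available; the toy data is made up:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3, 4],
                   'b': [2, 4, 6, 8],   # perfectly correlated with a
                   'c': [4, 1, 3, 2]})
cor_df, pairwise_df = get_pairwise_correlation(df)
# pairwise_df holds each unordered pair once: (b,a), (c,a), (c,b)
print(pairwise_df.sort_values('correlation', ascending=False))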
import matplotlib as mpl
import matplotlib.pyplot as plt


def plot_route(cities, route, name='diagram.png', ax=None):
    """Plot a graphical representation of the route obtained"""
    mpl.rcParams['agg.path.chunksize'] = 10000

    if not ax:
        fig = plt.figure(figsize=(5, 5), frameon=False)
        axis = fig.add_axes([0, 0, 1, 1])
        axis.set_aspect('equal', adjustable='datalim')
        plt.axis('off')

        axis.scatter(cities['x'], cities['y'], color='red', s=4)
        route = cities.reindex(route)
        route.loc[route.shape[0]] = route.iloc[0]
        axis.plot(route['x'], route['y'], color='purple', linewidth=1)

        plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=200)
        plt.close()
    else:
        ax.scatter(cities['x'], cities['y'], color='red', s=4)
        route = cities.reindex(route)
        route.loc[route.shape[0]] = route.iloc[0]
        ax.plot(route['x'], route['y'], color='purple', linewidth=1)
        return ax
b8ceadb0a26e6f8c2eacea66ede9db948d73ca65
27,032
import math

def bertScore(string):
    """
    Generate the output list consisting of the top K replacements for each
    word in the sentence using BERT.
    """
    corrector = SpellCorrector()
    temp1 = []
    temp2 = []
    temp3 = []
    con = list(string.split(" "))
    tf.reset_default_graph()
    sess = tf.InteractiveSession()
    model = Model()
    sess.run(tf.global_variables_initializer())
    var_lists = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="bert")
    for word in con:
        possible_states = corrector.edit_candidates(word, fast=False)
        if len(possible_states) == 1:
            word = possible_states[0]
        if word in possible_states:
            temp1.append([word])
            continue
        text = string
        text_mask = text.replace(word, "**mask**")
        print(text_mask)
        cls = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="cls")
        replaced_masks = [
            text_mask.replace("**mask**", state) for state in possible_states
        ]
        # Score the candidate sentences in batches of 5 to bound memory use.
        val = math.ceil(len(replaced_masks) / 5)
        m = 0
        n = 5
        for i in range(0, val):
            rep_new = replaced_masks[m:n]
            tokens = tokenizer.tokenize(rep_new[0])
            input_ids = [maskedId(tokens, i) for i in range(len(tokens))]
            tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
            ids = [generateId(mask) for mask in rep_new]
            tokens, input_ids, tokens_ids = list(zip(*ids))
            indices, ids = [], []
            for i in range(len(input_ids)):
                indices.extend([i] * len(input_ids[i]))
                ids.extend(input_ids[i])
            masked_padded = tf.keras.preprocessing.sequence.pad_sequences(
                ids, padding="post"
            )
            preds = sess.run(
                tf.nn.log_softmax(model.logits), feed_dict={model.X: masked_padded}
            )
            preds = np.reshape(
                preds, [masked_padded.shape[0], masked_padded.shape[1], 119547]
            )
            indices = np.array(indices)
            scores = []
            for i in range(len(tokens) - 1):
                filter_preds = preds[indices == i]
                total = np.sum(
                    [filter_preds[k, k + 1, x] for k, x in enumerate(tokens_ids[i])]
                )
                scores.append(total)
            prob_scores = np.array(scores) / np.sum(scores)
            probs = list(zip(possible_states, prob_scores))
            for i in probs:
                temp3.append(i)
            m += 5
            n += 5
        # Keep the top 3 candidates. Ascending sort: the log-probability sums
        # are negative, so after normalization smaller means more probable.
        temp3.sort(key=lambda x: x[1])
        temp2 = [state for state, _ in temp3[:3]]
        if len(temp2) != 0:
            temp1.append(temp2)
        else:
            temp1.append([word])
        temp2 = []
        temp3 = []
    sess.close()
    return temp1
9edf75111a0df95532a1332f6b0f4b5dbe495ac2
27,033
import dataclasses def configuration_stub(configuration_test: Configuration) -> Configuration: """ Configuration for tests. """ return dataclasses.replace( configuration_test, distance_between_wheels=DISTANCE_BETWEEN_WHEELS, )
ea2fe84c19f86062fd728bd10814303026776c03
27,034
def join_smiles(df, df_smiles=None, how="left"): """Join Smiles from Compound_Id.""" if df_smiles is None: load_resource("SMILES") df_smiles = SMILES result = df.merge(df_smiles, on="Compound_Id", how=how) result = result.apply(pd.to_numeric, errors='ignore') result = result.fillna("*") return result
e36bc5d31764e5eb8fdcf006b05e4fe75eeff36a
27,035
from functools import wraps


def signature(*types, **kwtypes):
    """Type annotations and conversions for methods. Ignores the first
    parameter (self).
    """
    conversions = [(t if isinstance(t, tuple) else (t, t)) for t in types]
    kwconversions = {k: (t if isinstance(t, tuple) else (t, t))
                     for k, t in kwtypes.items()}

    def decorator(fn):
        @wraps(fn)
        def wrapped(self, *args, **kwargs):
            args = [(arg if isinstance(arg, t) else conv(arg))
                    for (t, conv), arg in zip(conversions, args)]
            kwargs = {k: (v if isinstance(v, t) else conv(v))
                      for k, (t, conv), v in
                      ((k, kwconversions[k], v) for k, v in kwargs.items())}
            return fn(self, *args, **kwargs)
        return wrapped
    return decorator
414ecfd4738b431e8e059319c347a6e7bedabc80
27,036
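A worked example of the decorator above; the Greeter class is invented for illustration. Each positional type either accepts a matching argument or coerces it with the converter (here the type itself):

class Greeter:
    @signature(str, (int, int))
    def repeat(self, word, times):
        return ' '.join([word] * times)

g = Greeter()
print(g.repeat(123, '2'))  # '123 123' -- 123 coerced to str, '2' to int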
def mean(image): """The mean pixel value""" return image.mean()
176dd8d483008fa1071f0f0be20c4b53ad0e2a5f
27,037
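mean works on any array-like exposing a .mean() method, e.g. a numpy array (toy input):

import numpy as np
print(mean(np.arange(9).reshape(3, 3)))  # 4.0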
import yaml def read_event_file(file_name): """Read a file and return the corresponding objects. :param file_name: Name of file to read. :type file_name: str :returns: ServiceEvent from file. :rtype: ServiceEvent """ with open(file_name, 'r') as f: contents = yaml.safe_load(f) event = ServiceEvent( contents['timestamp'], contents['service'], contents['reason'], contents['action'], policy_requestor_name=contents.get('policy_requestor_name'), policy_requestor_type=contents.get('policy_requestor_type')) return event
66da0a76f064dd99c9b2eff5594aa58f5d1d8cca
27,038
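A sketch of the YAML layout read_event_file expects; the field names come from the keys read above, the values are made up:

# event.yaml:
#   timestamp: '2021-06-01T12:00:00'
#   service: mysql
#   reason: upgrade
#   action: pause
#   policy_requestor_name: nova   # optional
#   policy_requestor_type: charm  # optional
event = read_event_file('event.yaml')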
import numpy as np
from pyhdf.SD import SD, SDC


def get_layer_information(cloudsat_filenames, get_quality=True, verbose=0):
    """
    Returns CloudLayerType: -9: error, 0: non determined, 1-8 cloud types

    Note: get_quality is currently unused.
    """
    all_info = []
    for cloudsat_path in cloudsat_filenames:
        sd = SD(cloudsat_path, SDC.READ)
        if verbose:
            # List available SDS datasets.
            print("hdf datasets:", sd.datasets())
        # get cloud types at each height for this granule
        all_info.append(sd.select('CloudLayerType').get())
    all_info = np.vstack(all_info).astype(np.int8)
    return all_info
e3c52cd9284730c35da3ddbc1879d0e083fa63bd
27,040
import torch def from_magphase(mag_spec, phase, dim: int = -2): """Return a complex-like torch tensor from magnitude and phase components. Args: mag_spec (torch.tensor): magnitude of the tensor. phase (torch.tensor): angle of the tensor dim(int, optional): the frequency (or equivalent) dimension along which real and imaginary values are concatenated. Returns: :class:`torch.Tensor`: The corresponding complex-like torch tensor. """ return torch.cat([mag_spec * torch.cos(phase), mag_spec * torch.sin(phase)], dim=dim)
2f33de266fa295d0c21cf5002f6420c60eb07071
27,041
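A quick shape check for from_magphase; the tensor sizes are arbitrary:

import torch

mag = torch.ones(1, 3, 5)     # (batch, freq, time)
phase = torch.zeros(1, 3, 5)
spec = from_magphase(mag, phase)
print(spec.shape)  # torch.Size([1, 6, 5]): real block stacked over imaginary
# zero phase -> real part equals the magnitude, imaginary part is zero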