Dataset columns: content (string, 35 to 762k characters), sha1 (string, 40 characters), id (int64, 0 to 3.66M).
def jaccard_similarity_coefficient(A, B, no_positives=1.0):
    """Returns the jaccard index/similarity coefficient between A and B.

    This should work for arrays of any dimensions.

    J = len(intersection(A,B)) / len(union(A,B))

    To extend to probabilistic input, to compute the intersection, use the min(A,B).
    To compute the union, use max(A,B).

    Assumes that a value of 1 indicates the positive values.
    A value of 0 indicates the negative values.

    If no positive values (1) in either A or B, then returns no_positives.
    """
    # Make sure the shapes are the same.
    if not A.shape == B.shape:
        raise ValueError("A and B must be the same shape")

    # Make sure values are between 0 and 1.
    if np.any((A > 1.) | (A < 0) | (B > 1.) | (B < 0)):
        raise ValueError("A and B must be between 0 and 1")

    # Flatten to handle nd arrays.
    A = A.flatten()
    B = B.flatten()

    intersect = np.minimum(A, B)
    union = np.maximum(A, B)

    # Special case if neither A or B have a 1 value.
    if union.sum() == 0:
        return no_positives

    # Compute the Jaccard.
    J = float(intersect.sum()) / union.sum()
    return J
fe408565827f61323513d7d3b562bd79a23e47ec
13,700
def get_argument_from_call(call_node: astroid.Call,
                           position: int = None,
                           keyword: str = None) -> astroid.Name:
    """Returns the specified argument from a function call.

    :param astroid.Call call_node: Node representing a function call to check.
    :param int position: position of the argument.
    :param str keyword: the keyword of the argument.

    :returns: The node representing the argument, None if the argument is not found.
    :rtype: astroid.Name
    :raises ValueError: if both position and keyword are None.
    :raises NoSuchArgumentError: if no argument at the provided position or with
        the provided keyword.
    """
    if position is None and keyword is None:
        raise ValueError('Must specify at least one of: position or keyword.')
    if position is not None:
        try:
            return call_node.args[position]
        except IndexError:
            pass
    if keyword and call_node.keywords:
        for arg in call_node.keywords:
            if arg.arg == keyword:
                return arg.value
    raise NoSuchArgumentError
e4b7e054c4728f5b74bcbbe1678816a910f64bda
13,701
def snake_string(ls):
    """
    Question 7.11: Write a string sinusoidally
    """
    result = []
    strlen = len(ls)
    for idx in xrange(1, strlen, 4):
        result.append(ls[idx])
    for idx in xrange(0, strlen, 2):
        result.append(ls[idx])
    for idx in xrange(3, strlen, 4):
        result.append(ls[idx])
    return ''.join(result)
391f7cef4289c5746f77598501aeaa7ae93d31bc
13,702
def _prepare_memoization_key(args, kwargs):
    """
    Make a tuple of arguments which can be used as a key
    for a memoized function's lookup_table. If some object can't be hashed
    then used its __repr__ instead.
    """
    key_list = []
    for arg in args:
        try:
            hash(arg)
            key_list.append(arg)
        except:
            key_list.append(repr(arg))

    for (k, v) in kwargs.items():
        try:
            hash(k)
            hash(v)
            key_list.append((k, v))
        except:
            key_list.append((repr(k), repr(v)))

    return tuple(key_list)
c83e08c42886ba0e7f6e4defe5bc8f53f5682657
13,703
def kl_divergence_with_logits(p_logits=None, q_logits=None, temperature=1.):
    """Compute the KL between two categorical distributions from their logits.

    Args:
      p_logits: [..., dim] array with logits for the first distribution.
      q_logits: [..., dim] array with logits for the second distribution.
      temperature: the temperature for the softmax distribution, defaults at 1.

    Returns:
      an array of KL divergence terms taken over the last axis.
    """
    chex.assert_type([p_logits, q_logits], float)
    chex.assert_equal_shape([p_logits, q_logits])

    p_logits /= temperature
    q_logits /= temperature

    p = jax.nn.softmax(p_logits)
    log_p = jax.nn.log_softmax(p_logits)
    log_q = jax.nn.log_softmax(q_logits)
    kl = jnp.sum(p * (log_p - log_q), axis=-1)

    ## KL divergence should be positive, this helps with numerical stability
    loss = jax.nn.relu(kl)
    return loss
1950dea9e5c6d040ce464e0861b09469742810c4
13,704
from typing import Any
from datetime import datetime


def convert_bosch_datetime(dt: Any = None) -> datetime:
    """Create a datetime object from the string (or give back the datetime object) from Bosch.

    Checks if a valid number of milliseconds is sent.
    """
    if dt:
        if isinstance(dt, str):
            if dt.find(".") > 0:
                return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%f%z")
            return datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S%z")
        if isinstance(dt, datetime):
            return dt
    return None
845e9de019b700b2ab37ebb4a1b577d0bd068638
13,705
def day_log_add_id(day_log):
    """
    Assign an ID (day_id) to each entry of the day's log.
    :param day_log:
    :return:
    """
    for v in range(len(day_log)):
        day_log[v]['day_id'] = v + 1
    return day_log
c4608b07e86c074a11cf78d171490ec152092eeb
13,706
def cisco_ios_l3_acl_parsed():
    """Cisco IOS L3 Interface with ip address, acl, description and vlan."""
    vlan = Vlan(id="300", encapsulation="dot1Q")
    ipv4 = IPv4(address="10.3.3.13", mask="255.255.255.128")
    acl_in = ACL(name="Common_Client_IN", direction="in")
    acl_out = ACL(name="TEST_ACL_03", direction="out")
    interface = Interface(
        name="FastEthernet0/0.300",
        description='"Test logical subinterface 3"',
        vlans=[vlan],
        ipv4=[ipv4],
        acl=[acl_in, acl_out],
    )
    parsed_config = interface.dict()
    return parsed_config
25c7ad34695499bb6426ff71a9893c233b54a925
13,707
def brillance(p, g, m=255):
    """
    p < 0 : decrease the brightness
    p > 0 : increase the brightness
    """
    if (p + g < m + 1) and (p + g > 0):
        return int(p + g)
    elif p + g <= 0:
        return 0
    else:
        return m
b40169e487521c146c4c0777517492205951cf16
13,708
def fetch_fact():
    """Parse the command parameters, validate them, and respond.

    Note: This URL must support HTTPS and serve a valid SSL certificate.
    """
    # Parse the parameters you need
    token = request.form.get('token', None)  # TODO: validate the token
    command = request.form.get('command', None)
    text = request.form.get('text', None)

    # Validate the request parameters
    if not token:  # or some other failure condition
        abort(400)

    return jsonify({'response_type': 'in_channel',
                    'text': 'foo'})
8e5175b19d2548428e852dd9f476333dbe40481d
13,709
def payback(request):
    """
    WeChat Pay callback handler.
    :param request:
    :return:
    """
    return HttpResponse('payback')
e178abe0effe6359a664dca434e181390c1a56c1
13,710
from datetime import datetime


def get_index_shares(name, end_date=None):
    """Get the list of constituent stocks of an index for a given trading day.

    symbols = get_index_shares("上证50", "2019-01-01 09:30:00")
    """
    if not end_date:
        end_date = datetime.now().strftime(date_fmt)
    else:
        end_date = pd.to_datetime(end_date).strftime(date_fmt)
    constituents = get_history_constituents(indices[name], end_date, end_date)[0]
    symbol_list = [k for k, v in constituents['constituents'].items()]
    return list(set(symbol_list))
7a9e2890d0508b00d15da4688980736776199cfa
13,711
def erfcx(x):
    """Elementwise scaled complementary error function.

    .. note::
       Forward computation in CPU cannot be done if
       `SciPy <https://www.scipy.org/>`_ is not available.

    Args:
        x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable.

    Returns:
        ~chainer.Variable: Output variable.
    """
    return Erfcx().apply((x,))[0]
60f1655a6e390ca935f80d33e0d9156879b56c41
13,712
def fetch_data_async(blob, start_index, end_index, rpc=None):
    """Asynchronously fetches data for a blob.

    Fetches a fragment of a blob up to `MAX_BLOB_FETCH_SIZE` in length.
    Attempting to fetch a fragment that extends beyond the boundaries of the
    blob will return the amount of data from `start_index` until the end of the
    blob, which will be a smaller size than requested. Requesting a fragment
    that is entirely outside the boundaries of the blob will return an empty
    string. Attempting to fetch a negative index will raise an exception.

    Args:
      blob: A `BlobInfo`, `BlobKey`, string, or Unicode representation of the
          `BlobKey` of the blob from which you want to fetch data.
      start_index: The start index of blob data to fetch. This value must not be
          negative.
      end_index: The end index (inclusive) of the blob data to fetch. This value
          must be greater than or equal to `start_index`.
      rpc: Optional UserRPC object.

    Returns:
      A UserRPC whose result will be a string as returned by `fetch_data()`.

    Raises:
      TypeError: If `start_index` or `end_index` are not indexes, or if `blob`
          is not a string, `BlobKey` or `BlobInfo`.
      DataIndexOutOfRangeError: If `start_index` is set to a value that is less
          than 0 or `end_index` is less than `start_index` when calling
          `rpc.get_result()`.
      BlobFetchSizeTooLargeError: If the requested blob fragment is larger than
          `MAX_BLOB_FETCH_SIZE` when calling `rpc.get_result()`.
      BlobNotFoundError: If the blob does not exist when calling
          `rpc.get_result()`.
    """
    if isinstance(blob, BlobInfo):
        blob = blob.key()
    return blobstore.fetch_data_async(blob, start_index, end_index, rpc=rpc)
518f1ef45c19b8a7be55940d9abdeaf0fe014835
13,713
def get_legal_moves(color, size, board):
    """
    Get Legal Moves
    """
    legal_moves = {}
    for y in range(size):
        for x in range(size):
            reversibles = get_reversibles(color, size, board, x, y)
            if reversibles:
                legal_moves[(x, y)] = reversibles
    return legal_moves
eaab0b7fededbe660b02974f675877b97e3327f4
13,714
def edition(self, key, value):
    """Translates edition indicator field."""
    sub_a = clean_val("a", value, str)
    if sub_a:
        return sub_a.replace("ed.", "")
    raise IgnoreKey("edition")
715724dffb4ef6d72c173afbf8186acfdf9f20e3
13,715
from typing import Dict
from typing import Set
import itertools


def get_site_data(hostname: str) -> SiteData:
    """Get metadata about a site from the API"""
    url = f"https://{hostname}/w/api.php"
    data = dict(
        action="query",
        meta="siteinfo",
        siprop="|".join(
            [
                "namespaces",
                "namespacealiases",
                "specialpagealiases",
                "magicwords",
                "general",
            ]
        ),
        formatversion="2",
        format="json",
    )
    res_json = backoff_retry("get", url, params=data, output="json")

    namespaces: Dict[str, Set[str]] = {}
    all_namespaces = res_json["query"]["namespaces"]
    namespace_aliases = res_json["query"]["namespacealiases"]
    for namespace, nsdata in all_namespaces.items():
        namespaces.setdefault(namespace, set()).update(
            [
                datasources.normal_name(nsdata.get("canonical", "").lower()),
                datasources.normal_name(nsdata.get("name", "").lower()),
            ]
        )
    for nsdata in namespace_aliases:
        namespaces.setdefault(str(nsdata["id"]), set()).add(
            datasources.normal_name(nsdata.get("alias", "").lower())
        )

    specialpages = {
        item["realname"]: item["aliases"]
        for item in res_json["query"]["specialpagealiases"]
    }
    magicwords = {
        item["name"]: item["aliases"] for item in res_json["query"]["magicwords"]
    }
    general = res_json["query"]["general"]

    contribs = {datasources.normal_name(name) for name in specialpages["Contributions"]}

    subst = list(
        itertools.chain(
            magicwords.get("subst", ["SUBST"]),
            [item.lower() for item in magicwords.get("subst", ["SUBST"])],
            [item[0] + item[1:].lower() for item in magicwords.get("subst", ["SUBST"])],
        )
    )

    sitedata = SiteData(
        user=namespaces["2"] - {""},
        user_talk=namespaces["3"] - {""},
        file=namespaces["6"] - {""},
        special=namespaces["-1"] - {""},
        contribs=contribs,
        subst=subst,
        dbname=general["wikiid"],
        hostname=hostname,
    )
    return sitedata
83ca853c6fb2ebadf6473b8f5da0008b145717b0
13,716
def clear_monitor(nodenet_uid, monitor_uid):
    """Leaves the monitor intact, but deletes the current list of stored values."""
    micropsi_core.runtime.get_nodenet(nodenet_uid).get_monitor(monitor_uid).clear()
    return True
ad39c344f41fcf307f85d09add71eeeac66b30c1
13,717
def loadGrammarFrom(filename, data=None):
    """Return the text of a grammar file loaded from the disk"""
    with open(filename, 'r') as f:
        text = f.read()
        lookup = mako.lookup.TemplateLookup(directories=[relativePath('grammars')])
        template = mako.template.Template(text, lookup=lookup)
        #
        base_data = {}
        base_data.update(BASE_GRAMMAR_SETTINGS)
        #
        if data:
            for k, v in data.items():
                if v is not None:
                    base_data[k] = v
        #
        return str(template.render(**base_data))
0a0bbd0f2af5db4c673d7dbd31259a3977adb9cf
13,718
def create_generator_selfatt(generator_inputs, generator_outputs_channels, flag_I=True):
    """
    Add Conditional Self-Attention Modual to the U-Net Generator.
    By default, 256x256 => 256x256

    Args:
        generator_inputs: a tensor of input images, [b, h, w, n], with each pixel value [-1, 1].
        generator_outputs_channels: the number of generator output channels.
        flag_I: bool flag to indicate if add conditional input to self-attention layer.

    Returns:
        layers[-1]: the output of generator, eg the generated images batch,
            [b, h, w, n], with each pixel value [-1, 1].
        beta_list: list of beta matrics, save to visualize attention maps.
            Note: a beta matrix is too large to view directly, visualize it row by row as attention maps
    """
    # save output of layers for skip connections
    layers = []

    ###################### encoder ###########################################
    # encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
    with tf.variable_scope("encoder_1"):
        output = ops.conv(generator_inputs, channels=a.ngf, kernel=4, stride=2, pad=1, sn=a.sn)
        output = ops.lrelu(output, 0.2)
        # consider: append output before/after lrelu.
        # Why not use batch norm in the first layer?
        layers.append(output)

    # encoder information, (out_channels)
    encoder_layers = [
        (a.ngf * 2),  # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
        (a.ngf * 4),  # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
        (a.ngf * 8),  # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
        (a.ngf * 8),  # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]
        (a.ngf * 8),  # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
        # a.ngf * 8,  # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
        # a.ngf * 8,  # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]
    ]

    beta_list = []
    for i, out_channels in enumerate(encoder_layers):
        with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
            # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
            # Conv + BN + leakyReLU + [selfatt]
            output = ops.conv(layers[-1], channels=out_channels, kernel=4, stride=2, pad=1, sn=a.sn)
            output = batchnorm(output)  # not use ops.batch_norm, because do not know its update strategy
            output = ops.lrelu(output, 0.2)
            if a.enc_atten[i] == 'T':
                output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]),
                                       out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
                beta_list.append(beta)
            layers.append(output)

    ###################### decoder ###########################################
    # Explictly assign decoder to /gpu:1
    # Consider: layers[] is assign to /gpu:0 by default, skip connections involve communication between GPUs.
    with tf.device("/gpu:1"):
        # decoder information: (out_channels, dropout rate)
        decoder_layers = [
            # (a.ngf * 8, 0.0),  # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
            # (a.ngf * 8, 0.0),  # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
            (a.ngf * 8, 0.0),  # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
            (a.ngf * 8, 0.0),  # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
            (a.ngf * 4, 0.0),  # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
            (a.ngf * 2, 0.0),  # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
            (a.ngf, 0.0),      # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
        ]

        num_encoder_layers = len(layers)
        for decoder_layer, (out_channels, dropout) in enumerate(decoder_layers):
            skip_layer = num_encoder_layers - decoder_layer - 1
            with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
                if decoder_layer == 0 or decoder_layer >= a.num_unet:
                    # first decoder layer is directly connected to the skip_layer
                    # a.num_unet controls the number of skip connections
                    input = layers[-1]
                else:
                    input = tf.concat([layers[-1], layers[skip_layer]], axis=3)

                # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
                # Up-sample + 1x1 Conv + BN + leakyReLU + [selfatt] + [dropout]
                output = ops.up_sample(input, scale_factor=2)  # use upsample+conv replace deconv to advoid checkboard effect
                output = ops.conv(output, channels=out_channels, kernel=3, stride=1, pad=1, sn=True)
                output = batchnorm(output)
                output = ops.lrelu(output)
                # index the attention flag by the decoder layer
                if a.dec_atten[decoder_layer] == 'T':
                    output, beta = selfatt(output, tf.image.resize_images(generator_inputs, output.shape[1:3]),
                                           out_channels, flag_I=flag_I, channel_fac=a.channel_fac)
                    beta_list.append(beta)
                if dropout > 0.0:
                    output = tf.nn.dropout(output, keep_prob=1 - dropout)
                layers.append(output)

    with tf.device("/gpu:1"):
        # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
        with tf.variable_scope("decoder_1"):
            output = tf.concat([layers[-1], layers[0]], axis=3)
            output = tf.nn.relu(output)
            output = deconv(output, generator_outputs_channels)
            output = tf.tanh(output)
            layers.append(output)

    return layers[-1], beta_list
bfcc81955c7849e84053c45ea7a593570059bf28
13,719
def by_tag(articles_by_tag, tag):
    """ Filter a list of (tag, articles) to list of articles by tag"""
    for a in articles_by_tag:
        if a[0].slug == tag:
            return a[1]
642472a89cb624ed02a6e8ec488b72856ac231a9
13,720
def experiment(dataset='SUPPORT', quantiles=(0.25, 0.5, 0.75), prot_att='race',
               groups=('black', 'white'), model='dcm', adj='KM', cv_folds=5,
               seed=100, hyperparams=None, plot=True, store=False):
    """Top level interface to train and evaluate proposed survival models.

    This is the top level function that is designed to be called directly from
    inside a jupyter notebook kernel. This function allows the user to run one
    of the proposed survival analysis models on the SUPPORT datasets in a cross
    validation fashion. The function then plots and outputs the Expected
    Calibration Error and ROC characteristic at various event time quantiles.

    Parameters
    ----------
    dataset: str
        a string that determines the dataset to run experiments on.
        One of "FLCHAIN" or "SUPPORT".
    quantiles: list
        a list of event time quantiles at which the models are to be evaluated.
    prot_att: str
        a string that specifies the column in the dataset that is to be treated
        as a protected attribute.
    groups: list
        a list of strings indicating groups on which the survival analysis
        models are to be evaluated vis a vis discrimination and calibration.
    model: str
        the choice of the proposed survival analysis model.
        currently supports only "dcm".
    adj: str
        the choice of adjustment for the L1-ECE: one of
        * 'IPCW': Inverse Propensity of Censoring Weighting.
        * 'KM': Kaplan-Meier.
    cv_folds: int
        int that determines the number of Cross Validation folds.
    seed: int
        numpy random seed.
    hyperparams: dict
        a dict with hyperparams for the DCM model.
    plot: bool
        binary flag to determine if the results are to be plotted.
    store: bool
        whether the models/results are to be stored to disk.

    Returns:
        a Matplotlib figure with the ROC Curves and Reliability (Calibration)
        curves at various event quantiles.
    """
    np.random.seed(seed)

    fair_strategy = None

    (x, t, e, a), folds, quantiles = load_dataset(dataset, cv_folds, prot_att,
                                                  fair_strategy, quantiles)

    trained_model = models.train_model(x, t, e, a, folds, groups,
                                       params=hyperparams)

    if store:
        store_model(dataset, model, trained_model, params)

    if plot:
        outputs = predict(trained_model, model, x, t, e, a, folds, quantiles,
                          fair_strategy)
        results = plots.plot_results(outputs, x, e, t, a, folds, groups,
                                     quantiles, strat='quantile', adj=adj)
        return results
79ec44d4d62a42dea4f7e612cd4291ce8fbc5585
13,721
def ldns_str2rdf_type(*args):
    """LDNS buffer."""
    return _ldns.ldns_str2rdf_type(*args)
d121f8534c64b7597d775e5443b706c962ec738a
13,722
import hashlib
import _crypt


def scramble(password, message):
    """scramble message with password"""
    scramble_length = 20
    sha_new = partial(hashlib.new, 'sha1')
    if not password:
        return b''
    stage1 = sha_new(password).digest()
    stage2 = sha_new(stage1).digest()
    buf = sha_new()
    buf.update(message[:scramble_length])
    buf.update(stage2)
    result = buf.digest()
    return _crypt(result, stage1)
9ad006a5626d7b4ca3f8220dc4cbdd719a3cbac8
13,723
def dp_port_id(switch: str, port: str) -> str:
    """
    Return a unique id of a DP switch port based on switch name and port name
    :param switch:
    :param port:
    :return:
    """
    return 'port+' + switch + ':' + port
479891e41b51114744dcbb2b177180c19cd1bfd5
13,724
import requests


def request_item(zip_code, only_return_po_boxes=False, spatial_reference='4326'):
    """
    Request data for a single ZIP code, either routes or PO boxes.

    Note that the spatial reference '4326' returns latitudes and longitudes of results.
    """
    url = BASE_URL.format(
        zip_code=str(zip_code),
        spatial_reference=str(spatial_reference),
        route_or_box='B' if only_return_po_boxes else 'R'
    )
    response = requests.get(url)
    response.raise_for_status()
    return response.json()
956a2a86f0960a888046bfd5a8e3c2d7c56bc9dc
13,725
def smoothen_histogram(hist: np.array) -> np.array:
    """ Smoothens a histogram with an average filter.

    The filter is defined as multiple convolutions with
    a three-tap box filter [1, 1, 1] / 3.
    See AOS section 4.1.B.

    Args:
        hist: A histogram containing gradient orientation counts.

    Returns:
        hist_smoothed: The histogram after average smoothing.
    """
    pad_amount = round(len(smooth_kernel) / 2)
    hist_pad = np.pad(hist, pad_width=pad_amount, mode='wrap')
    hist_smoothed = np.convolve(hist_pad, smooth_kernel, mode='valid')
    return hist_smoothed
bdcc5de3df5aa2aad33653cce237f7f07d825b9d
13,726
from typing import Tuple


def end_point(min_radius: float, max_radius: float) -> Tuple[int, int]:
    """
    Generate a random goal that is reachable by the robot arm
    """
    # Ensure theta is not 0
    theta = (np.random.random() + np.finfo(float).eps) * 2 * np.pi
    # Ensure point is reachable
    r = np.random.uniform(low=min_radius, high=max_radius)
    x = int(r * np.cos(theta))
    y = int(r * np.sin(theta))
    #x = -53
    #y = -84
    return x, y
8d6a79195108e8354fad986f93da5f089b6df0d7
13,727
def expand_tile(value, size):
    """Add a new axis of given size."""
    value = tf.convert_to_tensor(value=value, name='value')
    ndims = value.shape.ndims
    return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
50adf652fff47418d1f8f1250a2a6d01f712da76
13,728
from typing import Mapping
from typing import Any


def parse_header(
        info: Mapping[str, Any],
        field_meta_data: Mapping[str, FieldMetaData],
        component_meta_data: Mapping[str, ComponentMetaData]
) -> Mapping[str, MessageMemberMetaData]:
    """Parse the header.

    Args:
        info (Mapping[str, Any]): The header.
        field_meta_data (Mapping[str, FieldMetaData]): The field metadata.
        component_meta_data (Mapping[str, ComponentMetaData]): The component metadata.

    Returns:
        Mapping[str, MessageMemberMetaData]: The parsed header.
    """
    return _to_message_member_meta_data(info, field_meta_data, component_meta_data)
a8043c62070c540712074c60e01e3c9c3ebfe99b
13,729
def amina_choo(update, context):  # 3.2.1
    """Show new choice of buttons"""
    query = update.callback_query
    bot = context.bot
    keyboard = [
        [InlineKeyboardButton("Yes", callback_data='0'),
         InlineKeyboardButton("No", callback_data='00')],
        [InlineKeyboardButton("Back", callback_data='3.2')]
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    bot.edit_message_text(
        chat_id=query.message.chat_id,
        message_id=query.message.message_id,
        text="""We have found a lawyer that suits your needs!""",
    )
    bot.send_photo(
        chat_id=query.message.chat_id,
        photo=open("female.jpg", 'rb')
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""Name: Amina Choo \nCompany: Boo and Ow LLP \nYears of Experience: 8""",
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""See more on our website: https://eldoraboo.github.io/PairALegal/amina-choo"""
    )
    bot.send_message(
        chat_id=query.message.chat_id,
        text="""Thank you for using Pair-A-Legal bot. \nWould you like to restart?""",
        reply_markup=reply_markup
    )
    return FIRST
b43d2e6d63e111b9a2f70fd71e5da765ef923746
13,730
def lighten(data, amt=0.10, is255=False):
    """Lighten a vector of colors by fraction `amt` of remaining possible intensity.

    New colors are calculated as::

        >>> new_colors = data + amt*(1.0 - data)
        >>> new_colors[:, -1] = 1 # keep all alpha at 1.0

    Parameters
    ----------
    data : matplotlib colorspec or sequence of colorspecs
        input color(s)

    amt : float, optional
        Percentage by which to lighten `r`, `g`, and `b`. `a` remains unchanged
        (Default: 0.10)

    is255 : bool, optional
        If `True`, rgb values in `data` are assumed to be between 0 and 255
        rather than 0.0 and 1.0. In this case, return values will also be
        between 0 and 255.

    Returns
    -------
    numpy.ndarray
        Lightened version of data
    """
    data = colorConverter.to_rgba_array(data)
    new_colors = data + amt * (1.0 - data)
    if is255:
        new_colors = (255 * new_colors).round()
    new_colors[:, -1] = data[:, -1]
    return new_colors
195faa21ba30989f900b9b7f2b655a97074d8833
13,731
def _determine_function_name_type(node):
    """Determine the name type whose regex a function's name should match.

    :param node: A function node.
    :returns: One of ('function', 'method', 'attr')
    """
    if not node.is_method():
        return 'function'
    if node.decorators:
        decorators = node.decorators.nodes
    else:
        decorators = []
    for decorator in decorators:
        # If the function is a property (decorated with @property
        # or @abc.abstractproperty), the name type is 'attr'.
        if (isinstance(decorator, astroid.Name) or
                (isinstance(decorator, astroid.Getattr) and
                 decorator.attrname == 'abstractproperty')):
            infered = safe_infer(decorator)
            if infered and infered.qname() in PROPERTY_CLASSES:
                return 'attr'
        # If the function is decorated using the prop_method.{setter,getter}
        # form, treat it like an attribute as well.
        elif (isinstance(decorator, astroid.Getattr) and
              decorator.attrname in ('setter', 'deleter')):
            return 'attr'
    return 'method'
d80cfd4aabdd79023d636c7425b04e747420ad36
13,732
from typing import Optional
import platform
import os
import json


def find_egl_engine_windows(association: str) -> Optional[UnrealEngine]:
    """Find Epic Games Launcher engine distribution from EngineAssociation string."""
    if platform.system() != "Windows":
        return None

    if os.path.isfile(DAT_FILE):
        with open(DAT_FILE, encoding="utf-8") as _datfile:
            for item in json.load(_datfile).get("InstallationList", []):
                if (
                    association == item.get("InstallLocation")
                    or association == item.get("AppVersion", "").split("-")[0][:-2]
                ):
                    return UnrealEngine(
                        item.get("InstallLocation"),
                        item.get("AppVersion", "").split("-")[0][:-2],
                    )
    return None
c01ac3e18de3c851120847989ae5937cb7e81288
13,733
from typing import Tuple
import datasets
from typing import List
from typing import Dict
from re import T


def create_dataset(
    template_path: str = 'com_github_corypaik_coda/projects/coda/data/coda/templates.yaml',
    objects_path: str = 'com_github_corypaik_coda/projects/coda/data/coda/objects.jsonl',
    annotations_path: str = 'com_github_corypaik_coda/projects/coda/data/coda/annotations.jsonl',
    seed_for_splits: int = 12345,
    seed_for_kmeans: int = 0,
) -> Tuple[datasets.DatasetDict, pd.DataFrame]:
    """ Prepares a dataset and saves it disk

    Args:
        metadata_path: File to save with metadata about each object.
        output_dataset_dir: Directory to save the dataset to disk.

    Returns:
        ds: dataset containing all formatted examples (train, val, test splits)
        meta: dataframe containing metadata about each object.
    """
    # maybe convert paths
    template_path = maybe_rlocation(template_path)
    objects_path = maybe_rlocation(objects_path)
    annotations_path = maybe_rlocation(annotations_path)

    # process annotations
    df = pd.read_json(annotations_path, orient='records', lines=True)

    # normalize
    df[COLORS] = df[COLORS].div(df[COLORS].sum(axis=1), 0)
    df = df.set_index(['class_id', 'worker_id'], verify_integrity=True)

    # apply a filter
    df = df.groupby('class_id', as_index=False).apply(_filter_annotations)
    df = df.reset_index()

    # average annotations
    df = df.groupby('class_id', as_index=False).mean()

    # kmeans for groupings.
    df = _get_object_groups(df, seed=seed_for_kmeans)

    # add template data. this also drops a few objects that we have annotations
    # for but are not included.
    tdf = pd.read_json(objects_path, orient='records', lines=True)
    df = df.merge(tdf, on='class_id', validate='one_to_one')
    df = df.sort_values('class_id')
    meta = df

    templates = _load_templates(template_path=template_path)

    # the real dataset: split groundtruth and filtered
    # gives us a dict for each split containing a list of objects (example form)
    split_objects = _generate_splits(df, seed=seed_for_splits)

    def _process_split(x: List[Dict[str, _T]]) -> Dict[str, List[_T]]:
        x = T.mapcat(_generate_examples_for_obj(templates=templates), x)
        x = list(x)
        x = {k: [el[k] for el in x] for k in x[0].keys()}
        return x

    # map each
    data = T.valmap(_process_split, split_objects)

    # metadata
    features = datasets.Features({
        'class_id': datasets.Value('string'),
        'display_name': datasets.Value('string'),
        'ngram': datasets.Value('string'),
        'label': datasets.Sequence(datasets.Value('float')),
        'object_group': datasets.ClassLabel(names=('Single', 'Multi', 'Any')),
        'text': datasets.Value('string'),
        'template_group': datasets.ClassLabel(names=('clip-imagenet', 'text-masked')),
        'template_idx': datasets.Value('int32')
    })

    # create dataset
    ds = datasets.DatasetDict(
        **{
            split: datasets.Dataset.from_dict(
                mapping=mapping,
                features=features,
                split=split,
            ) for split, mapping in data.items()
        })

    return ds, meta
915bb616e165b55234f140f2ea1577644876ddb7
13,734
def escape_blog_content(data):
    """Escapes the blog description."""
    if not isinstance(data, binary):
        raise ValueError('data should be bytes')
    f1 = 0
    f2 = 0

    # Find the beginning of the block
    div_begin = b'<div class="blog-description">'
    f1 = data.find(b'<div class="blog-content text">')
    if f1 >= 0:
        f1 = data.find(div_begin, f1, f1 + 200)

    # Find the end
    if f1 >= 0:
        f2 = data.find(b'<ul class="blog-info">', f1 + 1)
        if f2 >= 0:
            f2 = data.rfind(b'</div>', f1 + 1, f2)

    if f1 < 0 or f2 < 0:
        # Not found
        return data

    body = data[f1 + len(div_begin):f2].strip()
    body = html_escape(body)

    result = (
        data[:f1],
        b'<div class="blog-content text" data-escaped="1">',
        body,
        data[f2:]
    )
    return b''.join(result)
285825b253de8ef9b67d7d3f1bdaa7a28f2e918c
13,735
import csv


def read_csv(file_path, delimiter=",", encoding="utf-8"):
    """
    Reads a CSV file

    Parameters
    ----------
    file_path : str
    delimiter : str
    encoding : str

    Returns
    -------
    collection
    """
    with open(file_path, encoding=encoding) as file:
        data_in = list(csv.reader(file, delimiter=delimiter))
    return data_in
a4f1da219b0e5d752ff606614e93abbfc3d30597
13,736
from typing import Tuple
from pathlib import Path


def get_cmd_items(pair: Tuple[str, Path]):
    """Return a list of Albert items - one per example."""
    with open(pair[-1], "r") as f:
        lines = [li.strip() for li in f.readlines()]

    items = []
    for i, li in enumerate(lines):
        if not li.startswith("- "):
            continue
        desc = li.lstrip("- ")[:-1]
        example_cmd = sanitize_string(
            lines[i + 2].strip("`").replace("{{", "").replace("}}", "")
        )
        items.append(
            v0.Item(
                id=__prettyname__,
                icon=icon_path,
                text=example_cmd,
                subtext=desc,
                actions=[
                    v0.ClipAction("Copy command", example_cmd),
                    v0.UrlAction(
                        "Do a google search",
                        f'https://www.google.com/search?q="{pair[0]}" command',
                    ),
                ],
            )
        )
    return items
92d34ce5af3a3dbe162adf0766382120d0458c46
13,737
import importlib


def import_activity_class(activity_name, reload=True):
    """
    Given an activity subclass name as activity_name,
    attempt to lazy load the class when needed
    """
    try:
        module_name = "activity." + activity_name
        importlib.import_module(module_name)
        return True
    except ImportError as e:
        return False
b4cea3fad1f08a5758972847d3e03a41f89f223c
13,738
def rgb2hsv(rgb):
    """ Reverse to :any:`hsv2rgb` """
    eps = 1e-6
    rgb = np.asarray(rgb).astype(float)
    maxc = rgb.max(axis=-1)
    minc = rgb.min(axis=-1)
    v = maxc
    s = (maxc - minc) / (maxc + eps)
    s[maxc <= eps] = 0.0
    rc = (maxc - rgb[:, :, 0]) / (maxc - minc + eps)
    gc = (maxc - rgb[:, :, 1]) / (maxc - minc + eps)
    bc = (maxc - rgb[:, :, 2]) / (maxc - minc + eps)
    h = 4.0 + gc - rc
    maxgreen = (rgb[:, :, 1] == maxc)
    h[maxgreen] = 2.0 + rc[maxgreen] - bc[maxgreen]
    maxred = (rgb[:, :, 0] == maxc)
    h[maxred] = bc[maxred] - gc[maxred]
    h[minc == maxc] = 0.0
    h = (h / 6.0) % 1.0
    return np.asarray((h, s, v))
febb268b1b691897c28447ff00a29785742dfc0c
13,739
def fig_colorbar(fig, collections, *args, **kwargs):
    """Add colorbar to the right on a figure."""
    fig.subplots_adjust(right=0.8)
    cax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
    cbar = fig.colorbar(collections, cax, *args, **kwargs)
    plt.pause(0.1)
    return cbar
a3156d24e28407938661c003d30b80a4d57638e6
13,740
def _merge_css_item(item):
    """Transform argument into a single list of string values."""
    # Recurse lists and tuples to combine into single list
    if isinstance(item, (list, tuple)):
        return _merge_css_list(*item)

    # Cast to string, be sure to cast falsy values to ''
    item = "{}".format(item) if item else ""

    # Return as a list
    return [item]
c6f0c8769761640d5b0d98168cee1308f3209072
13,741
def extract_arguments(start, string):
    """ Return the list of arguments in the upcoming function parameter closure.

    Example:
        string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))'
        arguments (output): '[{'start': 1, 'end': 7}, {'start': 8, 'end': 16},
                              {'start': 17, 'end': 19}, {'start': 20, 'end': 53}]'
    """
    arguments = []
    closures = {
        "<": 0,
        "(": 0
    }
    current_position = start
    argument_start_pos = current_position + 1

    # Search for final parenthesis
    while current_position < len(string):
        if string[current_position] == "(":
            closures["("] += 1
        elif string[current_position] == ")":
            closures["("] -= 1
        elif string[current_position] == "<":
            closures["<"] += 1
        elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0:
            closures["<"] -= 1

        # Finished all arguments
        if closures["("] == 0 and closures["<"] == 0:
            # Add final argument
            arguments.append({"start": argument_start_pos, "end": current_position})
            break

        # Finished current argument
        if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",":
            arguments.append({"start": argument_start_pos, "end": current_position})
            argument_start_pos = current_position + 1

        current_position += 1

    return arguments
8e6e3fecc0643aa3f55108916a7c6892a96f13aa
13,742
def augment_img(img):
    """Data augmentation with flipping and rotation"""
    # TODO: Rewrite with torchvision transform
    flip_idx = np.random.choice([0, 1, 2])
    if flip_idx != 0:
        img = np.flip(img, axis=flip_idx)
    rot_idx = int(np.random.choice([0, 1, 2, 3]))
    img = np.rot90(img, k=rot_idx, axes=(1, 2))
    return img
4f124954b1dba8e27360a1c4664f0de46fd1a4d0
13,743
def iter_archive(path, method):
    """Iterate over an archive.

    Args:
        path: `str`, archive path
        method: `tfds.download.ExtractMethod`, extraction method

    Returns:
        An iterator of `(path_in_archive, f_obj)`
    """
    return _EXTRACT_METHODS[method](path)
dda105efc20583f54aed26a55112cfc56380ad68
13,744
def unfoldPath(cwd, path):
    """
    Unfold path applying os.path.expandvars and os.path.expanduser.
    Join 'path' with 'cwd' in the beginning If 'path' is not absolute path.
    Returns normalized absolute path.
    """
    if not path:
        return path

    path = _expandvars(path)
    path = _expanduser(path)
    if not _isabs(path):
        path = _joinpath(cwd, path)
    path = _abspath(path)
    return path
ae11a69e6b2a3b0be4b3f266960d6004e99bd261
13,745
import os
import sys
import yaml


def readConfig(config):
    """
    This function reads configuration file and combine it with parameters
    from console input to generate the configuration list.

    Args:
        * config: configuration file to be readed

    Options:
        * keyfile (-k): str, path to a file containing the random data used
          as key for the cipher.

        workmode: You can choose one of the following methods:
            * lz4: Will compress input or decompres output with lz4, this has
              a great performance with all file types.
            * human: human usable mode. key will be read as 5 bits blocks,
              input and output will use ownBase32 encoding. This is useful
              when one of the sides is doing the maths by hand.
            * raw: default operation mode, will use 1KB pages for ciphering

    Returns:
        An array with configuration from file
    """
    if not os.path.exists(config):
        sys.stderr.write("Could not find config file, "
                         + "creating a default one\n")
        configFile = open(config, "w")
        configFile.write("---\nkeyfile: defaultrawfile.rnd\nworkmode: raw")
        configFile.close()
    return yaml.load(open(config, 'r'))
27d494c5e76f13def4785997565893caeddc59b2
13,746
import sys


def main(argv):
    """Decide whether the needed jobs got satisfactory results."""
    inputs = parse_inputs(
        raw_allowed_failures=argv[1],
        raw_allowed_skips=argv[2],
        raw_jobs=argv[3],
    )
    jobs = inputs['jobs'] or {}
    jobs_allowed_to_fail = set(inputs['allowed_failures'] or [])
    jobs_allowed_to_be_skipped = set(inputs['allowed_skips'] or [])

    if not jobs:
        sys.exit(
            '❌ Invalid input jobs matrix, '
            'please provide a non-empty `needs` context',
        )

    job_matrix_succeeded = all(
        job['result'] == 'success' for name, job in jobs.items()
        if name not in (jobs_allowed_to_fail | jobs_allowed_to_be_skipped)
    ) and all(
        job['result'] in {'skipped', 'success'} for name, job in jobs.items()
        if name in jobs_allowed_to_be_skipped
    )
    set_final_result_outputs(job_matrix_succeeded)

    allowed_to_fail_jobs_succeeded = all(
        job['result'] == 'success' for name, job in jobs.items()
        if name in jobs_allowed_to_fail
    )
    allowed_to_be_skipped_jobs_succeeded = all(
        job['result'] == 'success' for name, job in jobs.items()
        if name in jobs_allowed_to_be_skipped
    )

    log_decision_details(
        job_matrix_succeeded,
        jobs_allowed_to_fail,
        jobs_allowed_to_be_skipped,
        allowed_to_fail_jobs_succeeded,
        allowed_to_be_skipped_jobs_succeeded,
        jobs,
    )

    return int(not job_matrix_succeeded)
4abc60eb17bc6ddb1e51611cecda7c1d2ecde686
13,747
def get_predictions(logits):
    """
    Convert logits into softmax predictions
    """
    probs = F.softmax(logits, dim=1)
    confidence, pred = probs.max(dim=1, keepdim=True)
    return confidence, pred, probs
c83a47140534e27bb14991d4c8b2192a2a02cd46
13,748
def lookup_capacity(lookup_table, environment, cell_type, frequency,
                    bandwidth, generation, site_density):
    """
    Use lookup table to find capacity by clutter environment geotype,
    frequency, bandwidth, technology generation and site density.
    """
    if (environment, cell_type, frequency, bandwidth, generation) not in lookup_table:
        raise KeyError("Combination %s not found in lookup table",
                       (environment, cell_type, frequency, bandwidth, generation))

    density_capacities = lookup_table[
        (environment, cell_type, frequency, bandwidth, generation)
    ]

    lowest_density, lowest_capacity = density_capacities[0]
    if site_density < lowest_density:
        return 0

    for a, b in pairwise(density_capacities):
        lower_density, lower_capacity = a
        upper_density, upper_capacity = b
        if lower_density <= site_density and site_density < upper_density:
            result = interpolate(
                lower_density, lower_capacity,
                upper_density, upper_capacity,
                site_density
            )
            return result

    # If not caught between bounds return highest capacity
    highest_density, highest_capacity = density_capacities[-1]
    return highest_capacity
c98b25611cf72cc202fea060063f3020732a5282
13,749
def contig_slow(fn, num):
    """brute force, quadratic"""
    data = parse(fn)
    for i in range(len(data)-2):
        for j in range(i + 2, len(data)-1):
            s = sum(data[i:j])
            if s == num:
                return min(data[i:j]) + max(data[i:j])
69c086c605afc17a63def6e3958e340ddb7a32c3
13,750
def create_consts(*args) -> superclasses.PyteAugmentedArgList:
    """
    Creates a new list of names.

    :param args: The args to use.
    """
    return _create_validated(*args, name="consts")
b10ab2d0e30e5cfb54c284d4557a98c0b3eb69c6
13,751
def opts2constr_feat_gen(opts):
    """Creates ConstFeatPlanes functor by calling its constructor with
    parameters from opts.

    Args:
        opts (obj): Namespace object returned by parser with settings.

    Returns:
        const_feat_planes (obj): Instantiated ConstFeatPlanes functor.
    """
    return ConstrFeatGen(opts.const_feat_fac)
5f664aae10f0584aca14ff58d2a984c29fd0dc2d
13,752
def inhibit_activations(activations, times, window_length):
    """
    Remove any activations within a specified time window following a previous activation.

    TODO - this is extremely slow for non-sparse activations

    Parameters
    ----------
    activations : ndarray
        Provided activations
    times : ndarray (N)
        Time in seconds of beginning of each frame
        N - number of time samples (frames)
    window_length : float
        Duration (seconds) of inhibition window

    Returns
    ----------
    activations : ndarray
        Inhibited activations
    """
    # Keep track of non-inhibited non-zeros
    pitch_idcs_keep = np.empty(0)
    frame_idcs_keep = np.empty(0)

    while True:
        # Determine the pitch and frame indices where activations begin
        pitch_idcs, frame_idcs = activations.nonzero()

        # Check if there are any non-zeros left to process
        if len(pitch_idcs) == 0 or len(frame_idcs) == 0:
            # If not, stop looping
            break

        # Determine the location of the next non-zero activation
        next_nz_pitch, next_nz_frame = pitch_idcs[0], frame_idcs[0]

        # Determine where the inhibition window ends
        inhibition_end = np.argmax(np.append(times, np.inf) >= times[next_nz_frame] + window_length)

        # Zero-out the activations in the inhibition window (including the non-zero itself)
        activations[next_nz_pitch, next_nz_frame : inhibition_end] = 0

        # Keep the non-zero that was just processed
        pitch_idcs_keep = np.append(pitch_idcs_keep, next_nz_pitch)
        frame_idcs_keep = np.append(frame_idcs_keep, next_nz_frame)

    # Add back in all of the non-inhibited non-zeros
    activations[pitch_idcs_keep.astype(constants.UINT), frame_idcs_keep.astype(constants.UINT)] = 1

    return activations
5665ae196430d993d40f5f4941cecde0d79d7a80
13,753
def cmd_convert_items_to_cheetah_list(list):
    """
    Cheetah templates can't iterate over a list of classes, so converts
    all data into a Cheetah-friendly list of tuples
    (NAME, DESCRIPTION, ENUM, HAS_BIT_OFFSET, BIT_OFFSET, BITS, TYPE, MIN, MAX, DEFAULT)
    """
    temp = []
    for i in list:
        temp.append(cmd_convert_to_tuple(i))
    return temp
ff6933dce38d6ddcd74df72ce321d8f16dfd5074
13,754
def pose223(pose: gtsam.Pose2) -> gtsam.Pose3:
    """convert a gtsam.Pose2 to a gtsam.Pose3

    Args:
        pose (gtsam.Pose2): the input 2D pose

    Returns:
        gtsam.Pose3: the 3D pose with zeros for the unknown values
    """
    return gtsam.Pose3(
        gtsam.Rot3.Yaw(pose.theta()),
        gtsam.Point3(pose.x(), pose.y(), 0)
    )
0a6d738d9cbe035be55a884a1523c985d547f25f
13,755
import random


def pivot_calibration_with_ransac(tracking_matrices,
                                  number_iterations,
                                  error_threshold,
                                  concensus_threshold,
                                  early_exit=False):
    """
    Written as an exercise for implementing RANSAC.

    :param tracking_matrices: N x 4 x 4 ndarray, of tracking matrices.
    :param number_iterations: the number of iterations to attempt.
    :param error_threshold: distance in millimetres from pointer position
    :param concensus_threshold: the minimum percentage of inliers to finish
    :param early_exit: If True, returns model as soon as thresholds are met
    :returns: pointer offset, pivot point and RMS Error about centroid of pivot.
    :raises: TypeError, ValueError
    """
    if number_iterations < 1:
        raise ValueError("The number of iterations must be > 1")
    if error_threshold < 0:
        raise ValueError("The error threshold must be a positive distance.")
    if concensus_threshold < 0 or concensus_threshold > 1:
        raise ValueError("The concensus threshold must be [0-1] as percentage")
    if not isinstance(tracking_matrices, np.ndarray):
        raise TypeError("tracking_matrices is not a numpy array'")

    number_of_matrices = tracking_matrices.shape[0]
    population_of_indices = range(number_of_matrices)
    minimum_matrices_required = 3

    highest_number_of_inliers = -1
    best_model = None
    best_rms = -1

    for iter_counter in range(number_iterations):
        indexes = random.sample(population_of_indices, minimum_matrices_required)
        sample = tracking_matrices[indexes]

        try:
            model, _ = pivot_calibration(sample)
        except ValueError:
            print("RANSAC, iteration " + str(iter_counter) + ", failed.")
            continue

        # Need to evaluate the number of inliers.
        # Slow, but it's written as a teaching exercise.
        world_point = model[3:6]
        number_of_inliers = 0
        inlier_indices = []
        for matrix_counter in range(number_of_matrices):
            offset = np.vstack((model[0:3], 1))
            transformed_point = tracking_matrices[matrix_counter] @ offset
            diff = world_point - transformed_point[0:3]
            norm = np.linalg.norm(diff)
            if norm < error_threshold:
                number_of_inliers = number_of_inliers + 1
                inlier_indices.append(matrix_counter)

        percentage_inliers = number_of_inliers / number_of_matrices

        # Keep the best model so far, based on the highest number of inliers.
        if percentage_inliers > concensus_threshold \
                and number_of_inliers > highest_number_of_inliers:
            highest_number_of_inliers = number_of_inliers
            inlier_matrices = tracking_matrices[inlier_indices]
            best_model, best_rms = pivot_calibration(inlier_matrices)

        # Early exit condition, as soon as we find model with enough fit.
        if percentage_inliers > concensus_threshold and early_exit:
            return best_model, best_rms

    if best_model is None:
        raise ValueError("Failed to find a model using RANSAC.")

    print("RANSAC Pivot, from " + str(number_of_matrices)
          + " matrices, used " + str(highest_number_of_inliers)
          + " matrices, with error threshold = " + str(error_threshold)
          + " and consensus threshold = " + str(concensus_threshold))

    return best_model, best_rms
0ce7c7bd8afbc88093793601da2b0333b40766cb
13,756
def find_companies_name_dict():
    """
    Finds companies names and addresses
    :return: a dict with resource name eg. area of companies and url of available data
    """
    base = "https://data.gov.ro/api/3/action/"
    query = "Date-de-identificare-platitori"
    address = url_build.build_url_package_query(base, query)
    # dictionary with available files and download url
    data_platitori = {}
    # check for valid url
    packages_exists = url_response.valid_url(address)
    if packages_exists:
        # find available packages
        avlb_package = url_response.get_avlb_package(address)
        # resources are at ['results'][0]['resources']
        resources = avlb_package['results'][0]['resources']
        # num avl resource
        num_resources = avlb_package['results'][0]['num_resources']
        # sanity check
        count = 0
        # loop over list and build a dict with name of resource and url
        for x in resources:
            package_name = x['name']
            package_url = x['url']
            temp_dict = {package_name: package_url}
            data_platitori.update(temp_dict)
            count += 1
        # sanity check
        if count == num_resources:
            print("all resources founded!")
            return data_platitori
    raise Exception("Invalid query to find companies names")
74526747f45a4c5491e4778759baca53a638c97f
13,757
import typing


def error_to_response(request: web.Request,
                      error: typing.Union[Error, ErrorList]):
    """
    Convert an :class:`Error` or :class:`ErrorList` to JSON API response.

    :arg ~aiohttp.web.Request request:
        The web request instance.
    :arg typing.Union[Error, ErrorList] error:
        The error, which is converted into a response.

    :rtype: ~aiohttp.web.Response
    """
    if not isinstance(error, (Error, ErrorList)):
        raise TypeError('Error or ErrorList instance is required.')

    return jsonapi_response(
        {
            'errors': [error.as_dict] if isinstance(error, Error) else error.as_dict,
            'jsonapi': request.app[JSONAPI]['jsonapi']
        },
        status=error.status
    )
792c3fccd8d7fee708d850169fd943010e92ab05
13,758
def read(handle):
    """read(handle)"""
    record = Record()
    __read_version(record, handle)
    __read_database_and_motifs(record, handle)
    __read_section_i(record, handle)
    __read_section_ii(record, handle)
    __read_section_iii(record, handle)
    return record
90921ec1779c313505a838863509838bd858d0b7
13,759
import json


def validate_telegam():
    """Validate telegram token and chat ID"""
    configs = InitialConfig()
    confs = ["chat_id", "bot_token"]
    conf_dict = {}

    if request.method == "GET":
        for conf in confs:
            conf_dict[conf] = getattr(configs, conf)
        conf_json = json.dumps(conf_dict)
        return conf_json

    if request.headers.get("Content-Type") == "application/json":
        for conf in confs:
            value = request.json.get(conf)
            if not value:
                return HTTPResponse(f"{conf} should have a value", 400)
            elif not isinstance(value, str):
                return HTTPResponse(f"{conf} should be str", 400)
            else:
                setattr(configs, conf, value)

        # Check telegram bot token
        try:
            bot = Bot(request.json["bot_token"])
            bot.sendMessage(request.json["chat_id"], "Configured")
        except (InvalidToken, BadRequest, Unauthorized) as error:
            if error.message == "Unauthorized":
                error.message += ": Invalid Token"
            return HTTPResponse(error.message, 400)

        configs.save()
        return HTTPResponse("Configured", 200)
f597d75672639dc2a39eb100f7221c508f62cf06
13,760
def hour(e):
    """
    :rtype: Column
    """
    return col(Hour(ensure_column(e)))
492d9e21f2f7c3fd6107dd4000c8273efaa0357c
13,761
def infer_gaussian(data):
    """
    Return (amplitude, x_0, y_0, width), where width - rough estimate of
    gaussian width
    """
    amplitude = data.max()
    x_0, y_0 = np.unravel_index(np.argmax(data), np.shape(data))
    row = data[x_0, :]
    column = data[:, y_0]
    x_0 = float(x_0)
    y_0 = float(y_0)
    dx = len(np.where(row - amplitude/2 > 0)[0])
    dy = len(np.where(column - amplitude/2 > 0)[0])
    width = np.sqrt(dx ** 2. + dy ** 2.)
    return amplitude, x_0, y_0, width
784e88e5cd58def8467cbe0a851b37cc1fefe9dd
13,762
import math


def extract_freq(bins=5, **kwargs):
    """
    Extract frequency bin features.

    Args:
        bins (int): The number of frequency bins (besides OOV)

    Returns:
        (function): A feature extraction function that returns the log of the \
            count of query tokens within each frequency bin.
    """
    def _extractor(query, resources):
        tokens = query.normalized_tokens
        stemmed_tokens = query.stemmed_tokens

        freq_dict = resources[WORD_FREQ_RSC]
        max_freq = freq_dict.most_common(1)[0][1]
        freq_features = defaultdict(int)

        for idx, tok in enumerate(tokens):
            tok = mask_numerics(tok)
            if kwargs.get(ENABLE_STEMMING, False):
                stemmed_tok = stemmed_tokens[idx]
                stemmed_tok = mask_numerics(stemmed_tok)
                freq = freq_dict.get(tok, freq_dict.get(stemmed_tok, 0))
            else:
                freq = freq_dict.get(tok, 0)

            if freq < 2:
                freq_features["in_vocab:OOV"] += 1
            else:
                # Bin the frequency with break points at
                # half max, a quarter max, an eighth max, etc.
                freq_bin = int(math.log(max_freq, 2) - math.log(freq, 2))
                if freq_bin < bins:
                    freq_features["in_vocab:IV|freq_bin:{}".format(freq_bin)] += 1
                else:
                    freq_features["in_vocab:IV|freq_bin:{}".format(bins)] += 1

        q_len = float(len(tokens))
        for k in freq_features:
            # sublinear
            freq_features[k] = math.log(freq_features[k] + 1, 2)
            # ratio
            freq_features[k] /= q_len
        return freq_features

    return _extractor
b07f2f1810a26c2d04366d5516aac0ca79b547bb
13,763
import typing


def create_private_key_params(key_type: str) -> typing.Type[PrivateKeyParams]:
    """Returns the class corresponding to private key parameters objects of the
    given key type name.

    Args:
        key_type
            The name of the OpenSSH key type.

    Returns:
        The subclass of :any:`PrivateKeyParams` corresponding to the key type
        name.

    Raises:
        KeyError: There is no subclass of :any:`PrivateKeyParams` corresponding
            to the given key type name.
    """
    return _KEY_TYPE_MAPPING[key_type].privateKeyParamsClass
6702f93f8cd8dc3fd104db5d63efd7db4bbaa38e
13,764
import json
import requests


def get_response(msg):
    """
    Call the Tuling robot open API.
    :param msg: the text message entered by the user
    :return: string or None
    """
    apiurl = "http://openapi.tuling123.com/openapi/api/v2"
    # Build the request parameters
    params = {"reqType": 0,
              "perception": {
                  "inputText": {
                      "text": msg
                  }
              },
              "userInfo": {
                  "apiKey": "ca7bf19ac0e644c38cfbe9d6fdc08de1",
                  "userId": "439608"
              }}
    # Convert the parameters to JSON
    content = json.dumps(params)
    # Send the POST request
    r = requests.post(url=apiurl, data=content, verify=False).json()
    print("r = " + str(r))
    # Parse the JSON response, e.g.:
    # {'emotion': {
    #      'robotEmotion': {'a': 0, 'd': 0, 'emotionId': 0, 'p': 0},
    #      'userEmotion': {'a': 0, 'd': 0, 'emotionId': 10300, 'p': 0}
    #  },
    #  'intent': {
    #      'actionName': '',
    #      'code': 10004,
    #      'intentName': ''
    #  },
    #  'results': [{'groupType': 1, 'resultType': 'text', 'values': {'text': '欢迎来到本机器人的地盘。'}}]}
    code = r['intent']['code']
    if code == 10004 or code == 10008:
        message = r['results'][0]['values']['text']
        return message
    return None
9a542b56a3ed3db8b8a9306ea3d425054a4ca64b
13,765
import torch


def lm_sample_with_constraints(lm_model,
                               max_decode_steps,
                               use_cuda,
                               device,
                               batch_size=1,
                               alpha_0=1,
                               alpha=1,
                               beta=0,
                               repeat_penalty=0,
                               history_penalty=0,
                               history_penalty_beta=0,
                               penalty_vocab_start=-1,
                               penalty_vocab_end=-1,
                               prefix=None,
                               gamma=1,
                               normalize="none",
                               top_k=-1,
                               top_k0=-1,
                               top_p=-1,
                               top_p0=-1,
                               eos=None,
                               need_mask_unk=True,
                               return_states=False):
    """
    """
    if eos is None:
        eos = lm_model.EOS

    dec_states = lm_model.init_search()
    search_states = init_search(lm_model, batch_size)

    if use_cuda == True:
        search_states = nested_to_cuda(search_states, device)

    y = search_states[0]
    log_probs = search_states[1]
    finished = search_states[2]
    mask_finished = search_states[3]
    hypothesis = search_states[4]
    history_log_probs = search_states[5]

    gamma = torch.tensor(gamma, dtype=torch.float, device=y.device)

    mask_unk = None
    if need_mask_unk == True:
        mask_unk = get_single_token_mask(lm_model.trg_vocab_size,
                                         lm_model.UNK,
                                         lm_model.MIN_LOGITS)
        if use_cuda == True:
            mask_unk = nested_to_cuda(mask_unk, device)

    steps = 0
    trg_seq_len = 0
    vocab_size = lm_model.trg_vocab_size
    max_decode_steps = min(max_decode_steps, lm_model.trg_max_len - trg_seq_len)

    while not finished.all() and steps < max_decode_steps:
        outputs = lm_model.decoder._step(steps, dec_states, y)
        dec_states, logits = outputs[0:2]

        if mask_unk is not None:
            logits += mask_unk

        if steps > 1 and repeat_penalty < 0:
            logits += get_multi_token_mask(hypothesis, vocab_size, -2, steps,
                                           repeat_penalty, 0,
                                           penalty_vocab_start,
                                           penalty_vocab_end)

        if steps > 2 and history_penalty < 0:
            logits += get_multi_token_mask(hypothesis, vocab_size, 0, -2,
                                           history_penalty,
                                           history_penalty_beta,
                                           penalty_vocab_start,
                                           penalty_vocab_end)

        mask = finished.type(torch.float)
        mask_logits = logits * (1 - mask) + mask_finished * mask

        _log_probs = F.log_softmax(logits, 1)

        temp = alpha_0
        if steps > 0:
            temp = alpha + steps * beta

        if prefix is not None and steps < prefix.size(1):
            is_prefix = (prefix[:, steps:steps+1]).ne(lm_model.PAD).float()
            prefix_mask = torch.zeros_like(mask_logits)
            prefix_mask.scatter_(1, prefix[:, steps:steps+1], lm_model.MAX_LOGITS)
            mask_logits += (prefix_mask * is_prefix)
            indice = top_k_top_p_sampling(mask_logits, -1, -1)
        elif steps == 0:
            indice = top_k_top_p_sampling(mask_logits, top_k0, top_p0, temp)
        else:
            indice = top_k_top_p_sampling(mask_logits, top_k, top_p, temp)

        y = (indice % vocab_size).view(-1, 1)
        finished = (finished | y.eq(eos).byte())
        hypothesis = torch.cat([hypothesis, y], 1)

        _log_probs = torch.gather(_log_probs, 1, indice)
        log_probs = log_probs + _log_probs * (1 - mask)
        history_log_probs = torch.cat([history_log_probs, _log_probs], 1)

        steps += 1
        trg_seq_len += 1

    hyp_len = torch.sum(hypothesis.ne(lm_model.PAD).float(), 1)
    normalized_score = \
        normalize_log_probs(log_probs, hyp_len, gamma, normalize)

    outputs = [hypothesis, normalized_score]
    if return_states == True:
        outputs = [hypothesis, normalized_score, history_log_probs,
                   dec_states, y, log_probs, finished, mask_finished]

    return outputs
cbccd2c0a2b91fa3e5ff5efb8b394fe7418f5b8b
13,766
import torch
import tqdm


def validate_official(args, data_loader, model, global_stats=None):
    """Run one full official validation.

    Uses exact spans and same exact match/F1 score computation as in the SQuAD
    script.

    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of examples context (matches offsets).
        answers: Map of qid --> list of accepted answers.
    """
    eval_time = Timer()

    # Run through examples
    examples = 0
    map = AverageMeter()
    mrr = AverageMeter()
    prec_1 = AverageMeter()
    prec_3 = AverageMeter()
    prec_5 = AverageMeter()

    with torch.no_grad():
        pbar = tqdm(data_loader)
        for ex in pbar:
            ids, batch_size = ex['ids'], ex['batch_size']
            scores = model.predict(ex)
            predictions = np.argsort(-scores.cpu().numpy())  # sort in descending order
            labels = ex['label'].numpy()

            map.update(MAP(predictions, labels))
            mrr.update(MRR(predictions, labels))
            prec_1.update(precision_at_k(predictions, labels, 1))
            prec_3.update(precision_at_k(predictions, labels, 3))
            prec_5.update(precision_at_k(predictions, labels, 5))

            if global_stats is None:
                pbar.set_description('[testing ... ]')
            else:
                pbar.set_description("%s" % 'Epoch = %d [validating... ]' %
                                     global_stats['epoch'])

            examples += batch_size

    result = dict()
    result['map'] = map.avg
    result['mrr'] = mrr.avg
    result['prec@1'] = prec_1.avg
    result['prec@3'] = prec_3.avg
    result['prec@5'] = prec_5.avg

    if global_stats is None:
        logger.info('test results: MAP = %.2f | MRR = %.2f | Prec@1 = %.2f | ' %
                    (result['map'], result['mrr'], result['prec@1']) +
                    'Prec@3 = %.2f | Prec@5 = %.2f | examples = %d | ' %
                    (result['prec@3'], result['prec@5'], examples) +
                    'time elapsed = %.2f (s)' % (eval_time.time()))
    else:
        logger.info('valid official: Epoch = %d | MAP = %.2f | ' %
                    (global_stats['epoch'], result['map']) +
                    'MRR = %.2f | Prec@1 = %.2f | Prec@3 = %.2f | ' %
                    (result['mrr'], result['prec@1'], result['prec@3']) +
                    'Prec@5 = %.2f | examples = %d | valid time = %.2f (s)' %
                    (result['prec@5'], examples, eval_time.time()))

    return result
09385e491c25ac238aebabe7d887f73b4c0bd091
13,767
def tuple_list_to_lua(tuple_list):
    """Given a list of tuples, return a lua table of tables"""
    def table(it):
        return "{" + ",".join(map(str, it)) + "}"
    return table(table(t) for t in tuple_list)
71ec1a29f5e23b8bf82867617fe157fbba4a2332
13,768
def reset_user_messages(request: Request):
    """
    For given user reset his notifications.
    """
    profile: Profile = get_object_or_404(Profile, user=request.user)
    profile.messages = 0
    profile.save()
    return Response(status=status.HTTP_200_OK)
628347dea707b0bd2ecc63cc004a3f62cb85e967
13,769
import functools
import tensorflow as tf

def define_scope(function, scope=None, *args, **kwargs):
    """
    A decorator for functions that define TensorFlow operations. The wrapped
    function will only be executed once. Subsequent calls to it will directly
    return the result so that operations are added to the graph only once.
    The operations added by the function live within a tf.variable_scope().
    If this decorator is used with arguments, they will be forwarded to the
    variable scope. The scope name defaults to the name of the wrapped
    function.
    """
    attribute = '_cache_' + function.__name__
    name = scope or function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            with tf.variable_scope(name, *args, **kwargs):
                setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return decorator
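# A minimal usage sketch of the decorator, assuming the TF1-style graph API
# (tf.variable_scope / tf.get_variable) that it targets; the Model class and
# its tensors below are illustrative only.
class Model:
    def __init__(self, data):
        self.data = data
        self.prediction            # graph ops are built on first access

    @define_scope
    def prediction(self):
        # Everything created here lives under the "prediction" variable
        # scope and is added to the graph exactly once.
        w = tf.get_variable("w", shape=[int(self.data.shape[1]), 1])
        return tf.matmul(self.data, w)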
988f2f711dc227bfe8df5c7074d354c37d079fdb
13,770
from typing import Union
from typing import List
from typing import Dict
import yaml
from collections import OrderedDict

def load_yaml(fname: str) -> Union[List, Dict]:
    """Load a YAML file."""
    try:
        with open(fname, encoding='utf-8') as conf_file:
            # If configuration file is empty YAML returns None
            # We convert that to an empty dict
            return yaml.load(conf_file, Loader=SafeLineLoader) or OrderedDict()
    except yaml.YAMLError as exc:
        _LOGGER.error(exc)
        raise HomeAssistantError(exc)
    except UnicodeDecodeError as exc:
        _LOGGER.error("Unable to read file %s: %s", fname, exc)
        raise HomeAssistantError(exc)
5fd0b9d2dea7d07b7bb98f6a9ae3ce98be3962e0
13,771
def fancy_vector(v): """ Returns a given 3-vector or array in a cute way on the shell, if you use 'print' on the return value. """ return "\n / %5.2F \\\n" % (v[0]) + \ " | %5.2F |\n" % (v[1]) + \ " \\ %5.2F /\n" % (v[2])
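# Example output (each component is printed with %5.2F formatting):
print(fancy_vector([1.0, -2.5, 3.0]))
#  /  1.00 \
#  | -2.50 |
#  \  3.00 /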
2340f22aa87da00abad30b9946c374f34b38496d
13,772
def findpath_split(seq, ss1, ss2, md, th = 5, w = None): """ Calculate findpath barriers for smaller components. Args: seq: RNA sequence. ss1: Structure 1. ss2: Structure 2. md: ViennaRNA model details. th: Threshold of how many basepairs must change for an independent findpath run. Defaults to 5. w: Findpath width. Defaults to None. Returns: path, barrier: The folding path and the barrier height. WARNING: If path splitting actually took place, then energy values given in the path data are only relative to the starting structure. """ pt1 = make_pair_table(ss1, base = 0, chars = list('.x')) pt2 = make_pair_table(ss2, base = 0, chars = list('.x')) mindiff = None recurse = None for ij in chain(common_exterior_bases(pt1, pt2), common_basepairs(pt1, pt2)): (i, j) = ij if isinstance(ij, tuple) else (ij, None) st1O, st1I = split_struct(ss1, i, j, spacer = '...') st2O, st2I = split_struct(ss2, i, j, spacer = '...') do = RNA.bp_distance(st1O, st2O) if do < th: continue di = RNA.bp_distance(st1I, st2I) if di < th: continue diff = abs(di-do) if mindiff is None or diff < mindiff: mindiff = diff seqO, seqI = split_struct(seq, i, j, spacer = 'NNN') recurse = ((i, j), (seqO, st1O, st2O), (seqI, st1I, st2I)) elif mindiff is not None and diff > mindiff: # No need to check the rest if we are getting worse. break if mindiff is not None: pathO, _ = findpath_split(*recurse[1], md, th, w) pathI, _ = findpath_split(*recurse[2], md, th, w) return findpath_merge(pathO, pathI, *recurse[0]) else: fpw = 4 * RNA.bp_distance(ss1, ss2) if w is None else w return call_findpath(seq, ss1, ss2, md, w = fpw)
2d52102df31dd014ac60e28c7258bff833353b6a
13,773
def get_root_relative_url(url_path): """Remove the root page slug from the URL path""" return _clean_rel_url('/'.join(url_path.split('/')[2:]))
7aca9c0ec8856615fe1777117f44a259d7b597c7
13,774
import os import subprocess def run_command(command, split=False, include_errors=False, cwd=None, shell=False, env=None): """Run command in subprocess and return exit code and output""" sub_env = os.environ.copy() if env is not None: sub_env.update(env) if include_errors: error_pipe = subprocess.STDOUT else: error_pipe = subprocess.PIPE process = subprocess.Popen( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=error_pipe, shell=shell, universal_newlines=True, cwd=cwd, env=sub_env ) if split: output = process.stdout.readlines() else: output = process.stdout.read() return_code = process.wait() logger.debug('subprocess %s returned %d, output: %s', command, return_code, output) return return_code, output
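# A small usage sketch; it assumes a module-level `logger` is configured,
# since run_command logs through logger.debug, and a POSIX `echo` binary.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

rc, out = run_command(["echo", "hello"], split=True)
print(rc)    # 0 on success
print(out)   # ['hello\n'] because split=True uses readlines()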
f3d4c0c1180a513872d6821a763495f1b97e85ee
13,775
def exclusion_windows_matching(match_peaks):
    """
    Discard the occurrences of matching and non-matching ions when they are found in the
    window (+-losses_window_removal) around M-xx or free-base ions
    """
    output_dic = match_peaks
    for key in match_peaks:
        if match_peaks[key]:
            for t in match_peaks[key]:
                mass_losses_list, new_list = find_losses_freebases(match_peaks[key][t][7:]), []

                for ion in match_peaks[key][t][7:]:
                    # Keep ion losses and free bases matched in the MS2_matches list
                    if 'M-' not in ion[1] and len(ion[1].split('(')[0]) != 1:
                        flag, mz_ion = 1, np.float64(ion[2])

                        for mass_loss in mass_losses_list:
                            # Add the MS2 offset
                            mass_loss_offseted = mass_loss + ppm_range(mass_loss, MS2_ppm_offset)

                            # Check and discard any sequencing ion found in the M-xx exclusion window
                            if mass_loss_offseted - args.losses_window_removal <= \
                                    mz_ion <= mass_loss_offseted + args.losses_window_removal:
                                flag = 0
                                break

                        if flag == 1:
                            new_list.append(ion)

                    else:
                        new_list.append(ion)

                output_dic[key].update({t: match_peaks[key][t][:7] + new_list})

    return output_dic
9c2b5bcdb283b197102d50fdd2aaa8eb49e2fc3b
13,776
def any_of(elements): """ Check to see if the argument is contained in a list of possible elements. :param elements: The elements to check the argument against in the predicate. :return: A predicate to check if the argument is a constituent element. """ def predicate(argument): return argument in elements return predicate
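# Usage example: the returned predicate closes over the allowed elements.
is_vowel = any_of("aeiou")
print(is_vowel("a"))   # True
print(is_vowel("b"))   # False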
adacf8fd632d25452d22dab0a8a439021083ec83
13,777
def find_year(films_lst: list, year: int): """ Filter list of films by given year """ filtered_films_lst = [line for line in films_lst if line[1] == str(year)] return filtered_films_lst
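# Example, using the (title, year, ...) row layout the comparison implies:
films = [
    ["The Matrix", "1999", "USA"],
    ["Amelie", "2001", "France"],
    ["Fight Club", "1999", "USA"],
]
print(find_year(films, 1999))
# [['The Matrix', '1999', 'USA'], ['Fight Club', '1999', 'USA']]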
f4c11e09e76831afcf49154234dd57044536bce1
13,778
def func_BarPS(HA_Open, HA_Close, HA_PS_Lookback, PS_pct_level=[0.35, 0.5, 0.95, 0.97], combine=False):
    """
    0. This function calculates the price-trend size class of each HA bar by looking back
       HA_PS_Lookback HA bars and, according to the previous bars' distribution, finding the
       range (i.e. -4,-3,-2,-1,0,1,2,3,4) of the current bar.
    1. This function has 5 arguments (two optional) and returns 1 DataFrame as output.
    2. Input arguments including:
       (1) HA_Open: DataFrame
       (2) HA_Close: DataFrame
       (3) HA_PS_Lookback: int, number of bars to look back.
       (4) PS_pct_level: list, optional, default value is [0.35, 0.5, 0.95, 0.97]
       (5) combine: boolean, optional, default value is False, calculating the up bars and
           down bars separately, while combine=True calculates the up bars and down bars combined.
    3. Output is 1 DataFrame
       (1) HA_PS: Shown as -4,-3,-2,-1,0,1,2,3,4, indicating the size of HA bars.
    """

    # Initialize:
    HA_num = len(HA_Open)
    HA_PS = np.zeros_like(HA_Open)

    HA_Open = HA_Open.values
    HA_Close = HA_Close.values

    # Main:
    for i in range(HA_PS_Lookback, HA_num):

        HA_Open_lb = HA_Open[i-HA_PS_Lookback:i]
        HA_Close_1b = HA_Close[i-HA_PS_Lookback:i]
        HA_PS_positive_level, HA_PS_negative_level = func_PS_Level(HA_Open_lb, HA_Close_1b, PS_pct_level, combine)

        HA_range = HA_Close[i] - HA_Open[i]

        if HA_range > 0:
            HA_PS_temp = np.where(HA_range <= HA_PS_positive_level)[0] + 1
            if len(HA_PS_temp) != 0:
                HA_PS[i] = HA_PS_temp[0] - 1
            else:
                HA_PS[i] = len(HA_PS_positive_level)  # -1

        if HA_range < 0:
            HA_PS_temp = np.where(HA_range >= HA_PS_negative_level)[0] + 1
            if len(HA_PS_temp) != 0:
                HA_PS[i] = -HA_PS_temp[0] + 1
            else:
                HA_PS[i] = -len(HA_PS_negative_level)  # +1

    HA_PS_df = pd.DataFrame(HA_PS, columns=['PS'])

    return HA_PS_df
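# A hedged usage sketch: it assumes Heikin-Ashi open/close series are already
# available as single-column pandas DataFrames and that the companion
# func_PS_Level helper is importable; the synthetic data is for illustration only.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
ha_close = pd.DataFrame(100 + rng.normal(0, 1, 300).cumsum(), columns=['HA_Close'])
ha_open = ha_close.shift(1).fillna(100).rename(columns={'HA_Close': 'HA_Open'})

ps = func_BarPS(ha_open, ha_close, HA_PS_Lookback=100)
print(ps['PS'].value_counts())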
8a57de8ee4e832afd6327afc808668d227bc2592
13,779
from typing import List def filter_whitespace(stream: List[Part]) -> List[Part]: """Remove whitespace tokens""" return flu(stream).filter(lambda x: x.token != Token.WHITESPACE).collect()
aa3b8d109b0d85db7c3aa286858426276afb80ba
13,780
def merge_partial_dicts(interfaces_dict, partials_dict): """Merges partial interface into non-partial interface. Args: interfaces_dict: A dict of the non-partial interfaces. partial_dict: A dict of partial interfaces. Returns: A merged dictionary of |interface_dict| with |partial_dict|. """ for interface_name, partial in partials_dict.iteritems(): interface = interfaces_dict.get(interface_name) if not interface: raise Exception('There is a partial interface, but the corresponding non-partial interface was not found.') for member in _MEMBERS: interface[member].extend(partial.get(member)) interface.setdefault(_PARTIAL_FILEPATH, []).append(partial[_FILEPATH]) return interfaces_dict
7efc47325e1af5c06b19c1bba02ec8b53d9473e0
13,781
def gen_code_def_part(metadata):
    """Generate the class-definition part of the code."""
    class_def_dict = validate(metadata)
    class_def_list = list(class_def_dict.values())
    code = templates.t_def_all_class.render(class_def_list=class_def_list)
    return code
df01fb69984a0ba471a6e5ac24bbecb0a622dd1b
13,782
def clang_plusplus_frontend(input_file, args): """Generate LLVM IR from C++ language source(s).""" compile_command = default_clang_compile_command(args) compile_command[0] = llvm_exact_bin('clang++') return compile_to_bc(input_file, compile_command, args)
1b2701f3e0fac240843b302dd2056bac857ecb74
13,783
from django.db.models import Model def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError('"%r" needs to have a value for field "%s" before ' 'this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError("%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q(**{self.source_field_name: self.related_val}) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. removed_vals_filters = (not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()) if removed_vals_filters: filters &= Q(**{'%s__in' % self.target_field_name: removed_vals}) if self.symmetrical: symmetrical_filters = Q(**{self.target_field_name: self.related_val}) if removed_vals_filters: symmetrical_filters &= Q( **{'%s__in' % self.source_field_name: removed_vals}) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. 
""" queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = {'%s__in' % self.query_field_name: instances} queryset = queryset._next_is_sticky().filter(**query) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra(select={ '_prefetch_related_val_%s' % f.attname: '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields}) return ( queryset, lambda result: tuple( getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. `through_defaults` aren't used here # because of the system check error fields.E332: Many-to-many # fields with intermediate tables must not be symmetrical. if self.symmetrical: self._add_items(self.target_field_name, self.source_field_name, *objs) add.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. 
objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True)) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else obj ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys of object instances. through_defaults = through_defaults or {} # If there aren't any objects, there is nothing to do. if objs: new_ids = set() for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) fk_val = self.through._meta.get_field( target_field_name).get_foreign_related_value(obj)[0] if fk_val is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) new_ids.add(fk_val) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: new_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) vals = (self.through._default_manager.using(db) .values_list(target_field_name, flat=True) .filter(**{ source_field_name: self.related_val[0], '%s__in' % target_field_name: new_ids, })) new_ids.difference_update(vals) with transaction.atomic(using=db, savepoint=False): if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. 
signals.m2m_changed.send( sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) # Add the ones that aren't there already self.through._default_manager.using(db).bulk_create([ self.through(**through_defaults, **{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: obj_id, }) for obj_id in new_ids ]) if self.reverse or source_field_name == self.source_field_name: # Don't send the signal when we are inserting the # duplicate data row for symmetrical reverse entries. signals.m2m_changed.send( sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=new_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter(**{ '%s__in' % self.target_field.target_field.attname: old_ids}) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
c9c45ae0eca4a913affab0ed832a0568e46d9a4c
13,784
def grp_render_dashboard_module(context, module, index=None, subindex=None): """ Template tag that renders a given dashboard module, it takes a ``DashboardModule`` instance as first parameter and an integer ``index`` as second parameter, that is the index of the module in the dashboard. """ module.init_with_context(context) context.update({ 'template': module.template, 'module': module, 'index': index, 'subindex': subindex, 'admin_url': reverse('%s:index' % get_admin_site_name(context)), }) return context
515bf427c4c39dc28479f6fe7121dc1cb542745e
13,785
def serialize_routing(value, explicit_type=None): """Custom logic to find matching serialize implementation and returns it's unique registration string key :param value: instance to serialize :param explicit_type: explicit serialization type for value :return: str key to find proper serialize implementation """ value_type = data_type(value, explicit_type) if DICT_DATA_TYPE.match(value_type): return "dict" if LIST_DATA_TYPE.match(value_type): return "list" if TUPLE_DATA_TYPE.match(value_type): return "tuple" return value_type
792cb24fc68060fe7e24f064f411752c5d787c3d
13,786
def get_projection_matricies(az, el, distance_ratio, roll = 0, focal_length=35, img_w=137, img_h=137): """ Calculate 4x3 3D to 2D projection matrix given viewpoint parameters. Code from "https://github.com/Xharlie/DISN" """ F_MM = focal_length # Focal length SENSOR_SIZE_MM = 32. PIXEL_ASPECT_RATIO = 1. # pixel_aspect_x / pixel_aspect_y RESOLUTION_PCT = 100. SKEW = 0. CAM_MAX_DIST = 1.75 CAM_ROT = np.asarray([[1.910685676922942e-15, 4.371138828673793e-08, 1.0], [1.0, -4.371138828673793e-08, -0.0], [4.371138828673793e-08, 1.0, -4.371138828673793e-08]]) # Calculate intrinsic matrix. scale = RESOLUTION_PCT / 100 # print('scale', scale) f_u = F_MM * img_w * scale / SENSOR_SIZE_MM f_v = F_MM * img_h * scale * PIXEL_ASPECT_RATIO / SENSOR_SIZE_MM # print('f_u', f_u, 'f_v', f_v) u_0 = img_w * scale / 2 v_0 = img_h * scale / 2 K = np.matrix(((f_u, SKEW, u_0), (0, f_v, v_0), (0, 0, 1))) # Calculate rotation and translation matrices. # Step 1: World coordinate to object coordinate. sa = np.sin(np.radians(-az)) ca = np.cos(np.radians(-az)) se = np.sin(np.radians(-el)) ce = np.cos(np.radians(-el)) R_world2obj = np.transpose(np.matrix(((ca * ce, -sa, ca * se), (sa * ce, ca, sa * se), (-se, 0, ce)))) # Step 2: Object coordinate to camera coordinate. R_obj2cam = np.transpose(np.matrix(CAM_ROT)) R_world2cam = R_obj2cam * R_world2obj cam_location = np.transpose(np.matrix((distance_ratio * CAM_MAX_DIST, 0, 0))) T_world2cam = -1 * R_obj2cam * cam_location # Step 3: Fix blender camera's y and z axis direction. R_camfix = np.matrix(((1, 0, 0), (0, -1, 0), (0, 0, -1))) R_world2cam = R_camfix * R_world2cam T_world2cam = R_camfix * T_world2cam RT = np.hstack((R_world2cam, T_world2cam)) # finally, consider roll cr = np.cos(np.radians(roll)) sr = np.sin(np.radians(roll)) R_z = np.matrix(((cr, -sr, 0), (sr, cr, 0), (0, 0, 1))) rot_mat = get_rotate_matrix(-np.pi / 2) return K, R_z@RT@rot_mat
a8ef5852510982851487e349336e41e61f7b582e
13,787
def pix_to_coord(edges, pix, interp="lin"): """Convert pixel coordinates to grid coordinates using the chosen interpolation scheme.""" scale = interpolation_scale(interp) interp_fn = interp1d( np.arange(len(edges), dtype=float), scale(edges), fill_value="extrapolate" ) return scale.inverse(interp_fn(pix))
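# For intuition, a self-contained sketch of the interp="lin" case using scipy
# directly; it assumes interpolation_scale("lin") behaves as the identity, so
# pixel coordinates are simply interpolated (and extrapolated) along the edges.
import numpy as np
from scipy.interpolate import interp1d

edges = np.array([1.0, 10.0, 100.0])
pix = np.array([0.0, 0.5, 1.5])
lin = interp1d(np.arange(len(edges), dtype=float), edges, fill_value="extrapolate")
print(lin(pix))   # [ 1.   5.5  55. ]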
db9fcc47a273e9b39f6d5b6a39b59146866e5dd4
13,788
def create_action_urls(actions, model=None, **url_args): """ Creates a list of URLs for the given actions. """ urls = {} if len(actions) > 0: # Resolve the url_args values as attributes from the model values = {} for arg in url_args: values[arg] = getattr(model, url_args[arg]) # Generate the URL for every action for action in actions: urls[action] = flask.url_for(actions[action], **values) return urls
f26477e0d046bfe6f73f25b2b086fad3b05a2646
13,789
def check_vfvx(x0, fx, fx_args, dfx, dfx_args=None, delta=1e-5): """ Check derivatives of a (vectorized) vector or scalar function of a vector variable. """ if x0.ndim != 2: raise ValueError('The variable must have two dimensions!') if dfx_args is None: dfx_args = fx_args dfx_a = dfx(x0, *dfx_args) dfx_d = nm.zeros_like(dfx_a) for ic in range(x0.shape[1]): x = x0.copy() x[:, ic] += delta f1 = fx(x, *fx_args) x = x0.copy() x[:, ic] -= delta f2 = fx(x, *fx_args) dfx_d[:, ic] = 0.5 * (f1 - f2) / delta error = nm.linalg.norm((dfx_a - dfx_d).ravel(), nm.inf) print('analytical:', dfx_a) print('difference:', dfx_d) print('error:', error) return dfx_a, dfx_d, error
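# A worked example, assuming `nm` is the usual numpy alias in this codebase:
# check the analytic gradient of f(x) = sum(x**2, axis=1) against central
# differences. For a quadratic the central difference is exact up to round-off,
# so the reported error should be tiny.
import numpy as nm

def fx(x):
    return nm.sum(x**2, axis=1)

def dfx(x):
    return 2.0 * x

x0 = nm.array([[1.0, 2.0], [0.5, -1.5]])
dfx_a, dfx_d, error = check_vfvx(x0, fx, (), dfx)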
a8cffcbf118394a1ea5d65835bade516035fe9fe
13,790
def add_hovertool(p1, cr_traj, traj_src, sat_src, traj_df): """Adds a hovertool to the top panel of the data visualization tool plot.""" # Create the JS callback for vertical line on radar plots. callback_htool = CustomJS(args={'traj_src':traj_src,'sat_src':sat_src}, code=""" const indices = cb_data.index["1d"].indices[0]; var data_traj = traj_src.data var t_traj = data_traj['t'] const t_val = t_traj[indices] var data_sat = sat_src.data; var t_sat = data_sat['t'] t_sat[0] = t_val t_sat[1] = t_val sat_src.change.emit(); """) # Add the hovertool for the satellite trajectory points on top panel, which are # linked to the vertical line on the bottom panel. htool_mode = ('vline' if max(traj_df['y'])-min(traj_df['y'])<= (max(traj_df['x'])-min(traj_df['x'])) else 'hline') tooltips1 = [("lat", "@lat"),("lon", "@lon"),('time','@t_str')] p1.add_tools(HoverTool(renderers=[cr_traj],callback=callback_htool, mode=htool_mode,tooltips=tooltips1)) return p1
f45245df7cd81ee8f0fc486d460be0c4338fd921
13,791
def backpage_url_to_sitekey(url): """http://longisland.backpage.com/FemaleEscorts/s-mny-oo-chics-but-oo-nn-lik-oo-me-19/40317377""" (scheme, netloc, path, params, query, fragment) = urlparse(url) sitekey = netloc.split('.')[0] return sitekey
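# Example matching the URL in the docstring (assumes urlparse is imported in
# this module, e.g. from urllib.parse in Python 3):
url = "http://longisland.backpage.com/FemaleEscorts/s-mny-oo-chics-but-oo-nn-lik-oo-me-19/40317377"
print(backpage_url_to_sitekey(url))   # longisland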
6efa6d0bf73ab297144a7c6a35dbba920f77789e
13,792
import torch def batch_eye_like(X: torch.Tensor): """Return batch of identity matrices like given batch of matrices `X`.""" return torch.eye(*X.shape[1:], out=torch.empty_like(X))[None, :, :].repeat(X.size(0), 1, 1)
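# Example: a batch of 4 identity matrices with the same dtype and device as X.
X = torch.randn(4, 3, 3)
eye = batch_eye_like(X)
print(eye.shape)   # torch.Size([4, 3, 3])
print(eye[0])      # 3x3 identity matrix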
266ee5639ce303b81e2cb82892e64f37a09695ff
13,793
def cal_occurence(correspoding_text_number_list):
    """
    calculate each occurrence of a number in a list
    """
    di = dict()
    for i in correspoding_text_number_list:
        i = str(i)
        s = di.get(i, 0)
        if s == 0:
            di[i] = 1
        else:
            di[i] = di[i] + 1
    return di
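# Example (note that the keys are stringified by the function):
print(cal_occurence([3, 5, 3, 3, 7]))   # {'3': 3, '5': 1, '7': 1}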
aafabc6abdf4bf1df1b8d9e23a4af375df3ac75b
13,794
def subtract(v: Vector, w: Vector) -> Vector: """simple vector subtraction""" assert len(v) == len(w), 'Vectors need to have the same length' return [vi - wi for vi, wi in zip(v, w)]
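# Example (Vector is presumably an alias such as List[float]):
print(subtract([5, 7, 9], [4, 5, 6]))   # [1, 2, 3]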
c6f8a9b19e07206a4d2637557c721bd97ad56363
13,795
def f1():
    """
    Filtering 1D: apply the 1D convolution filter to the image `I`
    (uses the module-level `I`, `size`, `filter` and `conv_point1d` defined elsewhere).
    """
    # Get center of the filter
    c = int((size - 1) / 2)

    # Pad the flattened (1D array) image with wrapping
    If = np.pad(I.flatten(), (c), 'wrap')

    # Initialize the resulting image
    Ir = np.zeros(If.shape)

    # Apply 1D convolution in the image
    for x in range(c, Ir.shape[0] - c):
        Ir[x] = conv_point1d(If, filter, x, c)

    # Remove padding
    Ir = Ir[c:-c]

    # Return the resulting image with original shape
    return Ir.reshape(I.shape)
cc50f089148cdbaffbbdc7d6d734a066a6b08722
13,796
from typing import Optional def _unify_data_and_user_kwargs( data: 'LayerData', kwargs: Optional[dict] = None, layer_type: Optional[str] = None, fallback_name: str = None, ) -> 'FullLayerData': """Merge data returned from plugins with options specified by user. If ``data == (_data, _meta, _type)``. Then: - ``kwargs`` will be used to update ``_meta`` - ``layer_type`` will replace ``_type`` and, if provided, ``_meta`` keys will be pruned to layer_type-appropriate kwargs - ``fallback_name`` is used if ``not _meta.get('name')`` .. note: If a user specified both layer_type and additional keyword arguments to viewer.open(), it is their responsibility to make sure the kwargs match the layer_type. Parameters ---------- data : LayerData 1-, 2-, or 3-tuple with (data, meta, layer_type) returned from plugin. kwargs : dict, optional User-supplied keyword arguments, to override those in ``meta`` supplied by plugins. layer_type : str, optional A user-supplied layer_type string, to override the ``layer_type`` declared by the plugin. fallback_name : str, optional A name for the layer, to override any name in ``meta`` supplied by the plugin. Returns ------- FullLayerData Fully qualified LayerData tuple with user-provided overrides. """ _data, _meta, _type = _normalize_layer_data(data) if layer_type: # the user has explicitly requested this be a certain layer type # strip any kwargs from the plugin that are no longer relevant _meta = prune_kwargs(_meta, layer_type) _type = layer_type if kwargs: # if user provided kwargs, use to override any meta dict values that # were returned by the plugin. We only prune kwargs if the user did # *not* specify the layer_type. This means that if a user specified # both layer_type and additional keyword arguments to viewer.open(), # it is their responsibility to make sure the kwargs match the # layer_type. _meta.update(prune_kwargs(kwargs, _type) if not layer_type else kwargs) if not _meta.get('name') and fallback_name: _meta['name'] = fallback_name return (_data, _meta, _type)
c0d472ef60bf69d67ef50d435715bfabe11c229e
13,797
def get_sentence_embeddings(data): """ data -> list: list of text """ features = temb.batch_tokenize(data, tokenizer) dataset = temb.prepare_dataset(features) embeddings = temb.compute_embeddings(dataset, model) return embeddings
cb1badd5cf9a244d7af8b40d219a467d0dff811e
13,798
def sample_user(phone="+989123456789", full_name="testname"): """ Create a sample user """ return get_user_model().objects.create_user(phone=phone, full_name=full_name)
418b12b4249c4beda4fed36664f2c9eb14f8adc4
13,799