content : string (length 35 – 762k)
sha1 : string (length 40)
id : int64 (0 – 3.66M)
import pandas as pd

def get_renders_df(product_df, order_df, user_df, address_df, num_days=90):
    """ Renders - All requested renders from order, both customer and tester """
    renders_df = pd.merge(product_df, order_df, how='left', on='order_id', suffixes=(None, '_order'))
    renders_df = pd.merge(renders_df, user_df, how='left', left_on='user_id', right_on='id', suffixes=(None, '_user'))
    renders_df = pd.merge(renders_df, address_df, how='left', on='user_id', suffixes=(None, '_address'))
    renders_df = renders_df.rename(columns={'data_product': 'data', 'timestamp_product': 'timestamp'})
    renders_df = renders_df[renders_df['is_in_cart'] == True]
    renders_df = renders_df.dropna(subset=['state'])
    renders_df['product_data'] = renders_df['data'].apply(lambda x: x.get('data', {}))
    return renders_df
26051e774a0be83687fa65f0a737cee50b88d55f
2,800
import numpy as np

def check_sparsity_level(model, config, ref_sparsity_level):
    """ Check that the sparsity level of the model is equal to the reference sparsity level. """
    sparsity_algo = MagnitudeSparsity(config, None)
    all_weights_nodes = sparsity_algo._get_all_weights_nodes(model)
    all_weights = [get_node_value(w_node).flatten() for w_node in all_weights_nodes]
    all_weights = np.concatenate(all_weights)
    sparsity_level = np.sum(all_weights == 0) / len(all_weights)
    return np.isclose(sparsity_level, ref_sparsity_level)
dc2921a56080ea82d39f3e5bcd42f51ef510d969
2,801
import typing def new( name: str, data: typing.Optional[bytes] = b"", digest_size: typing.Optional[int] = None, *, custom: typing.Optional[bytes] = None, # cshakes, kangarootwelve key: typing.Optional[bytes] = None, # for blakes ) -> Hash: """ Instantiate a hash object. Args: name: The name of the hash function. data: The initial chunk of message to feed to hash. Note that for ``TupleHash`` variants, even an empty byte string changes its internal state. digest_size: The length of the digest size. Must be supplied if the hash function supports it. Keyword Args: custom: A customization string. Can be supplied for hash functions that support domain separation. key: A key that is used to compute the MAC. Can be supplied for hash functions that support working as cryptographic MAC. Raises: KeyError: If ``name`` is not a hash function name. ValueError: If ``digest_size`` is required but not provided. """ return Hash(name, data, digest_size=digest_size, custom=custom, key=key)
aecb6a7783f39a25c781d6fb869b3aecac99d4bd
2,802
def stringify_addresses(addresses):
    """
    Converts a list of addresses into a string in the
    `"John Doe" <[email protected]>, "Jane" <[email protected]>` format,
    which can be directly used in the headers of an email.

    Parameters
    ----------
    addresses : (str or (str, str)) or list of (str or (str, str))
        A single address or a list of addresses which is to be converted into a single string.
        Each element can be either an email address or a tuple of a name and an email address.

    Returns
    -------
    str
        The address(es) as a single string which can be directly used in the headers of an email.
    """
    if isinstance(addresses, list):
        addresses = [stringify_address(address) for address in addresses]
        return ', '.join(addresses)
    else:
        return stringify_address(addresses)
5a970730d39469a7aa66e220fcbd3fb4de28ecc5
2,803
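A minimal usage sketch for the stringify_addresses snippet above. The stringify_address helper is not part of the row, so the version below is an assumed stand-in that formats a (name, email) tuple and passes a plain address string through unchanged; the redacted placeholder addresses are kept as-is.

def stringify_address(address):
    # Assumed helper, not from the original row: format ("Name", "email") tuples.
    if isinstance(address, tuple):
        name, email = address
        return '"{}" <{}>'.format(name, email)
    return address

print(stringify_addresses([("John Doe", "[email protected]"), "[email protected]"]))
# '"John Doe" <[email protected]>, [email protected]'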
def validator_map_size(string):
    """
    Validator for map size input
    Raises InputError with error description if string is not valid
    :param string: String to check
    :return: Bool, if success
    """
    result = False
    if string.isdigit():
        size = int(string)
        if 5 <= size <= 100:
            result = True
        else:
            raise InputError("Unacceptable map size! Try again")
    else:
        raise InputError("Input is not integer! Try again")
    return result
bc25845dee1be3c0416a36ea0527d641136f9ac5
2,804
import requests

def get_short_token(app_id, app_secret, redirect_url, auth_code):
    """Get a short-lived access token."""
    url = f"{OAUTH_URL}/access_token"
    payload = {
        "client_id": app_id,
        "client_secret": app_secret,
        "grant_type": "authorization_code",
        "redirect_uri": redirect_url,
        "code": auth_code,
    }
    resp = requests.post(url, data=payload).json()
    return resp["access_token"]
57d260876a19a9a7f52da66069c34f5223abcf19
2,805
import random

def random_chinese_name():
    """Generate a random Chinese name of two or three characters.

    Returns:
        str: the random name
    """
    long = random.randint(2, 3)
    first_name = random.choice(FIRST_NAME)
    last_name = random.choice(LAST_NAME) if long == 2 else "{}{}".format(random.choice(LAST_NAME), random.choice(LAST_NAME))
    name = first_name + last_name
    return name
863bc1a72d0ba28916e61f62c6c6a26da9c34f7a
2,806
def generate_json_with_incorrect_prediction_value(features_definition: dict):
    """
    Generates a list of dictionaries with keys from the given features_definition;
    each key in the dictionary gets a corresponding value not allowed by the given definition.
    """
    mock_requests = []
    def_keys = list(features_definition.keys())
    for def_key in def_keys:
        mock_request = {key: list(value.keys())[0] for key, value in features_definition.items()}
        # Replace the given key's value, based on the enumeration step, with an invalid prediction value
        mock_request[def_key] = 'q'
        mock_requests.append(mock_request)
    return mock_requests
a0019822fbc701e8cdda61192bf564d1f72af9dd
2,807
import os def extract_text(file: UploadFile = File(...), lang: str = "eng", text_only: bool = False, custom_config: str = None): """ :param file: :param lang: available: deu, eng :return: """ filepath = "temp/" + file.filename with file.file: with open(filepath, "wb") as temp_file: temp_file.write(file.file.read()) # preprocess_image(filepath) if custom_config is None: custom_config = '--oem 3' if text_only: output = bytes(pytesseract.image_to_string(filepath, lang=lang, config=custom_config), encoding="utf-8") response = PlainTextResponse(content=output) else: output = pytesseract.image_to_pdf_or_hocr(filepath, lang=lang, extension='hocr', config=custom_config) extracted = xmltodict.parse(output) response = hocr_to_simple_json(extracted, lang) os.remove(filepath) return response
303eaf64baa591a3bc803f3eff85e405216fcb56
2,808
from pathlib import Path def split_data( args, data_paths: t.List[Path], val_ratio: float = 0.20, test_ratio: float = 0.10, random_state: int = 42, ) -> (t.List[str], t.List[str], t.List[str]): """ Split the data into train, val and test and save the splits to file. Args: args data_paths: list of list of scan paths in H5 file e.g. [ [scan1_FLAIR, scan1_T1, scan1_T2] ... ] val_ratio: validation set ratio test_ratio: test set ratio random_state: random state to be passed Returns: train_paths: list of scan paths for training val_paths: list of scan paths for validation test_paths: list of scan paths for testing """ test_size = int(len(data_paths) * test_ratio) val_size = int(len(data_paths) * val_ratio) train_size = len(data_paths) - val_size - test_size data_paths = np.asarray(data_paths) # shuffle indexes rng = np.random.default_rng(random_state) indexes = np.arange(len(data_paths)) rng.shuffle(indexes) # split data into train validation and test set train_paths = data_paths[indexes[:train_size]] val_paths = data_paths[indexes[train_size:train_size + val_size]] test_paths = data_paths[indexes[train_size + val_size:]] if not args.merge_scan_type: # treat each scan type separately train_paths = train_paths.flatten() val_paths = val_paths.flatten() test_paths = test_paths.flatten() return train_paths.tolist(), val_paths.tolist(), test_paths.tolist()
2f9a0c0b3a90ca8a208d0afc91cc9fc8afcfb0ee
2,809
import numpy as np

def check_chains(sampler, pos, theta_lb, theta_ub, mode_list=['bounds']):
    """ check chains
    1> reset out-of-bound chains
    2> reset all chains to max likelihood neighbours
    """
    mode_all = ['bounds', 'reset_all']
    for mode in mode_list:
        assert mode in mode_all
    n_walkers, n_step, n_dim = sampler.chain.shape
    # state of each chain
    state = np.ones((n_walkers,), dtype=bool)
    # the best position
    pos_best = sampler.flatchain[np.argsort(sampler.flatlnprobability)[-1]]
    # 'bounds' : chain pos should be between theta_lb, theta_ub
    if 'bounds' in mode_list:
        state = np.logical_and(state, np.array(
            [theta_between(pos[i], theta_lb, theta_ub) for i in range(n_walkers)]))
    # 'reset_all' : reset all chains
    if 'reset_all' in mode_list:
        state = np.logical_and(state, np.zeros((n_walkers,), dtype=bool))
    # determine new pos
    pos_new = []
    for i, state_ in enumerate(state):
        if not state_:
            # state_ = False, reset near the best position
            pos_new.append(pos_best + np.random.uniform(-1, 1, size=pos_best.shape) * 1.e-3)
        else:
            pos_new.append(pos[i])
    return np.array(pos_new), state, pos_best
ce0ccc2b9ab3ace56daf4e9dcfd54a4f845c0ca5
2,810
def get_index_train_test_path(_DATA_DIRECTORY_PATH, split_num, train = True):
    """
    Method to generate the path containing the training/test split for the given
    split number (generally from 1 to 20).
    @param split_num      Split number for which the data has to be generated
    @param train          Is true if the data is training data. Else false.
    @return path          Path of the file containing the required data
    """
    if train:
        return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
    else:
        return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
7de884bd63417ad91d83703dc2cebe4f70629315
2,811
import numpy as np
import matplotlib.pyplot as plt

def read_output():
    """Reads the complex values from output file sink generated by gnuradio expt 2"""
    complex_output = np.fromfile(file_sink_complex_expt2, dtype='complex64').reshape(-1, 1)
    plt.figure()
    plt.plot(complex_output[11:18000].real)
    plt.plot(complex_output[11:18000].imag)
    plt.savefig('complex_output.png')
    plt.close('all')
    return complex_output
e472dc0615c548aa3839540aeec7c2a30361bd49
2,812
def get_wikipedia_pages_by_list(titles_or_page_ids): """ Get Wikipedia pages using list of titles or page ids. @param titles_or_page_ids: List of titles or page ids. @return: List of pages. >>> titles_or_page_ids = 'Aromatics_byggnad' >>> pages = get_wikipedia_pages_by_list(titles_or_page_ids) >>> pages[0]['pageid'] 4868947 >>> titles_or_page_ids = ['Aromatics_byggnad'] >>> pages = get_wikipedia_pages_by_list(titles_or_page_ids) >>> pages[0]['pageid'] 4868947 >>> titles_or_page_ids = ['Dragontorpet Abrahamsberg', 'Farfadern'] >>> pages = get_wikipedia_pages_by_list(titles_or_page_ids) >>> pages[0]['pageid'] 3879445 >>> titles_or_page_ids = [1160607, 3879445] >>> pages = get_wikipedia_pages_by_list(titles_or_page_ids) >>> pages[0]['pageid'] 3879445 """ # Function for splitting a list into smaller lists, see # http://stackoverflow.com/questions/752308/split-list-into-smaller-lists split_list = lambda l, n=WIKIPEDIA_REQUEST_MAX_PAGES: [l[:]] if len(l) <= n else [l[i:i+n] for i in range(0, len(l), n)] if isinstance(titles_or_page_ids, str): titles_or_page_ids = [titles_or_page_ids] titles_or_page_ids = split_list(titles_or_page_ids, WIKIPEDIA_REQUEST_MAX_PAGES) pages = [] for values in titles_or_page_ids: if all([isinstance(v, str) for v in values]): results = get_wikipedia_page('titles', '|'.join(values)) else: results = get_wikipedia_page('pageids', '|'.join(map(str, values))) pages.extend(results['query']['pages'].values()) return pages # TODO: What about 'continue'...
37ebb4747e6ecfbaa3da7cbb8be04619f0297e89
2,813
import warnings def probit(s: pd.Series, error: str = "warn") -> pd.Series: """ Transforms the Series via the inverse CDF of the Normal distribution. Each value in the series should be between 0 and 1. Use `error` to control the behavior if any series entries are outside of (0, 1). >>> import pandas as pd >>> import janitor >>> s = pd.Series([0.1, 0.5, 0.8], name="numbers") >>> s.probit() 0 -1.281552 1 0.000000 2 0.841621 dtype: float64 :param s: Input Series. :param error: Determines behavior when `s` is outside of `(0, 1)`. If `'warn'` then a `RuntimeWarning` is thrown. If `'raise'`, then a `RuntimeError` is thrown. Otherwise, nothing is thrown and `np.nan` is returned for the problematic entries; defaults to `'warn'`. :raises RuntimeError: Raised when there are problematic values in the Series and `error='raise'`. :return: Transformed Series """ s = s.copy() outside_support = (s <= 0) | (s >= 1) if (outside_support).any(): msg = f"{outside_support.sum()} value(s) are outside of (0, 1)" if error.lower() == "warn": warnings.warn(msg, RuntimeWarning) if error.lower() == "raise": raise RuntimeError(msg) else: pass s[outside_support] = np.nan with np.errstate(all="ignore"): out = pd.Series(norm.ppf(s), index=s.index) return out
0f12c007da3ebc78b5f6cf073b4637dfae360a26
2,814
import json def make_poem(token_nums, df, new_rowi): """ should return a series to be put at the end of the dataframe Having a list in a df cell is apparently a pain so words are joined with "_" """ print(token_nums) words = df.iloc[token_nums,0].to_list() words_out = [] for word in words: print(word) if "_" in word: j = word.rsplit("_") print(j) words_out = words_out + j else: words_out.append(word) print(words_out) tts.make_svg(words_out, str(new_rowi)) file_hash = tts.ipfs_upload(f"pics/{new_rowi}.svg") tts.pin(file_hash, f"{new_rowi}.svg") ser = pd.DataFrame({"text": "_".join(words_out), "numbers": new_rowi, "ipfs": file_hash, "metadata": f"metadata/meta-{new_rowi}.json", "policy_id" :"<policyID>", "sold" : 1, "pic" : f"pics/{new_rowi}.svg"}, index = [new_rowi]) d = tts.build_ingredient_dict("<policyID>", words_out, new_rowi, f"{file_hash}") with open(f"metadata/meta-{new_rowi}.json", 'w') as f: json.dump(d, f) return ser
1f950134e766fa701d5800ac4bcc5d7aca1847e5
2,815
def merge_multilinestrings(network):
    """Try to merge all multilinestring geometries into linestring geometries.

    Args:
        network (class): A network composed of nodes (points in space) and edges (lines)

    Returns:
        network (class): A network composed of nodes (points in space) and edges (lines)
    """
    edges = network.edges.copy()
    edges['geometry'] = edges.geometry.apply(lambda x: merge_multilinestring(x))
    return Network(edges=edges, nodes=network.nodes)
624ffd9f25af378451c087a9c9b11af25f06b7bc
2,816
import os def _read_yaml_definition(uarchdefs, path): """ :param uarchdefs: :param path: """ uarchdef = read_yaml(os.path.join(path, "microarchitecture.yaml"), SCHEMA) uarchdef["Path"] = path uarchdefs.append(uarchdef) _read_uarch_extensions(uarchdefs, path) baseuarch = read_yaml(DEFAULT_UARCH, SCHEMA) baseuarch["Path"] = DEFAULT_UARCH uarchdefs.append(baseuarch) complete_uarchdef = {} uarchdefs.reverse() for uarchdef in uarchdefs: for key, val in uarchdef.items(): if not isinstance(val, dict): complete_uarchdef[key] = uarchdef[key] else: override = val.get("Override", False) if key not in complete_uarchdef: complete_uarchdef[key] = {} for key2 in val: if key2 in ["YAML", "Modules", "Path"]: if key2 not in complete_uarchdef[key]: complete_uarchdef[key][key2] = [] if os.path.isabs(val[key2]): if override: complete_uarchdef[key][key2] = [val[key2]] else: complete_uarchdef[key][key2].append(val[key2]) else: if override: complete_uarchdef[key][key2] = [ os.path.join( uarchdef["Path"], val[key2] ) ] else: complete_uarchdef[key][key2].append( os.path.join( uarchdef["Path"], val[key2] ) ) elif key2 == "Module": if val[key2].startswith("microprobe"): val[key2] = os.path.join( os.path.dirname(__file__), "..", "..", "..", val[key2] ) if os.path.isabs(val[key2]): complete_uarchdef[key][key2] = val[key2] else: complete_uarchdef[key][key2] = os.path.join( uarchdef["Path"], val[key2] ) else: complete_uarchdef[key][key2] = val[key2] return complete_uarchdef
ff0f0a860483dc05b5386de050e16509fa01fc03
2,817
from datetime import datetime

def timestamp_old():
    """ store timestamp field """
    timestamp = {}
    timestamp['timestamp'] = False
    try:
        today = datetime.now()
        # print('Timestamp: {:%Y-%m-%d %H:%M:%S}'.format(today))
        timestamp['timestamp'] = "{:%Y-%m-%d %H:%M:%S}".format(today)
    except Exception as e:
        print("Failure in getting time:", e)
    return timestamp
19638d49ff148d93ab9d6a2b2bebedf59899b71a
2,818
import os def download_model(model: str, saving_directory: str = None) -> str: """ Function that loads pretrained models from AWS. :param model: Name of the model to be loaded. :param saving_directory: RELATIVE path to the saving folder (must end with /). Return: - Path to model checkpoint. """ if saving_directory is None: saving_directory = get_cache_folder() if not saving_directory.endswith("/"): saving_directory += "/" if not os.path.exists(saving_directory): os.makedirs(saving_directory) if os.path.isdir(saving_directory + model): logger.info(f"{model} is already in cache.") if not model.endswith("/"): model += "/" elif model not in available_metrics.keys(): raise Exception( f"{model} is not in the `availale_metrics` or is a valid checkpoint folder." ) elif available_metrics[model].startswith("https://"): download_file_maybe_extract( available_metrics[model], directory=saving_directory ) else: raise Exception("Invalid model name!") # CLEAN Cache if os.path.exists(saving_directory + model + ".zip"): os.remove(saving_directory + model + ".zip") if os.path.exists(saving_directory + model + ".tar.gz"): os.remove(saving_directory + model + ".tar.gz") if os.path.exists(saving_directory + model + ".tar"): os.remove(saving_directory + model + ".tar") checkpoints_folder = saving_directory + model + "/checkpoints" checkpoints = [ file for file in os.listdir(checkpoints_folder) if file.endswith(".ckpt") ] checkpoint = checkpoints[-1] checkpoint_path = checkpoints_folder + "/" + checkpoint return checkpoint_path
7520b78d1913ef275aecdd4f0b3c07ea5a164ef4
2,819
from datetime import datetime
import pandas as pd

def get_series(currency_id: str, interval: str) -> pd.DataFrame:
    """
    Get the time series for the given currency_id.
    Timestamps and dates are given in UTC time.
    """
    url = f"https://api.coincap.io/v2/assets/{currency_id}/history"
    js = request_and_jsonize_calm(url, params={'interval': interval})
    times, prices, dates = [], [], []
    for measurement in js['data']:
        timestamp_seconds = float(measurement['time']) // 1000
        times.append(timestamp_seconds)  # Timestamp is in milliseconds
        prices.append(float(measurement['priceUsd']))
        dates.append(datetime.fromtimestamp(timestamp_seconds))
    df = pd.DataFrame(
        {
            'date': dates,
            'time': times,
            'price': prices
        }
    )
    return df
e2420de9d35c0eb7f5b408bfc520d587c601c5ca
2,820
import html

def formatTitle(title):
    """
    The formatTitle function formats titles extracted from the scraped HTML code.
    """
    title = html.unescape(title)
    if(len(title) > 40):
        return title[:40] + "..."
    return title
0a47e88ac024561dce18be140895dfd0825a9c37
2,821
def isPalindrome(x):
    """
    :type x: int
    :rtype: bool
    """
    def sub_judge(start, end, string):
        if start >= end:
            return True
        if string[start] == string[end]:
            return sub_judge(start + 1, end - 1, string)
        else:
            return False
    return sub_judge(0, len(str(x)) - 1, str(x))
c7ecea3934e1cceb6574630eb06703f18f02832a
2,822
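A quick usage sketch for the isPalindrome snippet above; it is self-contained, since the recursion works directly on str(x).

print(isPalindrome(121))   # True  - "121" reads the same from both ends
print(isPalindrome(-121))  # False - the leading '-' has no matching character
print(isPalindrome(10))    # False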
def count_partitions(n, m):
    """Count the partitions of n using parts up to size m.

    >>> count_partitions(6, 4)
    9
    >>> count_partitions(10, 10)
    42
    """
    if n == 0:
        return 1
    elif n < 0:
        return 0
    elif m == 0:
        return 0
    else:
        with_m = count_partitions(n-m, m)
        without_m = count_partitions(n, m-1)
        return with_m + without_m
941984ffd1912ff66fd6a006ccc2bc58fc41eaa8
2,823
import re def parse_pairs(string): """ Converts string where are data wrote using such method: Key: Value To dictionary where "Key" is key and "Value" is value. If there's newline, space and dot or text - that must be added to previous value. :param string: string that contains data to convert :return: :raises Exception """ pairs = {} last_key = None for line in string.split('\n'): # If line is continuing of previous value - add it if re.match('( [^\n]+| \\.)', line) is not None: pairs[last_key] += '\n' + line else: # Regexp passes: # Key: Value # abc: DEF # Won't pass: # a adn dsj jsd dsi ads pf match = re.match('([^:]+): ([^\n]+)', line) if match is not None: pairs.update({match.group(1): match.group(2)}) last_key = match.group(1) elif not re.match('\\s+|', line): raise IllegalFormatException("Line\n%s\nDoesn't match patterns " "\"([^:]+): ([^\\n]+) and \"( [^\\n]+| \\.)\"!" % line) return pairs
95e46b7a6abf8885630f6151b7fde380c6bc0fcf
2,824
def check_yum_package(package_name, logger):
    """
    check if a yum package is installed
    :param package_name: name to be checked
    :param logger: rs log obj
    :return: boolean
    """
    logger.trace("Checking if package '{}' is installed.", package_name)
    command = "yum list installed {}".format(package_name)
    try:
        execute_in_bash(command, logger)
    except:
        logger.trace("Package '{}' is not installed.", package_name)
        return False
    logger.trace("Package '{}' is already installed.", package_name)
    return True
777f041d03279fd2a8e8a4dcfa1c8e5df9b42b44
2,825
from lxml import etree  # the .xpath() call below implies lxml rather than xml.etree

def get(filename, name):
    """ Read a given element from an SVG file """
    root = etree.parse(filename).getroot()
    return root.xpath("//*[@id='%s']" % name)[0].get("d")
b2ef579ab1521ebacda940818caa27a341d049dd
2,826
def softmaxCostAndGradient(predicted, target, outputVectors, dataset): """ Softmax cost function for word2vec models Implement the cost and gradients for one predicted word vector and one target word vector as a building block for word2vec models, assuming the softmax prediction function and cross entropy loss. Arguments: predicted -- numpy ndarray, predicted word vector (\hat{v} in the written component) target -- integer, the index of the target word outputVectors -- "output" vectors (as rows) for all tokens dataset -- needed for negative sampling, unused here. Return: cost -- cross entropy cost for the softmax word prediction gradPred -- the gradient with respect to the predicted word vector grad -- the gradient with respect to all the other word vectors We will not provide starter code for this function, but feel free to reference the code you previously wrote for this assignment! """ ### YOUR CODE HERE # print("+++++++++++++++++++++ softmaxCostAndGradient +++++++++++++++++++++++") # print("The shape of predicted(v_c) is {}, which means each word is presented by {} dims.".format(predicted.shape, predicted.shape[0])) # print("target(o)'s type is {}, and it's value is {},the u_o now is u_target.".format(type(target), target)) # print("The shape of outputVectors(u_w) is {}, which means we have {} words.".format(outputVectors.shape, outputVectors.shape[0])) y_hat = softmax(np.matmul(outputVectors, predicted)) # print("y_hat is{}.".format(y_hat)) # print("Then we should minus 1 at the location at {}".format(target+1)) cost = -np.log(y_hat[target]) y_hat[target] = y_hat[target] - 1 dy = y_hat.copy() # print("so we can get the dy:{}".format(y_hat)) # print("To get the gradPred, according to the wirte solution what we should know the shapes of dy{} and outputVectors{}". # format(dy.shape, outputVectors.shape)) gradPred = np.matmul(dy.T, outputVectors) # print("we can get the gradPred easily in shape{}".format(gradPred.shape)) # print("To get the grad, according to the wirte solution what we should know the shapes of dy{} and predicted{}". # format(dy.shape, predicted.shape)) grad = np.outer(dy, predicted) # print("we can get the grad easily in shape{}".format(grad.shape)) # print("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++") ### END YOUR CODE return cost, gradPred, grad
a7215d75a58bcc19d2b779769b74bcb96beb8d6c
2,827
def get_osf_meta_schemas():
    """Returns the current contents of all known schema files."""
    schemas = [
        ensure_schema_structure(from_json(json_filename))
        for json_filename in OSF_META_SCHEMA_FILES
    ]
    return schemas
cf3a36b589885faebc1ec509ec9a45470ad1efef
2,828
def update(request, bleep_id): """ Process a bleep form update """ if request.method == 'POST': form = BleepForm(request.POST) if form.is_valid(): # Process and clean the data # ... # update the form with current bleep data b = Bleep.objects.get(pk=bleep_id) form = BleepForm(request.POST, instance=b) form.save() return HttpResponseRedirect('/bleeps/'+bleep_id) else: form = BleepForm() # Create an unbound form return render_to_response('bleep/form.html', { 'form': form, 'content_form': CommentForm()}, context_instance=RequestContext(request))
eee08d0c589d3ff1c58e1deb66a1d0b10408785e
2,829
import email import binascii def decode_header(header): """Decode a message header value without converting charset. Returns a list of (decoded_string, charset) pairs containing each of the decoded parts of the header. Charset is None for non-encoded parts of the header, otherwise a lower-case string containing the name of the character set specified in the encoded string. An email.errors.HeaderParseError may be raised when certain decoding error occurs (e.g. a base64 decoding exception). """ header = str(header) if not ecre.search(header): return [(header, None)] else: decoded = [] dec = '' for line in header.splitlines(): if not ecre.search(line): decoded.append((line, None)) continue parts = ecre.split(line) while parts: unenc = parts.pop(0).strip() if unenc: if decoded and decoded[-1][1] is None: decoded[-1] = ( decoded[-1][0] + SPACE + unenc, None) else: decoded.append((unenc, None)) if parts: charset, encoding = [ s.lower() for s in parts[0:2] ] encoded = parts[2] dec = None if encoding == 'q': dec = email.quoprimime.header_decode(encoded) elif encoding == 'b': paderr = len(encoded) % 4 if paderr: encoded += '==='[:4 - paderr] try: dec = email.base64mime.decode(encoded) except binascii.Error: raise HeaderParseError if dec is None: dec = encoded if decoded and decoded[-1][1] == charset: decoded[-1] = ( decoded[-1][0] + dec, decoded[-1][1]) else: decoded.append((dec, charset)) del parts[0:3] return decoded
a8cddb9cf196efd4715511418414cc429dd54fe7
2,830
def user_from_identity():
    """Returns the User model object of the current jwt identity"""
    username = get_jwt_identity()
    return User.query.filter(User.username == username).scalar()
632a3ed7ee047c6358582a60b1d39a4cca97eb7e
2,831
from typing import Set, TypeVar

T = TypeVar("T")  # originally imported from `re`, which is the TEMPLATE flag, not a type variable

def combine(first: Set[T], second: Set[T]) -> Set[T]:
    """Combine two sets of tuples, prioritising the second."""
    result = second.copy()
    for pf in first:
        include = True
        for pr in result:
            if pf[0] == pr[0]:
                include = False
                break
            if pf[1] == pr[1]:
                include = False
                break
        if include:
            result.add(pf)
    return result
e28b6884b63c055c1224ffc7d19613581f6cacc8
2,832
def minsize<VAL1>(event, context): """ AutoScalingGroup起動台数調整 """ """ Create Connection """ try: client = boto3.client('autoscaling', region_name = '<Region>') except: print('Connection Error') return 1 """ Update AutoScalingGroup """ try: client.update_auto_scaling_group(AutoScalingGroupName = '<AutoScalingGroup>', MinSize = <VAL1>, DesiredCapacity = <VAL1>) except: print('Update AutoScalingGroup Error') return 1 return 0
8eccb127ec7f1468706b80af83ccd6641460f22a
2,833
def thin(image, n_iter=None): """ Perform morphological thinning of a binary image Parameters ---------- image : binary (M, N) ndarray The image to be thinned. n_iter : int, number of iterations, optional Regardless of the value of this parameter, the thinned image is returned immediately if an iteration produces no change. If this parameter is specified it thus sets an upper bound on the number of iterations performed. Returns ------- out : ndarray of bools Thinned image. See also -------- skeletonize Notes ----- This algorithm [1]_ works by making multiple passes over the image, removing pixels matching a set of criteria designed to thin connected regions while preserving eight-connected components and 2 x 2 squares [2]_. In each of the two sub-iterations the algorithm correlates the intermediate skeleton image with a neighborhood mask, then looks up each neighborhood in a lookup table indicating whether the central pixel should be deleted in that sub-iteration. References ---------- .. [1] Z. Guo and R. W. Hall, "Parallel thinning with two-subiteration algorithms," Comm. ACM, vol. 32, no. 3, pp. 359-373, 1989. .. [2] Lam, L., Seong-Whan Lee, and Ching Y. Suen, "Thinning Methodologies-A Comprehensive Survey," IEEE Transactions on Pattern Analysis and Machine Intelligence, Vol 14, No. 9, September 1992, p. 879 Examples -------- >>> square = np.zeros((7, 7), dtype=np.uint8) >>> square[1:-1, 2:-2] = 1 >>> square[0,1] = 1 >>> square array([[0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=uint8) >>> skel = bwmorph_thin(square) >>> skel.astype(np.uint8) array([[0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], dtype=uint8) """ return _bwmorph_luts(image, THIN_LUTS, n_iter=n_iter)
d554d56c3a0146fb487d2b3a1f4e8a3f033e3559
2,834
def rings(xgr):
    """ rings in the graph (minimal basis)
    """
    xgrs = [bond_induced_subgraph(xgr, bnd_keys)
            for bnd_keys in rings_bond_keys(xgr)]
    return tuple(sorted(xgrs, key=frozen))
288437236a6c5367a67fffb0a8d73c01b7864d67
2,835
def prior_search(binary, left_fit, right_fit, margin=50): """ searches within the margin of previous left and right fit indices Parameters: binary: np.ndarray, binary image from the video left_fit: list, left line curve fitting coefficients right_fit: list, right line curve fitting coefficients margin: int, margin to search lane for Returns: left_fitx: list, left line x indices right_fitx: list, right line x indices ploty: y indices for curve fitting left_fit: list, left line curve fitting coefficients right_fit: list, right line curve fitting coefficients """ nonzero = binary.nonzero() nonzeroy = np.array(nonzero[0]) nonzerox = np.array(nonzero[1]) left_lane_indices = ( nonzerox > ( left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin ) ) & ( nonzerox < ( left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin ) ) right_lane_indices = ( nonzerox > ( right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin ) ) & ( nonzerox < ( right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin ) ) leftx = nonzerox[left_lane_indices] lefty = nonzeroy[left_lane_indices] rightx = nonzerox[right_lane_indices] righty = nonzeroy[right_lane_indices] left_fit = np.polyfit(lefty, leftx, 2) right_fit = np.polyfit(righty, rightx, 2) ploty = np.linspace(0, binary.shape[0] - 1, binary.shape[0]) left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2] right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2] return left_fitx, right_fitx, ploty, left_fit, right_fit
60a5838fef6a56060471dcfd54ebb4ebb43acbb4
2,836
import multiprocessing as mp
import tqdm

def _parallel_iter(par, iterator):
    """
    Parallelize a partial function and return results in a list.

    :param par: Partial function.
    :param iterator: Iterable object.

    :rtype: list
    :return: List of results.
    """
    pool = mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1)
    output = []
    for thread_output in tqdm.tqdm(pool.imap_unordered(par, iterator)):
        if thread_output:
            output.extend(thread_output)
    pool.close()
    pool.join()
    return output
e71a2c91421fc6abd10b48787eec2c4de5ccd03b
2,837
import numpy as np

def reassign_labels(class_img, cluster_centers, k=3):
    """Reassigns mask labels of t series based on magnitude of the cluster centers.
    This assumes land will always be less than thin cloud
    which will always be less than thick cloud, in HOT units"""
    idx = np.argsort(cluster_centers.sum(axis=1))
    lut = np.zeros_like(idx)
    lut[idx] = np.arange(k)
    return lut[class_img]
bff2e9e1e0a9db4b7bd59e8e84ac0689e1947e1f
2,838
import argparse

def parse_args():
    """parse args with argparse
    :returns: args
    """
    parser = argparse.ArgumentParser(description="Daily Reddit Wallpaper")
    parser.add_argument("-s", "--subreddit", type=str, default=config["subreddit"],
                        help="Example: art, getmotivated, wallpapers, ...")
    parser.add_argument("-t", "--time", type=str, default=config["time"],
                        help="Example: new, hour, day, week, month, year")
    parser.add_argument("-n", "--nsfw", action='store_true', default=config["nsfw"],
                        help="Enables NSFW tagged posts.")
    parser.add_argument("-d", "--display", type=int, default=config["display"],
                        help="Desktop display number on OS X (0: all displays, 1: main display, etc")
    parser.add_argument("-o", "--output", type=str, default=config["output"],
                        help="Set the outputfolder in the home directory to save the Wallpapers to.")
    args = parser.parse_args()
    return args
deb48dd5760b4d640a132518f5886d39a7b54b8e
2,839
import numpy as np

def gridarray(a, b):
    """
    Given two arrays create an array of all possible pairs, a 2d grid.
    E.g. a = [1, 2], b = [2, 4, 5],
    gridarray(a,b) = [[1,2], [1,4], [1,5], [2,2], [2,4], [2,5]].
    May be used repeatedly for increasing dimensionality.

    DEPRECATED: Use A, B = np.meshgrid(a, b). Note that meshgrid works with
    arbitrary dimension too.
    """
    if a is None:
        return b  # Trivial cases
    if b is None:
        return a
    adim, bdim = 1, 1
    if a.ndim > 1:
        adim = a.shape[1]
    if b.ndim > 1:
        bdim = b.shape[1]
    ab = np.zeros((a.shape[0] * b.shape[0], adim + bdim), dtype=a.dtype)
    count = 0
    for aa in a:
        for bb in b:
            ab[count, 0:adim] = aa
            ab[count, adim:] = bb
            count = count + 1
    return ab
5320655c718cd5be0d2321079fb1d77719ac1b39
2,840
import unicodedata

def has_alphanum(s):
    """
    Return True if s has at least one alphanumeric character in any language.
    See https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
    """
    for c in s:
        category = unicodedata.category(c)[0]
        if category == 'L' or category == 'N':
            return True
    return False
3ac778e5f415bce4fa1e8667a1599ca73367b733
2,841
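A short usage sketch for the has_alphanum snippet above; it needs only the standard-library unicodedata import already shown.

print(has_alphanum("?!. -"))  # False - punctuation and whitespace only
print(has_alphanum("¿qué?"))  # True  - 'q' is a letter (category L)
print(has_alphanum("١٢٣"))    # True  - Arabic-Indic digits (category N)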
import os

def src_path_join(*kwargs):
    """
    returns path to the file whose dir information are provided in kwargs
    similar to `os.path.join`
    :param kwargs:
    :return:
    """
    return os.path.join(get_src_dir(), *kwargs)
fe34197a5da000b0d9e86db6c3596c6e38ab32df
2,842
def get_values_heatmap(entity, measurement, case_id, categorical_filter, categorical, numerical_filter_name, from1, to1, measurement_filter, date, r): """ Get numerical values from numerical table from database get_values use in heatmap, clustering r: connection with database Returns ------- df: DataFrame with columns Name_ID,entity1,entity2,... """ entity_fin = "$$" + "$$,$$".join(entity) + "$$" if not categorical_filter and not case_id and not numerical_filter_name: sql = """SELECT "Name_ID","measurement","Key",AVG(f."Value") as "Value" FROM examination_numerical, unnest("Value") as f("Value") WHERE "Key" IN ({0}) and "measurement" in ('{1}') and "Date" Between '{2}' and '{3}' Group by "Name_ID","measurement","Key" """.format(entity_fin, measurement, date[0], date[1]) else: df = filtering(case_id, categorical_filter, categorical, numerical_filter_name, from1, to1, measurement_filter) sql = """SELECT en."Name_ID","measurement","Key",AVG(f."Value") as "Value" FROM examination_numerical as en right join ({4}) as df on en."Name_ID" = df."Name_ID" , unnest("Value") as f("Value") WHERE "Key" IN ({0}) and "measurement" in ('{1}') and "Date" Between '{2}' and '{3}' Group by en."Name_ID","measurement","Key" """.format(entity_fin, measurement, date[0], date[1],df) try: df = pd.read_sql(sql, r) df = df.pivot_table(index=["Name_ID"], columns="Key", values="Value", aggfunc=np.mean).reset_index() if df.empty or len(df) == 0: return df, "The entity wasn't measured" else: return df, None except Exception: return None, "Problem with load data from database"
f3eb7b422f2dd41b5dd2e1378a41cc915f02c34e
2,843
from typing import Dict from typing import List from typing import Set from typing import Tuple from typing import Optional from typing import Union from typing import KeysView import asyncio from datetime import datetime async def unwrap_pull_requests(prs_df: pd.DataFrame, precomputed_done_facts: PullRequestFactsMap, precomputed_ambiguous_done_facts: Dict[str, List[int]], with_jira: bool, branches: pd.DataFrame, default_branches: Dict[str, str], bots: Set[str], release_settings: ReleaseSettings, logical_settings: LogicalRepositorySettings, prefixer: Prefixer, account: int, meta_ids: Tuple[int, ...], mdb: Database, pdb: Database, rdb: Database, cache: Optional[aiomcache.Client], resolve_rebased: bool = True, repositories: Optional[Union[Set[str], KeysView[str]]] = None, ) -> Tuple[List[MinedPullRequest], PRDataFrames, PullRequestFactsMap, Dict[str, ReleaseMatch], Optional[asyncio.Task]]: """ Fetch all the missing information about PRs in a dataframe. :param prs_df: dataframe with PullRequest-s. :param precomputed_done_facts: Preloaded precomputed facts of done PRs (explicit). :param precomputed_ambiguous_done_facts: Preloaded precomputed facts of done PRs (implicit). :param with_jira: Value indicating whether to load the mapped JIRA issues. :param branches: Branches of the relevant repositories. :param default_branches: Default branches of the relevant repositories. :param release_settings: Account's release settings. :param account: State DB account ID. :param meta_ids: GitHub account IDs. :param mdb: Metadata DB. :param pdb: Precomputed DB. :param cache: Optional memcached client. :return: Everything that's necessary for PullRequestListMiner. """ if prs_df.empty: async def noop(): return {} return ( [], PRDataFrames(*(pd.DataFrame() for _ in dataclass_fields(PRDataFrames))), {}, {}, asyncio.create_task(noop(), name="noop"), ) if repositories is None: repositories = logical_settings.all_logical_repos() if resolve_rebased: dags = await fetch_precomputed_commit_history_dags( prs_df[PullRequest.repository_full_name.name].unique(), account, pdb, cache) dags = await fetch_repository_commits_no_branch_dates( dags, branches, BRANCH_FETCH_COMMITS_COLUMNS, True, account, meta_ids, mdb, pdb, cache) prs_df = await PullRequestMiner.mark_dead_prs( prs_df, branches, dags, meta_ids, mdb, PullRequest) facts, ambiguous = precomputed_done_facts, precomputed_ambiguous_done_facts PullRequestMiner.adjust_pr_closed_merged_timestamps(prs_df) now = datetime.now(timezone.utc) if rel_time_from := prs_df[PullRequest.merged_at.name].nonemin(): milestone_prs = prs_df[[PullRequest.merge_commit_sha.name, PullRequest.merge_commit_id.name, PullRequest.merged_at.name, PullRequest.repository_full_name.name]] milestone_prs.columns = [ Release.sha.name, Release.commit_id.name, Release.published_at.name, Release.repository_full_name.name, ] milestone_releases = dummy_releases_df().append(milestone_prs.reset_index(drop=True)) milestone_releases = milestone_releases.take(np.where( milestone_releases[Release.sha.name].notnull())[0]) releases, matched_bys = await ReleaseLoader.load_releases( prs_df[PullRequest.repository_full_name.name].unique(), branches, default_branches, rel_time_from, now, release_settings, logical_settings, prefixer, account, meta_ids, mdb, pdb, rdb, cache) add_pdb_misses(pdb, "load_precomputed_done_facts_reponums/ambiguous", remove_ambiguous_prs(facts, ambiguous, matched_bys)) tasks = [ load_commit_dags( releases.append(milestone_releases), account, meta_ids, mdb, pdb, cache), # not nonemax() here! 
we want NaT-s inside load_merged_unreleased_pull_request_facts MergedPRFactsLoader.load_merged_unreleased_pull_request_facts( prs_df, releases[Release.published_at.name].max(), LabelFilter.empty(), matched_bys, default_branches, release_settings, prefixer, account, pdb), ] dags, unreleased = await gather(*tasks) else: releases, matched_bys, unreleased = dummy_releases_df(), {}, {} dags = await fetch_precomputed_commit_history_dags( prs_df[PullRequest.repository_full_name.name].unique(), account, pdb, cache) for k, v in unreleased.items(): if k not in facts: facts[k] = v dfs, _, _ = await PullRequestMiner.mine_by_ids( prs_df, unreleased, repositories, now, releases, matched_bys, branches, default_branches, dags, release_settings, logical_settings, prefixer, account, meta_ids, mdb, pdb, rdb, cache, with_jira=with_jira) deployment_names = dfs.deployments.index.get_level_values(1).unique() deployments_task = asyncio.create_task(_load_deployments( deployment_names, facts, logical_settings, prefixer, account, meta_ids, mdb, pdb, rdb, cache), name=f"load_included_deployments({len(deployment_names)})") dfs.prs = split_logical_repositories(dfs.prs, dfs.labels, repositories, logical_settings) prs = await list_with_yield(PullRequestMiner(dfs), "PullRequestMiner.__iter__") filtered_prs = [] with sentry_sdk.start_span(op="PullRequestFactsMiner.__call__", description=str(len(prs))): facts_miner = PullRequestFactsMiner(bots) pdb_misses = 0 for pr in prs: node_id, repo = \ pr.pr[PullRequest.node_id.name], pr.pr[PullRequest.repository_full_name.name] if (node_id, repo) not in facts: try: facts[(node_id, repo)] = facts_miner(pr) except ImpossiblePullRequest: continue finally: pdb_misses += 1 filtered_prs.append(pr) set_pdb_hits(pdb, "fetch_pull_requests/facts", len(filtered_prs) - pdb_misses) set_pdb_misses(pdb, "fetch_pull_requests/facts", pdb_misses) if deployments_task is not None: await deployments_task deployments_task = deployments_task.result() return filtered_prs, dfs, facts, matched_bys, deployments_task
9ad6db247a3b3ccf2b6fcf5573195035705613c8
2,844
import collections def load_images(image_files, resize=True): """Load images from files and optionally resize it.""" images = [] for image_file in image_files: with file_io.FileIO(image_file, 'r') as ff: images.append(ff.read()) if resize is False: return images # To resize, run a tf session so we can reuse 'decode_and_resize()' # which is used in prediction graph. This makes sure we don't lose # any quality in prediction, while decreasing the size of the images # submitted to the model over network. image_str_tensor = tf.placeholder(tf.string, shape=[None]) image = tf.map_fn(resize_image, image_str_tensor, back_prop=False) feed_dict = collections.defaultdict(list) feed_dict[image_str_tensor.name] = images with tf.Session() as sess: images_resized = sess.run(image, feed_dict=feed_dict) return images_resized
e73c05e4718d1a67b6dea07650f9df3406c76daf
2,845
def schemaGraph (ds, ns, ontology_uri=None): """ schemaGraph (datasource, namespace, [ontology_uri,]) Return an RDF graph filled with axioms describing the datasource. @param ds: the DataSource whose schema has to be converted @param ns: the namespace uri of the created classes and properties @param ontology_uri if not given, the namespace uri is used @see: L{cross.datasource} """ # naming scheme: # t-tablename : table class # c-tablename.columnname : column property # _ic-tablename.columnname : inverse column property # _vc-tablename.columnname.simple_val : column-value instance # dc-tablename.columnname : column-data property # nc-tablename.columnname : null-column class # i-tablename.indexname : index property # _ii-tablename.indexname : inverse index property # _vi-tablename.indexname.tuple_val : index-value instance # di-tablename.indexname : index-data property # ni-tablename.indexname : null-index class # f-tablename.foreignkeyname : foreign-key property # _vf-tablename.foreignkeyname.tuple_val : foreign-key-value instance # df-tablename.foreignkeyname : foreign-key property # nf-tablename.foreignkeyname : null-foreign-key class rdf = Graph() rdf.bind ('xsd', XSD) rdf.bind ('owl', OWL) if ontology_uri is None: ontology_uri = ns if ontology_uri[-1] in ['#', '/']: ontology_uri = ontology_uri[:-1] ontology_uri = URIRef (ontology_uri) rdf.add ((ontology_uri, RDF.type, OWL.Ontology)) rdf.add ((RDF.value, RDF.type, OWL.DatatypeProperty)) for t in ds.tables: t_uri = URIRef ("%st-%s" % (ns, t.uri_name)) _manage_table (t_uri, t, ns, rdf) for c in t.columns: _manage_column (t_uri, c, ns, rdf) for i in t.unique_indexes: if len (i) > 1: _manage_unique_index (t_uri, i, ns, rdf) for f in t.foreign_keys: _manage_foreignkey (t_uri, f, ns, rdf) return rdf
7c5f20d6795a06776fe83a77e3f573f6da11ff3e
2,846
def is_remote(path):
    """Determine whether a file is in a remote location (which can be handled) based on prefix of connection string."""
    for token in ["s3://", "http://", "https://"]:  # add
        if path.startswith(token):
            return True
    return False
b459e20104b6e0e326a86ef44b53e18a335ded96
2,847
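A tiny usage sketch for the is_remote snippet above (self-contained; example.com is a reserved test domain).

print(is_remote("s3://bucket/data.csv"))       # True
print(is_remote("https://example.com/x.nc"))   # True
print(is_remote("/home/user/data.csv"))        # False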
import tensorflow as tf

def saliency_map(output, input, name="saliency_map"):
    """
    Produce a saliency map as described in the paper:
    `Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps
    <https://arxiv.org/abs/1312.6034>`_.
    The saliency map is the gradient of the max element in output w.r.t input.

    Returns:
        tf.Tensor: the saliency map. Has the same shape as input.
    """
    max_outp = tf.reduce_max(output, 1)
    saliency_op = tf.gradients(max_outp, input)[:][0]
    saliency_op = tf.identity(saliency_op, name=name)
    return saliency_op
ae59afa3a3f449ccbee22644644699dd7033bdf0
2,848
import io import contextlib def pretty_tree(*, program: str = None, file: str = None) -> str: """Get a pretty-printed string of the parsed AST of the QASM input. The input will be taken either verbatim from the string ``program``, or read from the file with name ``file``. Use exactly one of the possible input arguments, passed by keyword. Args: program: a string containing the QASM to be parsed. file: a string of the filename containing the QASM to be parsed. Returns: a pretty-printed version of the parsed AST of the given program. Raises: ValueError: no input is given, or too many inputs are given. Qasm3ParserError: the input was not parseable as valid QASM 3. """ if program is not None and file is not None: raise ValueError("Must supply only one of 'program' and 'file'.") if program is not None: input_stream = antlr4.InputStream(program) elif file is not None: input_stream = antlr4.FileStream(file, encoding="utf-8") else: raise ValueError("One of 'program' and 'file' must be supplied.") # ANTLR errors (lexing and parsing) are sent to stderr, which we redirect # to the variable `err`. with io.StringIO() as err, contextlib.redirect_stderr(err): lexer = qasm3Lexer(input_stream) token_stream = antlr4.CommonTokenStream(lexer) parser = qasm3Parser(token_stream) tree = _pretty_tree_inner(parser.program(), parser.ruleNames, 0) error = err.getvalue() if error: raise Qasm3ParserError(f"Parse tree build failed. Error:\n{error}") return tree
f5436af958dd2bff7aaa297daaa27ef817c619a7
2,849
import pandas as pd

def get_models(datasets):
    """It obtains the models used in the experiments"""
    dataframe = pd.read_csv('../results/' + datasets[0] + '/results.csv', sep=';')
    models = dataframe['MODEL'].unique()
    return models.tolist()
f22419d9784630746d4f0d35765f9ffc8314c7fd
2,850
import ROOT

def _gr_xmin_ ( graph ) :
    """Get x-min for the graph
    >>> xmin = graph.xmin()
    """
    #
    _size = len ( graph )
    if 0 == _size : return 0
    #
    x_ = ROOT.Double(0)
    v_ = ROOT.Double(0)
    graph.GetPoint ( 0 , x_ , v_ )
    #
    return x_
5b53588f0e23d42627c205969c700a16b871ff58
2,851
def eliminate(values):
    """
    Go through all the boxes, and whenever there is a box with a value,
    eliminate this value from the values of all its peers.
    Input: A sudoku in dictionary form.
    Output: The resulting sudoku in dictionary form.
    """
    solved_values = [box for box in values.keys() if len(values[box]) == 1]
    for box in solved_values:
        digit = values[box]
        for peer in peers[box]:
            values[peer] = values[peer].replace(digit, '')
    return values
5e28acbe0ea7cd528e9e1dc77a411d20bd253a9a
2,852
def route_distance(route):
    """
    returns the distance traveled for a given tour
    route - sequence of nodes traveled, does not include
            start node at the end of the route
    """
    dist = 0
    prev = route[-1]
    for node in route:
        dist += node.euclidean_dist(prev)
        prev = node
    return dist
227b6476f6abd9efdf690062e0d4034c4ece2408
2,853
import torch def adjust_matrix(matrix): """ Sorting matrix cols. matrix: can be a numpy 2d-array or pytorch 2d-Tensor Return ------ adjusted pytorch 2d-tensor """ if isinstance(matrix, np.ndarray): tmp = torch.from_numpy(matrix).clone() # ? else: tmp = matrix.clone() tmp /= tmp[:, 0].view([-1, 1]) tmp = sk.col_normalize(tmp, torch.ones(3, dtype=torch.float64)) if torch.sum(torch.log(tmp[:, 1])) > torch.sum(torch.log(tmp[:, 2])): # return torch.from_numpy(matrix) return 2 return 1 # ref = matrix[:, 1].copy() # matrix[:, 1] = matrix[:, 2] # matrix[:, 2] = ref # return torch.from_numpy(matrix)
b045ef16e8b359ff873176265a4ce3e96a973504
2,854
import networkx as nx

def get_student_graph(pool, student, student_friends, friends_students, need_spinglass=False):
    """
    Build the social graph of a user.

    :param pool: process pool (multiprocessing library)
    :param student: user identifier
    :param student_friends: list of the user's friends
    :param friends_students: list of the user's friends attending the same educational institution
    :param need_spinglass: True if a combination of the multilevel and spinglass clustering algorithms should be used
    :return: the user's social graph (NetworkX library)
    """
    graph = nx.Graph()
    for u, fs in pool.imap_unordered(get_friends, student_friends):
        graph.add_edges_from((u, f) for f in fs & student_friends)
    cluster = get_friends_students_cluster(graph, friends_students, need_spinglass)
    graph = graph.subgraph(cluster)
    for u, fs in pool.imap_unordered(get_friends, graph.nodes()):
        graph.add_edges_from((u, f) for f in fs - student_friends - {student})
    redundant_nodes = {node for node, degree in nx.degree(graph).items() if degree <= 2}
    graph.remove_nodes_from(redundant_nodes)
    foafs = set(graph.nodes()) - student_friends
    for u, fs in pool.imap_unordered(get_friends, foafs):
        graph.add_edges_from((u, f) for f in fs & foafs)
    return graph
90adffdeea74b5176b24b1a57e86d52a86b38046
2,855
import os

def wdirectory(path):
    """
    Change the working directory to the given data path.
    ___
    path: string, data path in the system
    """
    return os.chdir(path)
ca44546ca3d35e85c3dc339fd53fd0d79bf63ecd
2,856
def ile_robil_czy_mial_dobe(dzien, zp, grafik):
    """How many hours were worked on the given day, and was it a full 24-hour shift?"""
    godzin = 0
    for wpis in Wpis.objects.filter(user=zp.user, grafik=grafik, dzien=dzien):
        godzin += wpis.pion.ile_godzin(dzien)
    return (godzin, godzin == 24)
1dd9223b29fc330ad70ea8cd3fc3c4194cfd9063
2,857
def load_teacher():
    """
    load ready-to-go teacher from
    "https://towardsdatascience.com/advanced-dqns-playing-pac-man-with-deep-reinforcement-learning-3ffbd99e0814"
    :return: a trained teacher model trained with double dueling dqn with prioritized ER
    """
    dqn = DQNPacman(input_size=dense_config.input_size, output_size=dense_config.output_size,
                    model_path=dense_config.model_path, scope=dense_config.scope,
                    epsilon_stop=dense_config.final_epsilon, epsilon=dense_config.initial_epsilon)
    dqn.load_model()
    return dqn
1428b77b7387f7b0307558e719da65e275a10abf
2,858
from typing import List
import json

def exception_logged(result_output: str, exc: Exception) -> bool:
    """Small utility to search click result output for a specific exception.

    Args:
        result_output: The click result output string to search.
        exc: The exception to search for.

    Returns:
        bool: Whether or not the exception was found
    """
    seen_lines: List[dict] = []
    for line in result_output.splitlines():
        parsed_line = json.loads(line)
        seen_lines.append(parsed_line)
    for line in seen_lines:
        if line.get("exc_info"):
            if repr(exc) in line.get("exc_info"):
                return True
    return False
2148cabf18c0f7d36311e913160191759ca1ce6b
2,859
def hello(world):
    """Hello, You!"""
    return "Hello, {}!".format(world)
d08d2685d3341f0b6474dbd40fc7f9650ddc1092
2,860
import numpy as np

def uniform_transition_matrix(p=0.01, N=24):
    """Computes uniform transition matrix

    Notebook: C5/C5S3_ChordRec_HMM.ipynb

    Args:
        p (float): Self transition probability (Default value = 0.01)
        N (int): Column and row dimension (Default value = 24)

    Returns:
        A (np.ndarray): Output transition matrix
    """
    off_diag_entries = (1 - p) / (N - 1)  # rows should sum up to 1
    A = off_diag_entries * np.ones([N, N])
    np.fill_diagonal(A, p)
    return A
d5f4ea5516de7b9d0d3a6f41ce28921634c2f309
2,861
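A brief check of uniform_transition_matrix above, verifying that each row sums to 1; only numpy is assumed.

import numpy as np

A = uniform_transition_matrix(p=0.1, N=4)
print(A.shape)                          # (4, 4)
print(A[0, 0], A[0, 1])                 # 0.1 0.3  (off-diagonal = 0.9 / 3)
print(np.allclose(A.sum(axis=1), 1.0))  # True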
import networkx as nx

def __is_connected__(g):
    """
    Checks if the directed acyclic graph is connected.
    :return: A boolean indicating if the graph is connected.
    """
    u = __convert_to_undirected_graph__(g)
    return nx.is_connected(u)
bd1516ac8051326932a38bf348f8b1498bed2f07
2,862
import numpy as np

def get_segments(tokens, max_seq_length):
    """Segments: 0 for the first sequence, 1 for the second"""
    if len(tokens) > max_seq_length:
        raise IndexError("Token length more than max seq length!")
    segments = []
    current_segment_id = 0
    for token in tokens:
        segments.append(current_segment_id)
        if token == "[SEP]":
            current_segment_id = 1
    return np.array(segments + [0] * (max_seq_length - len(tokens)))
f3510e04ae44d6d479fe4325dbd257fbcab6cdbc
2,863
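A short usage sketch for the get_segments snippet above, using a typical BERT-style token sequence; numpy is assumed.

tokens = ["[CLS]", "how", "are", "you", "[SEP]", "fine", "thanks", "[SEP]"]
print(get_segments(tokens, max_seq_length=10))
# [0 0 0 0 0 1 1 1 0 0]  - tokens after the first [SEP] get id 1, padding gets 0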
import torch def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None, linear_func=None): """ Copied from torch.nn._functions.rnn and modified """ if linear_func is None: linear_func = F.linear if input.is_cuda and linear_func is F.linear and fusedBackend is not None: gi = linear_func(input, w_ih) gh = linear_func(hidden, w_hh) state = fusedBackend.GRUFused.apply return state(gi, gh, hidden) if b_ih is None else state(gi, gh, hidden, b_ih, b_hh) gi = linear_func(input, w_ih, b_ih) gh = linear_func(hidden, w_hh, b_hh) i_r, i_i, i_n = gi.chunk(3, 1) h_r, h_i, h_n = gh.chunk(3, 1) resetgate = torch.sigmoid(i_r + h_r) inputgate = torch.sigmoid(i_i + h_i) newgate = torch.tanh(i_n + resetgate * h_n) hy = newgate + inputgate * (hidden - newgate) return hy
b21c4efc7b4a2f4caa022d76acfc496444f4e991
2,864
import glob

def _toggle_debug_mode() -> bool:
    """Set debug to true or false.

    Can be used for debugging purposes such that exceptions are raised (including the stack trace)
    instead of suppressed.

    Note: the debug status is always printed when executing this method.

    Returns:
        Boolean indicating the status of the DEBUG global.
    """
    if glob.DEBUG:
        glob.DEBUG = False
        print("Debugging turned off.")
    else:
        glob.DEBUG = True
        print("Debugging turned on.")
    return glob.DEBUG
96d95c7b54d2760af3bc37186cdff759f270af9c
2,865
import os
import sys

def GetAppBasename():
    """Returns the friendly basename of this application."""
    return os.path.basename(sys.argv[0])
fbc858bc4c016ef18bc31fb07b2d085f47ff3976
2,866
import subprocess def connect(dbtype: str, **kwargs) -> subprocess.Popen: """ Creates a connection to the database server """ # create subprocess process = subprocess.Popen('/bin/bash', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=10) # connect process to database server stderr_out = 'errtemp' cmd = parse_connection_request(dbtype, stderr=stderr_out, **kwargs) # debug #print(cmd) process.stdin.write(bytes(cmd, 'utf-8')) # type: ignore # get stderr from errtemp file error_msg = _get_stderr(stderr_out) #print(error_msg) if error_msg: process.communicate() raise ConnectionRefusedError(error_msg) return process
65f3bc4470a8daa5a8442df9e0ce239c1939f153
2,867
def client(mock_settings) -> StructurizrClient:
    """Provide a client instance with the mock settings."""
    return StructurizrClient(settings=mock_settings)
df3c86aed9a924b1ce71ee1be6df9598fa67e39d
2,868
def mean_iou(y_true, y_pred, **kwargs):
    """
    Compute mean Intersection over Union of two segmentation masks, via Keras.

    Calls metrics_k(y_true, y_pred, metric_name='iou'), see there for allowed kwargs.
    """
    return seg_metrics(y_true, y_pred, metric_name='iou', drop_last=False, **kwargs)
8aec0cd7ce6413c36f187a9cef855f380cd6959b
2,869
from copy import deepcopy from futile.Utils import function_signature_regenerator as fsr def get_python_function(target_kwargs_function,func_name,func_spec): """Convert a argparse spec into a python function This function provides a python function with a signature indicated by the ``fun_spec`` dictionary With the conventions of the :f:mod:`yaml_argparse` modules. The :py:func:`futile.Utils.function_signature_regenerator` function is used for the conversion Args: target_kwargs_function (func): the keyword arguments function we want to give the signature to. func_name (str): Name of the function, usually the key of the dictionary whose ``func_spec`` is the value func_spec (dict) : dictionary of the function specifications to be provided to the :py:func:`futile.Utils.function_signature_regenerator` function. Returns: func: the genreated function with signature given by the arguments of ``func_spec`` defaulting to their default value. Todo: Create the docstring of the generated function by also including the docstring of the arguments """ fspec=deepcopy(func_spec) docstring=fspec.pop("help") if "shorthelp" in fspec: fspec.pop("shorthelp") key_args = {key: val["default"] for (key, val) in fspec["args"].items()} return fsr(target_kwargs_function, fun_name=func_name, fun_docstring=docstring,**key_args)
6f25fd3d59bac5d0827345c36a348e8cac7350ac
2,870
def ydbdr2rgb(ydbdr, *, channel_axis=-1): """YDbDr to RGB color space conversion. Parameters ---------- ydbdr : (..., 3, ...) array_like The image in YDbDr format. By default, the final dimension denotes channels. channel_axis : int, optional This parameter indicates which axis of the array corresponds to channels. Returns ------- out : (..., 3, ...) ndarray The image in RGB format. Same dimensions as input. Raises ------ ValueError If `ydbdr` is not at least 2-D with shape (..., 3, ...). Notes ----- This is the color space commonly used by video codecs, also called the reversible color transform in JPEG2000. References ---------- .. [1] https://en.wikipedia.org/wiki/YDbDr """ return _convert(rgb_from_ydbdr, ydbdr, name='ydbdr2rgb')
9646d75712fc607cace24eb6189228ea2d5308f1
2,871
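The helpers `_convert` and `rgb_from_ydbdr` suggest this is the scikit-image implementation; assuming your installed scikit-image exposes both `rgb2ydbdr` and `ydbdr2rgb` in `skimage.color` (an assumption about the version), a round-trip sanity check might look like this sketch.

import numpy as np
from skimage.color import rgb2ydbdr, ydbdr2rgb  # assumed to be available

rgb = np.random.rand(4, 4, 3)        # a tiny float RGB image in [0, 1]
ydbdr = rgb2ydbdr(rgb)               # forward transform
roundtrip = ydbdr2rgb(ydbdr)         # inverse transform shown above
print(np.allclose(rgb, roundtrip))   # expected: True, up to float error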
import typing


def execute(
    connection_info: NodeConnectionInfo,
    block_id: typing.Union[None, bytes, str, int] = None
) -> dict:
    """Returns current auction system contract information.

    :param connection_info: Information required to connect to a node.
    :param block_id: Identifier of a finalised block.
    :returns: Current auction system contract information.
    """
    # Get latest.
    # TODO: verify, as a null block identifier should return the latest
    # auction info anyway.
    if isinstance(block_id, type(None)):
        block: dict = get_block(connection_info)
        block_id: str = block["hash"]

    # Get by hash - bytes | hex.
    if isinstance(block_id, (bytes, str)):
        response = rpc_client.request(
            connection_info.address_rpc,
            constants.RPC_STATE_GET_AUCTION_INFO,
            block_identifier={
                "Hash": block_id.hex() if isinstance(block_id, bytes) else block_id
            }
        )

    # Get by height.
    elif isinstance(block_id, int):
        response = rpc_client.request(
            connection_info.address_rpc,
            constants.RPC_STATE_GET_AUCTION_INFO,
            block_identifier={
                "Height": block_id
            }
        )

    return response.data.result
9c318c3a1b63b9b30033290c56ac96a589c46104
2,872
import numpy as np


def direction_to_point(pos1: IntVector2D, pos2: IntVector2D) -> Grid4TransitionsEnum:
    """
    Returns the closest direction orientation of position 2 relative to position 1.

    :param pos1: position we are interested in
    :param pos2: position we want to know it is facing
    :return: direction NESW as int N:0 E:1 S:2 W:3
    """
    diff_vec = np.array((pos1[0] - pos2[0], pos1[1] - pos2[1]))
    axis = np.argmax(np.power(diff_vec, 2))
    direction = np.sign(diff_vec[axis])

    if axis == 0:
        if direction > 0:
            return Grid4TransitionsEnum.NORTH
        else:
            return Grid4TransitionsEnum.SOUTH
    else:
        if direction > 0:
            return Grid4TransitionsEnum.WEST
        else:
            return Grid4TransitionsEnum.EAST
d13246d64b79050b19189d77047c1390d3d40448
2,873
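A small usage sketch, assuming the snippet above (and the flatland `IntVector2D` alias it annotates) is in scope; the import path for the enum is an assumption taken from the flatland-rl layout and may need adjusting.

import numpy as np
from flatland.core.grid.grid4 import Grid4TransitionsEnum  # assumed import path

# Positions are (row, col); rows grow towards the south.
print(direction_to_point((5, 3), (3, 3)))  # pos2 is two rows up        -> NORTH (0)
print(direction_to_point((2, 1), (2, 6)))  # pos2 is five columns right -> EAST (1)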
def handle_nullboolean(field, request_get):
    """Build a list of chips for NullBooleanField field."""
    value = yesno(
        field.value(),
        pgettext_lazy('Possible values of boolean filter', 'yes,no,all'))
    return [{
        'content': CHIPS_PATTERN % (field.label, value),
        'link': get_cancel_url(request_get, field.name)}]
0544ffb0f4054fe6c6447b811fb8a8b8dbf0ca46
2,874
def rob(nums):
    """
    :type nums: List[int]
    :rtype: int
    """
    if not nums:
        return 0
    elif len(nums) == 1:
        return nums[0]

    # runningTotal[i] holds the best loot achievable using houses 0..i.
    runningTotal = [-1, -1]
    runningTotal[0] = nums[0]
    runningTotal[1] = max(nums[0], nums[1])

    for i in range(2, len(nums)):
        # Either rob house i (and skip i-1) or keep the best total up to i-1.
        runningTotal.append(max(nums[i] + runningTotal[i - 2], runningTotal[i - 1]))

    return runningTotal[-1]
e58e4d04cbe490b9bd2957d23c5dfd42e92aa0fb
2,875
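A quick worked example of the recurrence, with `rob` from the snippet above in scope.

print(rob([2, 7, 9, 3, 1]))   # 12: rob houses worth 2 + 9 + 1
print(rob([1, 2, 3, 1]))      # 4:  rob houses worth 1 + 3
print(rob([]))                # 0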
def data_coded_table(request, project_pk):
    """This returns the labeled data.

    Args:
        request: The POST request
        project_pk: Primary key of the project
    Returns:
        data: a list of data information
    """
    project = Project.objects.get(pk=project_pk)

    data_objs = DataLabel.objects.filter(data__project=project, data__irr_ind=False)

    data = []
    for d in data_objs:
        temp = {
            "Text": escape(d.data.text),
            "Label": d.label.name,
            "Coder": d.profile.__str__(),
        }
        data.append(temp)

    return Response({"data": data})
b6b98a85a80986c6ca045f79e9e478b798e81d4e
2,876
def when(name, converters=None):
    """When step decorator.

    :param name: Step name.
    :param converters: Optional `dict` of the argument or parameter converters
        in form {<param_name>: <converter function>}.

    :raises: StepError in case of wrong configuration.
    """
    return _step_decorator(WHEN, name, converters=converters)
cbcca0184ba8951e60e8324addef0497888956ec
2,877
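A hedged usage sketch in the pytest-bdd style this decorator mirrors; the step text and the fixture it mutates are illustrative, not taken from the source above.

@when("I submit the form")
def submit_form(form_state):
    # Mark the (hypothetical) form fixture as submitted when the step runs.
    form_state["submitted"] = True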
import numpy as np


def displacement(current: np.ndarray, previous: np.ndarray) -> np.ndarray:
    """Computes the displacement vector between the centroids of two storms.

    :param current: the intensity-weighted centroid of the storm in the
        current time slice, given as a tuple.
    :param previous: the intensity-weighted centroid of the storm in the
        previous time slice, given as a tuple.
    :return: the displacement vector, as an array.
    """
    return np.array([current[0] - previous[0], current[1] - previous[1]])
551ee24a92c2709f0af630d8fab726648da5d026
2,878
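A short usage sketch with `displacement` from the snippet above in scope; the centroid values are made up.

import numpy as np

previous = np.array([10.0, 20.0])   # centroid at the earlier time slice
current = np.array([13.0, 24.0])    # centroid at the current time slice
d = displacement(current, previous)
print(d)                  # [3. 4.]
print(np.linalg.norm(d))  # 5.0, the distance the centroid moved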
def update_datapackage(datapackage, mappings):
    """Update the field names and delete the `maps_to` properties."""
    for i, resource in enumerate(datapackage['resources']):
        fields = []
        for field in resource['schema']['fields']:
            fiscal_key = mappings[i][field['name']]
            if fiscal_key not in ('_unknown', '_ignored'):
                field.update({'name': fiscal_key})
                del field['maps_to']
                if 'translates_to' in field:
                    del field['translates_to']
                fields.append(field)
        resource['schema']['fields'] = fields
    return datapackage
f56cf5917331a55d2ac0d5783e0b9c3962eccb5f
2,879
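A minimal, hypothetical datapackage and mapping to show the renaming; the field and fiscal key names are made up for illustration, and `update_datapackage` from the snippet above is assumed to be in scope.

datapackage = {
    'resources': [{
        'schema': {'fields': [
            {'name': 'montant', 'maps_to': 'amount', 'translates_to': 'amount'},
            {'name': 'annee', 'maps_to': 'fiscal_year'},
        ]}
    }]
}
mappings = [{'montant': 'amount', 'annee': 'fiscal_year'}]

updated = update_datapackage(datapackage, mappings)
print([f['name'] for f in updated['resources'][0]['schema']['fields']])
# ['amount', 'fiscal_year']  (the maps_to / translates_to keys are gone)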
import pybel


def get_molpro_mol(logfile):
    """Return the first molecule parsed from a Molpro logfile (via pybel)."""
    return next(pybel.readfile('mpo', logfile))
597418e59f722e4a3f30b9652266de9f131346cf
2,880
def tbody(content, accesskey: str = "", class_: str = "", contenteditable: str = "",
          data_key: str = "", data_value: str = "", dir_: str = "", draggable: str = "",
          hidden: str = "", id_: str = "", lang: str = "", spellcheck: str = "",
          style: str = "", tabindex: str = "", title: str = "", translate: str = ""):
    """
    Returns a table body.

    `content`: Contents of the table body.
    """
    g_args = global_args(accesskey, class_, contenteditable, data_key, data_value,
                         dir_, draggable, hidden, id_, lang, spellcheck, style,
                         tabindex, title, translate)
    return f"<tbody {g_args}>{content}</tbody>\n"
2e47b4e4d995b4100ee9fcb2f408f8f1816e768e
2,881
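A usage sketch with `tbody` and the `global_args` helper it relies on in scope; the row markup is illustrative, and the exact attribute string depends on `global_args`, which is not shown here.

rows = "<tr><td>1</td><td>Ada</td></tr>"
print(tbody(rows, id_="people-body", class_="striped"))
# e.g. '<tbody id="people-body" class="striped"><tr><td>1</td><td>Ada</td></tr></tbody>\n'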
import json
from sys import path


def create_controller():
    """
    1. Check the token
    2. Call the worker method
    3. Show results
    """
    minimum_buffer_min = 3
    token_ok = views.ds_token_ok(minimum_buffer_min)
    if token_ok and 'envelope_id' in session:
        # 2. Call the worker method
        args = {
            'account_id': session['ds_account_id'],
            'envelope_id': session['envelope_id'],
            'base_path': session['ds_base_path'],
            'ds_access_token': session['ds_access_token'],
        }

        try:
            results = worker(args)
        except ApiException as err:
            error_body_json = err and hasattr(err, 'body') and err.body
            # We can pull the DocuSign error code and message from the response body.
            error_body = json.loads(error_body_json)
            error_code = error_body and 'errorCode' in error_body and error_body['errorCode']
            error_message = error_body and 'message' in error_body and error_body['message']
            # In production, you may want to provide customized error messages and
            # remediation advice to the user.
            return render_template('error.html',
                                   err=err,
                                   error_code=error_code,
                                   error_message=error_message
                                   )

        return render_template("example_done.html",
                               title="Get envelope status results",
                               h1="Get envelope status results",
                               message="Results from the Envelopes::get method:",
                               json=json.dumps(json.dumps(results.to_dict()))
                               )

    elif not token_ok:
        flash('Sorry, you need to re-authenticate.')
        # We could store the parameters of the requested operation
        # so it could be restarted automatically.
        # But since it should be rare to have a token issue here,
        # we'll make the user re-enter the form data after
        # authentication.
        session['eg'] = url_for(eg)
        return redirect(url_for('ds_must_authenticate'))

    elif 'envelope_id' not in session:
        return render_template("eg004_envelope_info.html",
                               title="Envelope information",
                               envelope_ok=False,
                               source_file=path.basename(__file__),
                               source_url=ds_config.DS_CONFIG['github_example_url'] + path.basename(__file__),
                               documentation=ds_config.DS_CONFIG['documentation'] + eg,
                               show_doc=ds_config.DS_CONFIG['documentation'],
                               )
5a037ce622c42e11143856625934089a01ea7909
2,882
import six


def pack(number, word_size=None, endianness=None, sign=None, **kwargs):
    """pack(number, word_size = None, endianness = None, sign = None, **kwargs) -> str

    Packs arbitrary-sized integer.

    Word-size, endianness and signedness is done according to context.

    `word_size` can be any positive number or the string "all". Choosing the
    string "all" will output a string long enough to contain all the
    significant bits and thus be decodable by :func:`unpack`.

    `word_size` can be any positive number. The output will contain word_size/8
    rounded up number of bytes. If word_size is not a multiple of 8, it will be
    padded with zeroes up to a byte boundary.

    Arguments:
        number (int): Number to convert
        word_size (int): Word size of the converted integer or the string 'all' (in bits).
        endianness (str): Endianness of the converted integer ("little"/"big")
        sign (str): Signedness of the converted integer (False/True)
        kwargs: Anything that can be passed to context.local

    Returns:
        The packed number as a string.

    Examples:
        >>> pack(0x414243, 24, 'big', True)
        b'ABC'
        >>> pack(0x414243, 24, 'little', True)
        b'CBA'
        >>> pack(0x814243, 24, 'big', False)
        b'\\x81BC'
        >>> pack(0x814243, 24, 'big', True)
        Traceback (most recent call last):
        ...
        ValueError: pack(): number does not fit within word_size
        >>> pack(0x814243, 25, 'big', True)
        b'\\x00\\x81BC'
        >>> pack(-1, 'all', 'little', True)
        b'\\xff'
        >>> pack(-256, 'all', 'big', True)
        b'\\xff\\x00'
        >>> pack(0x0102030405, 'all', 'little', True)
        b'\\x05\\x04\\x03\\x02\\x01'
        >>> pack(-1)
        b'\\xff\\xff\\xff\\xff'
        >>> pack(0x80000000, 'all', 'big', True)
        b'\\x00\\x80\\x00\\x00\\x00'
    """
    if sign is None and number < 0:
        sign = True

    if word_size != 'all':
        kwargs.setdefault('word_size', word_size)
    kwargs.setdefault('endianness', endianness)
    kwargs.setdefault('sign', sign)

    with context.local(**kwargs):
        # Lookup in context if not found
        word_size = 'all' if word_size == 'all' else context.word_size
        endianness = context.endianness
        sign = context.sign

    if not isinstance(number, six.integer_types):
        raise ValueError("pack(): number must be of type (int,long) (got %r)" % type(number))

    if sign not in [True, False]:
        raise ValueError("pack(): sign must be either True or False (got %r)" % sign)

    if endianness not in ['little', 'big']:
        raise ValueError("pack(): endianness must be either 'little' or 'big' (got %r)" % endianness)

    # Verify that word_size make sense
    if word_size == 'all':
        if number == 0:
            word_size = 8
        elif number > 0:
            if sign == False:
                word_size = ((number.bit_length() - 1) | 7) + 1
            else:
                word_size = (number.bit_length() | 7) + 1
        else:
            if sign == False:
                raise ValueError("pack(): number does not fit within word_size")
            word_size = ((number + 1).bit_length() | 7) + 1
    elif not isinstance(word_size, six.integer_types) or word_size <= 0:
        raise ValueError("pack(): word_size must be a positive integer or the string 'all'")

    if sign == True:
        limit = 1 << (word_size - 1)
        if not -limit <= number < limit:
            raise ValueError("pack(): number does not fit within word_size")
    else:
        limit = 1 << word_size
        if not 0 <= number < limit:
            raise ValueError("pack(): number does not fit within word_size [%i, %r, %r]" % (0, number, limit))

    # Normalize number and size now that we have verified them
    # From now on we can treat positive and negative numbers the same
    number = number & ((1 << word_size) - 1)
    byte_size = (word_size + 7) // 8

    out = []
    for _ in range(byte_size):
        out.append(_p8lu(number & 0xff))
        number = number >> 8

    if endianness == 'little':
        return b''.join(out)
    else:
        return b''.join(reversed(out))
e7d6a356f56e9e9c05e20af91a03c9fff2638773
2,883
def getSourceUrls(db):
    """Fetch the source links of articles that have not been crawled yet."""
    sql = """
        SELECT DISTINCT re_article_source.url_source
        FROM re_article_source
        LEFT JOIN source ON re_article_source.url_source = source.url
        WHERE source.url IS NULL
    """
    curr = db.cursor()
    curr.execute(sql)
    urls = []
    for data in curr.fetchall():
        url = data[0]
        urls.append(url)
    return urls
edc84e224b76ff84ffef5f12845add6680ccb25d
2,884
import numpy as np
import scipy as sp
import scipy.stats  # makes sp.stats available


def ML_bump(x, v=None, logger=None):
    """
    ML fit of the bump function.

    Parameters
    ----------
    x : (n,d) ndarray
        covariates
    v : (n,) ndarray
        weight for each sample

    Returns
    -------
    mu : (n,d) ndarray
        bump mean parameter (for each dimension)
    sigma : (n,d) ndarray
        bump std parameter (for each dimension)
    """
    def ML_bump_1d(x, v, logger=None):
        def fit_f(param, x, v):
            mu, sigma = param
            inv_sigma = 1/sigma
            Z = sp.stats.norm.cdf(1, loc=mu, scale=sigma) - sp.stats.norm.cdf(0, loc=mu, scale=sigma)
            inv_Z = 1/Z
            phi_alpha = 1/np.sqrt(2*np.pi)*np.exp(-mu**2/2/sigma**2)
            phi_beta = 1/np.sqrt(2*np.pi)*np.exp(-(1-mu)**2/2/sigma**2)

            # Average likelihood
            if v is None:
                t1 = np.mean(x-mu)
                t2 = np.mean((x-mu)**2)
            else:
                t1 = np.sum((x-mu)*v) / np.sum(v)
                t2 = np.sum((x-mu)**2*v) / np.sum(v)
            l = -np.log(Z) - np.log(sigma) - t2/2/sigma**2

            # Gradient
            d_c_mu = inv_sigma * (phi_alpha-phi_beta)
            d_c_sig = inv_sigma * (-mu*inv_sigma*phi_alpha - (1-mu)*inv_sigma*phi_beta)
            d_l_mu = -d_c_mu*inv_Z + t1*inv_sigma**2
            d_l_sig = -d_c_sig*inv_Z - inv_sigma + t2*inv_sigma**3
            grad = np.array([d_l_mu, d_l_sig], dtype=float)
            return l, grad

        ## gradient check
        #_,grad_ = fit_f([0.2,0.1],x,v)
        #num_dmu = (fit_f([0.2+1e-8,0.1],x,v)[0]-fit_f([0.2,0.1],x,v)[0]) / 1e-8
        #num_dsigma = (fit_f([0.2,0.1+1e-8],x,v)[0]-fit_f([0.2,0.1],x,v)[0]) / 1e-8
        #print('## Gradient check ##')
        #print('# param value: mu=%0.6f, sigma=%0.6f'%(0.2,0.1))
        #print('# Theoretical grad: dmu=%0.8f, dsigma=%0.8f'%(grad_[0],grad_[1]))
        #print('# Numerical grad: dmu=%0.8f, dsigma=%0.8f\n'%(num_dmu,num_dsigma))

        # If the variance is small and the mean is at center,
        # directly output the empirical mean and variance.
        if v is None:
            mu = np.mean(x)
            sigma = np.std(x)
        else:
            mu = np.sum(x*v)/np.sum(v)
            sigma = np.sqrt(np.sum((x-mu)**2*v)/np.sum(v))

        if sigma < 0.075 and np.min([1-mu, mu]) > 0.15:
            return mu, sigma

        param = np.array([mu, sigma])
        lr = 0.01
        max_step = 0.025
        max_itr = 100
        i_itr = 0
        l_old = -10

        # Projected gradient ascent on the truncated-normal likelihood.
        while i_itr < max_itr:
            l, grad = fit_f(param, x, v)
            if np.absolute(l-l_old) < 0.001:
                break
            else:
                l_old = l
            update = (grad*lr).clip(min=-max_step, max=max_step)
            param += update
            i_itr += 1
            if np.isnan(param).any() or np.min([param[0], 1-param[0], param[1]]) < 0:
                return np.mean(x), np.std(x)

        mu, sigma = param
        if sigma > 0.25:
            sigma = 1
        return mu, sigma

    mu = np.zeros(x.shape[1], dtype=float)
    sigma = np.zeros(x.shape[1], dtype=float)
    for i in range(x.shape[1]):
        mu[i], sigma[i] = ML_bump_1d(x[:, i], v, logger=logger)
    return mu, sigma
2ac410b6a756d97df1dbbd42b571a38835efb5d0
2,885
import argparse


def parse_arguments(args_to_parse):
    """Parse the command line arguments."""
    description = "Find targets which contain a None reference"
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument(
        '-d', '--directory-to-search', type=str, required=True,
        help='Directory to search for anomalous target files'
    )
    parser.add_argument(
        '-o', '--output-file', type=str, required=True,
        help='File to save the names of all occurrences'
    )
    args = parser.parse_args(args_to_parse)
    return args
6f5b849356baf3bece4731918240d7ed5e692bb8
2,886
def function_f1a(x):
    """Function with one argument, returning one value.

    :type x: types.IntType
    :rtype: types.StringType
    """
    return '{}'.format(x)
2ccdaa819ad83902353a1c823fae7f4db3eca487
2,887
import socket
from typing import Union


def is_port_in_use(hostname: str, port: Union[int, str]) -> bool:
    """Check if TCP/IP `port` on `hostname` is in use."""
    with socket.socket() as sock:
        try:
            sock.bind((hostname, int(port)))
            return False
        except OSError as err:
            if "Address already in use" in repr(err):
                return True
            raise err
958e9dced4f5b3850f1b2f66f42e8ce21b7d3548
2,888
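A small self-check using only the standard library, with `is_port_in_use` from the snippet above in scope. The exact errno text can vary by platform, which is why the helper matches on the message rather than the code.

import socket
from contextlib import closing

# Occupy an ephemeral port, then probe it with the helper above.
with closing(socket.socket()) as holder:
    holder.bind(("127.0.0.1", 0))
    _, taken_port = holder.getsockname()
    print(is_port_in_use("127.0.0.1", taken_port))  # True: the port is held above
print(is_port_in_use("127.0.0.1", 0))               # False: 0 asks the OS for any free port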
def _pos_from_before_after(
    before: int, after: int, length: int, base0: bool
) -> int:
    """Get the position to insert from before and after."""
    if before is not None and after is not None:
        raise ValueError("Can't specify both `_before` and `_after`.")

    if before is None and after is None:
        return length

    if after is not None:
        return position_after(after, length, base0)

    return position_at(before, length, base0)
8a3fe871c144b00d6bcb4f1286726124f48302de
2,889
import copy


def prep_incorporation_correction_filing(session, business, original_filing_id, payment_id, option,
                                         name_change_with_new_nr):
    """Return a new incorporation correction filing prepped for email notification."""
    filing_template = copy.deepcopy(CORRECTION_INCORPORATION)
    filing_template['filing']['business'] = {'identifier': business.identifier}

    for party in filing_template['filing']['incorporationApplication']['parties']:
        for role in party['roles']:
            if role['roleType'] == 'Completing Party':
                party['officer']['email'] = '[email protected]'

    filing_template['filing']['incorporationApplication']['contactPoint'] = {}
    filing_template['filing']['incorporationApplication']['contactPoint']['email'] = '[email protected]'

    filing_template['filing']['correction']['correctedFilingId'] = original_filing_id

    if not name_change_with_new_nr:
        del filing_template['filing']['incorporationApplication']['nameRequest']['legalName']
    else:
        filing_template['filing']['incorporationApplication']['nameRequest']['nrNumber'] = 'NR 1234567'

    filing = create_filing(token=payment_id, filing_json=filing_template, business_id=business.id)
    filing.payment_completion_date = filing.filing_date
    filing.save()

    if option in ['COMPLETED', 'bn']:
        uow = versioning_manager.unit_of_work(session)
        transaction = uow.create_transaction(session)
        filing.transaction_id = transaction.id
        filing.save()

    return filing
604667c22087304e6f1ffd4e9a51596722952f9e
2,890
import logging


def get_logger():
    """
    Return the custom showyourwork logger.

    Sets up the logging if needed.
    """
    logger = logging.getLogger("showyourwork")

    # Add showyourwork stream & file handlers
    if not logger.handlers:

        # Root level
        logger.setLevel(logging.DEBUG)

        # Terminal: all messages
        stream_handler = ColorizingStreamHandler()
        stream_handler.setLevel(logging.INFO)
        logger.addHandler(stream_handler)

        try:
            LOGS = paths.user().logs
        except:
            # Can't resolve path to logs; assume we're not
            # in a showyourwork/git repo and fail silently.
            pass
        else:
            # File: all showyourwork messages
            msg_file = LOGS / "showyourwork.log"
            file_handler = logging.FileHandler(msg_file)
            file_handler.setLevel(logging.DEBUG)
            logger.addHandler(file_handler)

    return logger
051e263422e84d10fa99fa9627a7bff9e5cc9f0b
2,891
import random

import flask


def web_videos_random_archived(channel):
    """Play a random archived video.

    Chooses a random archived video from the selected channel and redirects to
    its detail page view.

    Args:
        channel (str): YouTube channel ID.

    Returns:
        flask.Response: Selected video detail view.
    """
    try:
        choice = random.choice([
            video['snippet']['resourceId']['videoId']
            for video in yt_get_channel_videos(channel)
            if video['archived'] is not None
        ])
    except IndexError:
        return flask.redirect(flask.url_for('videos', channel=channel))

    return flask.redirect(flask.url_for('videos', channel=channel, video=choice))
6d05832fb4529f3c17b6f6dbdc8c900642cdcbdf
2,892
def fundamental_mode_mfd_marcuse(wl, r, na):
    """Calculates the mode field diameter of the fundamental mode with vacuum
    wavelength wl using Marcuse's equation.

    :param wl: Wavelength of the mode
    :type wl: float
    :param r: Core radius
    :type r: float
    :param na: Core numerical aperture
    :type na: float
    :returns: Mode field diameter of the fundamental mode
    :rtype: float
    """
    v = fiber_v_parameter(wl, r, na)
    return 2 * r * (0.65 + 1.619 * v**(-3/2) + 2.879 * v**(-6))
570d680e5c23b5e5fb5d3528a6bd1fc9d6c55168
2,893
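A worked example for typical single-mode fiber parameters. It assumes `fiber_v_parameter` implements the usual V-number, V = 2*pi*r*NA/wl (not shown above), so the V value is computed directly here; the fiber parameters are illustrative.

import numpy as np

wl = 1550e-9   # vacuum wavelength [m]
r = 4.1e-6     # core radius [m]
na = 0.13      # numerical aperture

v = 2 * np.pi * r * na / wl
mfd = 2 * r * (0.65 + 1.619 * v**(-3/2) + 2.879 * v**(-6))
print(round(v, 2))          # ~2.16, below the single-mode cutoff of ~2.405
print(round(mfd * 1e6, 1))  # ~9.7 micrometre mode field diameter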
def generate_ansible_coverage_config():  # type: () -> str
    """Generate code coverage configuration for Ansible tests."""
    coverage_config = '''
[run]
branch = True
concurrency = multiprocessing
parallel = True

omit =
    */python*/dist-packages/*
    */python*/site-packages/*
    */python*/distutils/*
    */pyshared/*
    */pytest
    */AnsiballZ_*.py
    */test/results/*
'''

    return coverage_config
88fa630613ff12cb5fd33f90883393ee21b574fa
2,894
import numpy as np
from scipy.ndimage import gaussian_filter1d


def gauss_smooth_shift(input, shift, stddev, scale=1.0):
    """Smooths the input with Gaussian smoothing (given standard deviation)
    and shifts its delay positions.

    :param input: The input array
    :param shift: the amount of indices to shift the result
    :param stddev: the stddev for the Gaussian smoothing (in index count)
    :param scale: scale the input array first with scale
    :return: the smoothed and shifted array
    """
    forcescale = False
    if isinstance(scale, np.ndarray):
        forcescale = True
    if forcescale or np.abs(scale - 1) > 1e-5:
        input = input * scale

    result = input
    if stddev > 0.0:
        result = gaussian_filter1d(input, stddev, mode='nearest')

    result = np.roll(result, int(shift))
    if shift > 0:
        result[:int(shift)] = 0
    # else: a backward roll can simply keep the trailing values

    return result
5bf614e544dc13bd190c7bb260f3962557d143fd
2,895
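A quick usage sketch with `gauss_smooth_shift` from the snippet above in scope: a unit spike is blurred with a sigma-of-one Gaussian and moved two bins to the right, with the first two samples zeroed.

import numpy as np

signal = np.zeros(10)
signal[4] = 1.0  # a single spike
out = gauss_smooth_shift(signal, shift=2, stddev=1.0)
print(out.round(3))  # blurred spike now centred near index 6, leading samples are 0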
def mel_to_hz(mel):
    """From Young et al. "The HTK book", Chapter 5.4."""
    return 700.0 * (10.0**(mel / 2595.0) - 1.0)
8306b95bcdf866dda0759a71c2d5d538155173df
2,896
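For context, the inverse mapping under the same HTK formulation and a round-trip check; `hz_to_mel` below is written for illustration and is not part of the source above.

import math

def hz_to_mel(hz):
    # Inverse of mel_to_hz in the HTK formulation.
    return 2595.0 * math.log10(1.0 + hz / 700.0)

print(mel_to_hz(0.0))                          # 0.0 Hz
print(round(mel_to_hz(hz_to_mel(440.0)), 6))   # 440.0 (round trip)
print(round(hz_to_mel(1000.0), 1))             # ~1000.0: 1 kHz maps to about 1000 mel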
def create_app(path=None, user_content=False, context=None, username=None,
               password=None, render_offline=False, render_wide=False,
               render_inline=False, api_url=None, title=None, text=None,
               autorefresh=None, quiet=None, grip_class=None):
    """
    Creates a Grip application with the specified overrides.
    """
    # Customize the app
    if grip_class is None:
        grip_class = Grip

    # Customize the reader
    if text is not None:
        display_filename = DirectoryReader(path, True).filename_for(None)
        source = TextReader(text, display_filename)
    elif path == '-':
        source = StdinReader()
    else:
        source = DirectoryReader(path)

    # Customize the renderer
    if render_offline:
        renderer = OfflineRenderer(user_content, context)
    elif user_content or context or api_url:
        renderer = GitHubRenderer(user_content, context, api_url)
    else:
        renderer = None

    # Optional basic auth
    auth = (username, password) if username or password else None

    # Create the customized app with default asset manager
    return grip_class(source, auth, renderer, None, render_wide,
                      render_inline, title, autorefresh, quiet)
4a1c46677a71b18f3369f9158eacf5050ca85f87
2,897
import re
import subprocess


def get_simulator_version():
    """Get the installed version of XPP.

    Returns:
        :obj:`str`: version
    """
    result = subprocess.run(["xppaut", "-version"],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            check=False)
    if result.returncode != 0:
        raise RuntimeError('XPP failed: {}'.format(result.stdout.decode("utf-8")))
    return re.search(r"(\d+\.\d*|\d*\.\d+)", result.stdout.decode("utf-8")).group(0)
93949c25d79553a3cab361f3d156f723d52d5560
2,898
def enumerate_joint(variables, e, P):
    """Return the sum of those entries in P consistent with e,
    provided variables is P's remaining variables (the ones not in e)."""
    if not variables:
        return P[e]
    Y, rest = variables[0], variables[1:]
    return sum([enumerate_joint(rest, extend(e, Y, y), P)
                for y in P.values(Y)])
649dfdf0b913f7c4fb74d18d73cd8684356d4418
2,899
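This function expects AIMA-style helpers: a joint distribution offering `.values(var)` and dict-keyed lookup, plus an `extend` function. The sketch below supplies minimal stand-ins for those (they are illustrative, not the AIMA classes) and sums out Y from a tiny joint table, with `enumerate_joint` from the snippet above in scope.

class TinyJoint:
    def __init__(self, table, variables):
        self.table = table            # maps tuples of values to probabilities
        self.variables = variables

    def values(self, var):
        idx = self.variables.index(var)
        return sorted({k[idx] for k in self.table})

    def __getitem__(self, e):         # e is a dict of variable -> value
        return self.table[tuple(e[v] for v in self.variables)]


def extend(e, var, val):
    new = dict(e)
    new[var] = val
    return new


P = TinyJoint({('T', 'T'): 0.2, ('T', 'F'): 0.3,
               ('F', 'T'): 0.4, ('F', 'F'): 0.1}, ['X', 'Y'])

# Sum out Y to get P(X='T') = 0.2 + 0.3
print(enumerate_joint(['Y'], {'X': 'T'}, P))   # 0.5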