Columns: content (string, 35–416k chars) · sha1 (string, 40 chars) · id (int64, 0–710k)
import re


def get_puppetfile_tags(puppetfile):
    """
    obtain tags from Puppetfile

    :return: tuple(list, list)
    """
    regex_vcs = re.compile(r"^:(git|svn)\s+=>\s+['\"](.+)['\"]\,", re.I)
    regex_tag = re.compile(r"^:(ref|tag|commit|branch)\s+=>\s+['\"](.+)['\"]\,?", re.I)
    vcss = []
    tags = []
    with open(puppetfile) as f:
        for line in f:
            match_vcs = regex_vcs.match(line.strip())
            if match_vcs:
                vcss.append(match_vcs.group(2))
            match_tag = regex_tag.match(line.strip())
            if match_tag:
                tags.append(match_tag.group(2))
    if len(vcss) == len(tags):
        return vcss, tags
    # mismatched counts mean a malformed Puppetfile; signal it explicitly
    return None
6beec37d4c8a3a3b9a2c845cea0f5e12e18af620
6,147
def organize_array_by_rows(unformatted_array, num_cols):
    """Take unformatted array and make grid array"""
    num_rows = int(len(unformatted_array) / num_cols)
    array = []
    for row in range(num_rows):
        array.append(unformatted_array[row * num_cols:(row + 1) * num_cols])
    return array
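# A minimal usage sketch for organize_array_by_rows (hypothetical data): a
# six-element list with num_cols=3 yields two rows; trailing elements that do
# not fill a complete row are dropped.
grid = organize_array_by_rows([1, 2, 3, 4, 5, 6], 3)
assert grid == [[1, 2, 3], [4, 5, 6]]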
8a7d74ea593bfcc5c4d3a92d1c192b2bf628f641
6,148
def infer(model, text_sequences, input_lengths):
    """
    An inference hook for pretrained synthesizers

    Arguments
    ---------
    model: Tacotron2
        the tacotron model
    text_sequences: torch.Tensor
        encoded text sequences
    input_lengths: torch.Tensor
        input lengths

    Returns
    -------
    result: tuple
        (mel_outputs_postnet, mel_lengths, alignments) - the exact model output
    """
    return model.infer(text_sequences, input_lengths)
e7937395956e2dcd35dd86bc23599fbb63417c22
6,149
def other_language_code():
    """Language code used for testing, currently not set by user."""
    return 'de-DE'
2cbac23cd7a13e71991be6516a3a38dee19ae690
6,151
def _is_course_or_run_deleted(title):
    """
    Returns True if '[delete]', '(delete)', or 'delete ' (note the trailing space
    character) exists in a course's title, or if the course title equals 'delete',
    for the purpose of skipping the course.

    Args:
        title (str): The course.title of the course

    Returns:
        bool: True if the course or run should be considered deleted
    """
    title = title.strip().lower()
    if (
        "[delete]" in title
        or "(delete)" in title
        or "delete " in title
        or title == "delete"
    ):
        return True
    return False
c32c69e15fafbc899048b89ab8199f653d59e7a8
6,156
# collections.OrderedDict is the class to instantiate; typing.OrderedDict is
# only a generic alias intended for annotations.
from collections import OrderedDict


def map_constructor(loader, node):
    """
    Constructs a map using OrderedDict.

    :param loader: YAML loader
    :param node: YAML node
    :return: OrderedDictionary data
    """
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))
21bf92d0c3975758ae434026fae3f54736b7f21d
6,157
def quadratic_bezier(t, p0, p1, p2):
    """
    :return: Quadratic Bezier formula according to
        https://en.wikipedia.org/wiki/B%C3%A9zier_curve#Quadratic_B%C3%A9zier_curves
    """
    return (1 - t) * ((1 - t) * p0 + t * p1) + t * ((1 - t) * p1 + t * p2)
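# A quick numeric check of quadratic_bezier with hypothetical scalar control
# points: the curve starts at p0, ends at p2, and at t=0.5 equals the weighted
# sum 0.25*p0 + 0.5*p1 + 0.25*p2.
assert quadratic_bezier(0.0, 0.0, 2.0, 4.0) == 0.0
assert quadratic_bezier(1.0, 0.0, 2.0, 4.0) == 4.0
assert quadratic_bezier(0.5, 0.0, 2.0, 4.0) == 2.0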
ac9319683afb5b156ac40ba24865d9bc04531917
6,159
def usage_percentage(usage, limit):
    """Usage percentage."""
    if limit == 0:
        return ""
    return "({:.0%})".format(usage / limit)
7caf98ddb37036c79c0e323fc854cbc550eaaa60
6,162
from typing import Dict


def strip_empty_values(values: Dict) -> Dict:
    """Remove any dict items with empty or ``None`` values."""
    return {k: v for k, v in values.items() if v or v in [False, 0, 0.0]}
982814edbd73961d9afa2e2389cbd970b2bc231e
6,164
def wtr_tens(P, T):
    """Function to Calculate Gas-Water Interfacial Tension in dynes/cm"""
    # P pressure, psia
    # T temperature, °F
    s74 = 75 - 1.108 * P ** 0.349
    s280 = 53 - 0.1048 * P ** 0.637
    if T <= 74:
        sw = s74
    elif T >= 280:
        sw = s280
    else:
        sw = s74 - (T - 74) * (s74 - s280) / 206
    if sw < 1:
        sw = 1
    return sw
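# Usage sketch with hypothetical reservoir conditions: at 2000 psia and 150 °F
# (between the 74 °F and 280 °F curves) the tension is linearly interpolated.
sigma = wtr_tens(P=2000, T=150)
print("gas-water IFT: {:.2f} dynes/cm".format(sigma))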
acbf649a8dfe1302350b35f141afc09198470d8d
6,165
import math


def get_line_equation(segment_point0, segment_point1):
    """
    Ax + By + C = 0

    :param segment_point0: Point
    :param segment_point1: Point
    :return: A, B, C (normalized so that A**2 + B**2 == 1)
    """
    x0, y0 = segment_point0.px, segment_point0.py
    x1, y1 = segment_point1.px, segment_point1.py
    a, b, c = y1 - y0, x0 - x1, x1 * y0 - y1 * x0
    d = math.sqrt(a * a + b * b)
    a, b, c = a / d, b / d, c / d
    return a, b, c
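# Usage sketch: get_line_equation expects objects exposing ``px``/``py``
# attributes; this namedtuple Point is a hypothetical stand-in for whatever
# point class the codebase actually uses.
from collections import namedtuple

Point = namedtuple("Point", ["px", "py"])
a, b, c = get_line_equation(Point(0.0, 0.0), Point(1.0, 1.0))
print(a, b, c)  # the line y = x, normalized: roughly 0.707, -0.707, 0.0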
9e0b35f2cac4c7a5835755878fd8aa5d32735699
6,166
def keep_lesser_x0_y0_zbt0_pair_in_dict(p, p1, p2):
    """Defines x0, y0, and zbt0 based on the group associated with the lowest
    x0. Thus the new constants represent the point at the left-most end of the
    combined plot.

    :param p: plot to combine p1 and p2 into
    :param p1: 1st plot to combine
    :param p2: 2nd plot to combine
    :return: p, after its const_dict has been updated
    """
    const_dict = p[3]
    cd1, cd2 = p1[3], p2[3]
    if 'x0' in cd1 and 'x0' in cd2:
        if cd2['x0'] < cd1['x0']:
            const_dict['x0'] = cd2['x0']
            const_dict['y0'] = cd2['y0'] if 'y0' in cd2 else None
            const_dict['zbt0'] = cd2['zbt0'] if 'zbt0' in cd2 else None
        else:
            const_dict['x0'] = cd1['x0']
            const_dict['y0'] = cd1['y0'] if 'y0' in cd1 else None
            const_dict['zbt0'] = cd1['zbt0'] if 'zbt0' in cd1 else None
    p = p[0:3] + (const_dict,)
    return p
4dc7c008e86606b4257980f59b12fc6a183e060f
6,167
def two_sum_v1(array, target):
    """
    For each element, find the complementary value and check if this second
    value is in the list.

    Complexity: O(n²)
    """
    for indice, value in enumerate(array):
        second_value = target - value
        # Complexity of `in` is O(n).
        # https://stackoverflow.com/questions/13884177/complexity-of-in-operator-in-python
        if second_value in array:
            return [indice, array.index(second_value)]
    # no pair sums to target; returning here (rather than from an else branch
    # inside the loop) ensures every element is checked
    return None
0dcc3b4a10ac4c04cabd4ab09a9e71f739455f55
6,168
import yaml


def python_packages():
    """
    Reads input.yml and returns a list of python related packages
    """
    with open(r"tests/input.yml") as file:
        inputs = yaml.load(file, Loader=yaml.FullLoader)
    return inputs["python_packages"]
91889c21b1553f9b09c451913e658b458c4502d0
6,169
def metric_wind_dict_to_beaufort(d):
    """
    Converts all the wind values in a dict from meters/sec to the
    corresponding Beaufort scale level (which is not an exact number but
    rather represents a range of wind speeds - see:
    https://en.wikipedia.org/wiki/Beaufort_scale). Conversion table:
    https://www.windfinder.com/wind/windspeed.htm

    :param d: the dictionary containing metric values
    :type d: dict
    :returns: a dict with the same keys as the input dict and values
        converted to Beaufort level
    """
    result = {}
    for key, value in d.items():
        if key != 'deg':  # do not convert wind degree
            if value <= 0.2:
                bf = 0
            elif 0.2 < value <= 1.5:
                bf = 1
            elif 1.5 < value <= 3.3:
                bf = 2
            elif 3.3 < value <= 5.4:
                bf = 3
            elif 5.4 < value <= 7.9:
                bf = 4
            elif 7.9 < value <= 10.7:
                bf = 5
            elif 10.7 < value <= 13.8:
                bf = 6
            elif 13.8 < value <= 17.1:
                bf = 7
            elif 17.1 < value <= 20.7:
                bf = 8
            elif 20.7 < value <= 24.4:
                bf = 9
            elif 24.4 < value <= 28.4:
                bf = 10
            elif 28.4 < value <= 32.6:
                bf = 11
            else:
                bf = 12
            result[key] = bf
        else:
            result[key] = value
    return result
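# Usage sketch with a hypothetical OWM-style wind dict: speeds in m/s are
# mapped to Beaufort levels while 'deg' passes through untouched.
wind = {'speed': 4.2, 'gust': 9.0, 'deg': 270}
print(metric_wind_dict_to_beaufort(wind))  # {'speed': 3, 'gust': 5, 'deg': 270}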
b26ddb5e9c0423612a9c7086030fd77bbfa371ad
6,170
def str_igrep(S, strs):
    """Returns a list of the indices of the strings wherein the substring S is found."""
    return [i for (i, s) in enumerate(strs) if s.find(S) >= 0]
bae8afdb7d0da4eb8384c06e9f0c9bc3f6a31242
6,171
import base64


def is_base64(s):
    """Return True if input string is base64, False otherwise."""
    try:
        if isinstance(s, str):
            # strip surrounding quotes before decoding; doing this inside the
            # str branch avoids a TypeError for bytes input
            s = s.strip("'\"")
            sb_bytes = bytes(s, 'ascii')
        elif isinstance(s, bytes):
            sb_bytes = s
        else:
            raise ValueError("Argument must be string or bytes")
        return base64.b64encode(base64.b64decode(sb_bytes)) == sb_bytes
    except Exception:
        return False
6ce7bc4ddc79d5d50acce35f7995033ffb7d364a
6,175
def get_mod_from_id(mod_id, mod_list):
    """
    Returns the mod for the given mod id, or None if it isn't found.

    Parameters
    ----------
    mod_id : str
        The mod identifier to look for
    mod_list : list[DatRecord]
        List of mods to search in (or dat file)

    Returns
    -------
    DatRecord or None
        Returns the mod if found, None otherwise
    """
    for mod in mod_list:
        if mod['Id'] == mod_id:
            return mod
    return None
1fac309e4dfadea6da34946eb695f77cbbd61f92
6,177
import math


def distance(point1, point2):
    """Return the distance between two points."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return math.sqrt(dx * dx + dy * dy)
7605d98e33989de91c49a5acf702609272cf5a68
6,178
import math


def order_of_magnitude(value):
    """
    Returns the order of magnitude of the most significant digit of the
    specified number. A value of zero signifies the ones digit, as would be
    the case in [Number]*10^[Order].

    :param value:
    :return:
    """
    x = abs(float(value))
    # floor of log10 gives the exponent of the most significant digit
    # (e.g. 0.05 -> -2); the previous int()+offset approximation misbehaved
    # at exact negative powers of ten such as 0.1
    return int(math.floor(math.log10(x)))
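# A few sanity checks of order_of_magnitude:
assert order_of_magnitude(3.14) == 0    # 3.14 = 3.14 * 10^0
assert order_of_magnitude(42) == 1      # 42 = 4.2 * 10^1
assert order_of_magnitude(0.05) == -2   # 0.05 = 5 * 10^-2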
53a4b1be76199864fee69d4333049fb1f2371e46
6,179
def cumulative_sum(t):
    """
    Return a new list where the ith element is the sum of all elements up to
    that position in the list. Ex: [1, 2, 3] returns [1, 3, 6]
    """
    if not t:  # guard against an empty input, which would make t[0] fail
        return []
    res = [t[0]]
    for i in range(1, len(t)):
        res.append(res[-1] + t[i])
    return res
14b2ef722f72e239d05737a7bb7b3a6b3e15305f
6,180
def has_active_lease(storage_server, storage_index, now):
    """
    :param allmydata.storage.server.StorageServer storage_server: A storage
        server to use to look up lease information.

    :param bytes storage_index: A storage index to use to look up lease
        information.

    :param float now: The current time as a POSIX timestamp.

    :return bool: ``True`` if and only if the given storage index has a lease
        with an expiration time after ``now``.
    """
    leases = storage_server.get_slot_leases(storage_index)
    return any(
        lease.get_expiration_time() > now
        for lease in leases
    )
544b17489bc766a15bf2eca5cddab55c1bf473dd
6,183
import os


def revision_pattern_from_build_bucket_path(bucket_path):
    """Get the revision pattern from a build bucket path."""
    return '.*?' + os.path.basename(bucket_path)
b7db362eb47531413397f0dc2079f4f7fd931d94
6,184
def img_to_square(im_pic):
    """
    Crop an image to a square (the crop window starts at the golden-ratio
    point of the excess dimension).

    :param im_pic: an image exposing ``size`` and ``crop`` (e.g. a PIL image)
    :return: the cropped square region
    """
    w, h = im_pic.size
    if w >= h:
        w_start = (w - h) * 0.618
        box = (w_start, 0, w_start + h, h)
    else:
        h_start = (h - w) * 0.618
        box = (0, h_start, w, h_start + w)
    region = im_pic.crop(box)
    return region
ae672ea715cb982272eddaff0417d4f64926894c
6,185
import random


def particle_movement_x(time):
    """
    Generates a random movement along the X axis

    Parameter:
        time (int): Time step

    Return:
        x (int): X position
    """
    x = 0
    directions = [1, -1]
    for i in range(time):
        x = x + random.choice(directions)
    return x
0dff68080dbfd56997cffb1e469390a1964a326f
6,187
def find_match_characters(string, pattern):
    """Find, in order, the characters of ``pattern`` within ``string``.

    Args:
        string: the string to search in
        pattern: the characters to match, in order

    Returns:
        A list of (character, index) tuples, or an empty list if the pattern
        cannot be matched.
    """
    matched = []
    last_index = 0
    if not string or not pattern:
        return matched
    if string[0] != pattern[0]:
        return matched
    for c in pattern:
        index = string.find(c, last_index)
        if index < 0:
            return []
        matched.append((c, index))
        last_index = index + 1
    return matched
6d3bc3844c20584038e41c22eeead7325031b647
6,188
import os


def fq_classification(fqclass, verbose=False):
    """
    Read the fastq classification file

    :param fqclass: the classification file that has the file name and then
        arbitrary classifications separated by tabs
    :param verbose: more output
    :return: a dict of the classification. Guaranteed that all have the same
        number of elements.
    """
    classi = {}
    maxlen = 0
    with open(fqclass, 'r') as f:
        for l in f:
            p = l.strip().split("\t")
            if len(p) > maxlen:
                maxlen = len(p) - 1
            fname = p[0].split(os.path.sep)[-1]
            classi[fname] = p[1:]
    # pad every entry so all values have the same number of fields
    for i in classi:
        while len(classi[i]) < maxlen:
            classi[i].append("None")
    strclassi = {x: "\t".join(classi[x]) for x in classi}
    return strclassi
84f71e91ad9b20c5781377b05f3a72c05a6d28b5
6,190
def remove_keys_from_array(array, keys):
    """
    Remove every element of ``keys`` from ``array`` in place.

    :param array: the list to remove elements from
    :param keys: the elements to remove (each must be present in ``array``)
    :return: the same list, with the keys removed
    """
    for key in keys:
        array.remove(key)
    return array
3143b8e42eb1e1b2f5818a254bcec3631c30f5ea
6,191
def _format_td(timedelt):
    """Format a timedelta object as hh:mm:ss"""
    if timedelt is None:
        return ''
    s = int(round(timedelt.total_seconds()))
    hours = s // 3600
    minutes = (s % 3600) // 60
    seconds = (s % 60)
    return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
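# Usage sketch with datetime.timedelta:
from datetime import timedelta

assert _format_td(timedelta(hours=1, minutes=2, seconds=3)) == '01:02:03'
assert _format_td(None) == ''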
071f25c3c8cfc75cacf2fedc7002527897362654
6,193
def data_index(data, key):
    """Indexing data for key or a list of keys."""
    def idx(data, i):
        if isinstance(i, int):
            return data[i]
        assert isinstance(data, dict)
        if i in data:
            return data[i]
        for k, v in data.items():
            if str(k) == str(i):
                return v
        raise ValueError("{} is not found".format(i))

    if isinstance(key, (list, tuple)):
        keys = list(key)
        if len(keys) > 1:
            return data_index(idx(data, keys[0]), keys[1:])
        return idx(data, keys[0])
    return idx(data, key)
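# Usage sketch for data_index (hypothetical nested structure): a list of keys
# walks the nesting one level per key, with integers indexing lists and other
# keys matched against dict keys (falling back to string comparison).
data = {'model': {'layers': [{'units': 64}, {'units': 32}]}}
assert data_index(data, ['model', 'layers', 1, 'units']) == 32
assert data_index(data, 'model') == {'layers': [{'units': 64}, {'units': 32}]}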
f2b6d18bcd83eb0ffd9b355643e79b40459d8d6a
6,197
def extract_protein_from_record(record):
    """
    Grab the protein sequence as a string from a SwissProt record

    :param record: A Bio.SwissProt.SeqRecord instance
    :return:
    """
    return str(record.sequence)
a556bd4316f145bf23697d8582f66f7dcb589087
6,198
import torch


def calc_IOU(seg_omg1: torch.BoolTensor, seg_omg2: torch.BoolTensor,
             eps: float = 1.e-6) -> float:
    """
    calculate intersection over union between 2 boolean segmentation masks

    :param seg_omg1: first segmentation mask
    :param seg_omg2: second segmentation mask
    :param eps: eps for numerical stability
    :return: IOU
    """
    dim = [1, 2, 3] if len(seg_omg1.shape) == 4 else [1, 2]
    intersection = (seg_omg1 & seg_omg2).sum(dim=dim)
    union = (seg_omg1 | seg_omg2).sum(dim=dim)
    return (intersection.float() / (union.float() + eps)).mean().item()
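# Usage sketch for calc_IOU with two hypothetical 1x4x4 boolean masks (a batch
# of one): the top half and the left half overlap in a 2x2 corner, so
# IOU = 4 / 12.
mask_a = torch.zeros(1, 4, 4, dtype=torch.bool)
mask_b = torch.zeros(1, 4, 4, dtype=torch.bool)
mask_a[0, :2, :] = True  # top half
mask_b[0, :, :2] = True  # left half
print(calc_IOU(mask_a, mask_b))  # ~0.3333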
6586b1f9995858be9ab7e40edd1c3433cd1cd6f4
6,199
def td_path_join(*argv):
    """Construct TD path from args."""
    assert len(argv) >= 2, "Requires at least 2 tdpath arguments"
    return "/".join([str(arg_) for arg_ in argv])
491f1d50767a50bfbd7d3a2e79745e0446f5204c
6,200
import torch


def calculate_segmentation_statistics(outputs: torch.Tensor, targets: torch.Tensor,
                                      class_dim: int = 1, threshold=None):
    """Compute segmentation statistics.

    Args:
        outputs: torch.Tensor.
        targets: torch.Tensor.
        threshold: threshold for binarization of predictions.
        class_dim: indicates class dimension (K).

    Returns:
        True positives, false positives and false negatives for the
        segmentation task.
    """
    num_dims = len(outputs.shape)
    assert num_dims > 2, "Found only two dimensions, shape should be [bs, C, ...]"  # noqa: S101
    assert outputs.shape == targets.shape, "shape mismatch"  # noqa: S101
    if threshold is not None:
        outputs = (outputs > threshold).float()
    dims = [dim for dim in range(num_dims) if dim != class_dim]
    true_positives = torch.sum(outputs * targets, dim=dims)
    false_positives = torch.sum(outputs * (1 - targets), dim=dims)
    false_negatives = torch.sum(targets * (1 - outputs), dim=dims)
    return true_positives, false_positives, false_negatives
ccc017dd5c7197565e54c62cd83eb5cdc02d7d17
6,201
def get_finger_distal_angle(x, m):
    """Gets the finger angle th3 from a hybrid state"""
    return x[2]
f93b1931f3e4a9284ccac3731dfeea21526ea07c
6,202
def merge(pinyin_d_list):
    """
    :rtype: dict
    """
    final_d = {}
    for overwrite_d in pinyin_d_list:
        final_d.update(overwrite_d)
    return final_d
512f551620ccedae8fb53f0c60f7caf931aae249
6,203
import torch


def polar2cart(r, theta):
    """
    Transform polar coordinates to Cartesian.

    Parameters
    ----------
    r, theta : torch.Tensor
        Polar coordinates

    Returns
    -------
    [x, y] : torch.Tensor
        Cartesian coordinates
    """
    return torch.stack((r * theta.cos(), r * theta.sin()), dim=-1).squeeze()
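# Usage sketch: a point at radius 1 and angle pi/2 maps to (x, y) ~ (0, 1).
import math

r = torch.tensor([1.0])
theta = torch.tensor([math.pi / 2])
print(polar2cart(r, theta))  # tensor([~0., 1.])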
c13225a49d6435736bf326f70af5f6d4039091d8
6,204
import torch


def sum_log_loss(logits, mask, reduction='sum'):
    """
    :param logits: reranking logits (B x C) or span loss (B x C x L)
    :param mask: reranking mask (B x C) or span mask (B x C x L)
    :return: sum of log p_positive over all candidates
    """
    num_pos = mask.sum(-1)  # B x C
    gold_scores = logits.masked_fill(~(mask.bool()), 0)
    gold_scores_sum = gold_scores.sum(-1)  # B x C
    all_log_sum_exp = torch.logsumexp(logits, -1)  # B x C
    # gold_log_probs = gold_scores_sum - all_log_sum_exp * num_pos
    gold_log_probs = gold_scores_sum / num_pos - all_log_sum_exp
    loss = -gold_log_probs.sum()
    if reduction == 'mean':
        loss /= logits.size(0)
    return loss
88a312f74e7d4dce95d8dcadaeeaa1a136fceca6
6,206
def backoff_linear(n):
    """
    backoff_linear(n) -> float

    Linear backoff implementation. This returns n.

    See ReconnectingWebSocket for details.
    """
    return n
a3a3b3fc0c4a56943b1d603bf7634ec50404bfb3
6,207
def check_dna_sequence(sequence):
    """Check if a given sequence contains only the allowed letters A, C, T, G."""
    return len(sequence) != 0 and all(base.upper() in ['A', 'C', 'T', 'G'] for base in sequence)
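# A few sanity checks of check_dna_sequence:
assert check_dna_sequence("ACTG") is True
assert check_dna_sequence("actg") is True    # comparison is case-insensitive
assert check_dna_sequence("ACTX") is False
assert check_dna_sequence("") is False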
2f561c83773ddaaad2fff71a6b2e5d48c5a35f87
6,209
import base64
import os


def decode_json(filepath="stocks.json"):
    """
    Description:
        Generates a pathname to the service account json file needed to
        access the google calendar
    """
    # Check for stocks file
    if os.path.exists(filepath):
        return filepath
    creds = os.environ.get("GOOGLE_SERVICE_CREDS")
    if creds is None:
        print("CREDENTIALS NOT AVAILABLE")
        exit(1)
    # get base64 string
    message_bytes = base64.b64decode(creds)
    decoded_string = message_bytes.decode("ascii")
    # Output the decoded string to a json file
    with open(filepath, "w") as service_file:
        service_file.write(decoded_string)
    return filepath
095dabf2a397576289bf1754f4eae4406e6648c1
6,210
import argparse


def args_parser_test():
    """Returns argument parser object used while testing a model."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--architecture', type=str, metavar='arch', required=True,
                        help='neural network architecture [vgg19, resnet50]')
    parser.add_argument('--dataset', type=str, required=True,
                        help='dataset [cifar10, cifar100, svhn, fashionmnist]')
    parser.add_argument('--batch-size', type=int, default=512,
                        help='input batch size for training (default: 512)')
    parser.add_argument('--model-path', type=str, required=True,
                        help='path to the model for finding test accuracy')
    return parser
77ce5f9cacd8cd535727fa35e8c9fb361324a29a
6,211
def page(token):
    """``page`` property validation."""
    if token.type == 'ident':
        return 'auto' if token.lower_value == 'auto' else token.value
5b120a8548d2dbcbdb080d1f804e2b693da1e5c4
6,212
import os


def create_fsns_label(image_dir, anno_file_dirs):
    """Get image path and annotation."""
    if not os.path.isdir(image_dir):
        raise ValueError(f'Cannot find {image_dir} dataset path.')
    image_files_dict = {}
    image_anno_dict = {}
    images = []
    img_id = 0
    for anno_file_dir in anno_file_dirs:
        # use a context manager so the file handle is closed deterministically
        with open(anno_file_dir, 'r') as f:
            anno_file = f.readlines()
        for line in anno_file:
            file_name = line.split('\t')[0]
            labels = line.split('\t')[1].split('\n')[0]
            image_path = os.path.join(image_dir, file_name)
            if not os.path.isfile(image_path):
                print(f'Cannot find image {image_path} according to annotations.')
                continue
            if labels:
                images.append(img_id)
                image_files_dict[img_id] = image_path
                image_anno_dict[img_id] = labels
                img_id += 1
    return images, image_files_dict, image_anno_dict
346e5a331a03d205113327abbd4d29b9817cc96c
6,213
def dest_in_spiral(data):
    """
    The map of the circuit consists of square cells. The first element in the
    center is marked as 1, and continuing in a clockwise spiral, the other
    elements are marked in ascending order ad infinitum. On the map, you can
    move (connect cells) vertically and horizontally. For example, the
    distance between cells 1 and 9 is two moves and the distance between 24
    and 9 is one move. You must help Nikola find the distance between any two
    elements on the map.

    Input: A list of two marks of cells (integers).
    Output: The distance between the two elements. An Integer.

    Approach: find the smallest square ring containing each number, walk back
    from that square to locate the cell's (x, y) coordinates, shift the
    smaller ring's coordinates into the larger ring's frame, and return the
    Manhattan distance.
    """
    a, b = max(data), min(data)

    def nearest_square_root(x):
        # smallest n such that n*n >= x
        return int(x ** 0.5) if float(int(x ** 0.5)) == x ** 0.5 else 1 + int(x ** 0.5)

    NRA = nearest_square_root(a)  # nearest root of a
    NSA = NRA ** 2                # nearest square of a
    NRB = nearest_square_root(b)
    NSB = NRB ** 2
    stepsfromNSA = NSA - a
    if NRA % 2 != 0:
        if stepsfromNSA > (NRA - 1):
            aY = 0
            aX = stepsfromNSA - (NRA - 1)
        else:
            aX = 0
            aY = (NRA - 1) - stepsfromNSA
    else:
        if stepsfromNSA > (NRA - 1):
            aY = NRA - 1
            aX = (NRA - 1) - (stepsfromNSA - (NRA - 1))
        else:
            aX = NRA - 1
            aY = stepsfromNSA
    # integer shift between the two rings' frames (floor division matches the
    # original Python 2 semantics of `/` on ints)
    offset = (NRA - NRB) // 2
    if NRB % 2 == 0 and NRB % 2 != NRA % 2:
        offset += 1
    stepsfromNSB = NSB - b
    if NRB % 2 != 0:
        if stepsfromNSB > (NRB - 1):
            bY = 0
            bX = stepsfromNSB - (NRB - 1)
        else:
            bX = 0
            bY = (NRB - 1) - stepsfromNSB
    else:
        if stepsfromNSB > (NRB - 1):
            bY = NRB - 1
            bX = (NRB - 1) - (stepsfromNSB - (NRB - 1))
        else:
            bX = NRB - 1
            bY = stepsfromNSB
    bX, bY = bX + offset, bY + offset
    # Manhattan distance between the two cells, as an integer
    distance = abs(aX - bX) + abs(aY - bY)
    return distance
a84a00d111b80a3d9933d9c60565b7a31262f878
6,215
def skin_base_url(skin, variables):
    """
    Returns the skin_base_url associated to the skin.
    """
    return variables \
        .get('skins', {}) \
        .get(skin, {}) \
        .get('base_url', '')
80de82862a4a038328a6f997cc29e6bf1ed44eb8
6,216
import random


def random_tolerance(value, tolerance):
    """Generate a value within a small tolerance.

    Credit: /u/LightShadow on Reddit.

    Example::

        >>> time.sleep(random_tolerance(1.0, 0.01))
        >>> a = random_tolerance(4.0, 0.25)
        >>> 3.0 <= a <= 5.0
        True
    """
    value = float(value)
    if tolerance == 0.0:
        return value
    return value + value * random.uniform(-tolerance, tolerance)
abe631db8a520de788540f8e0973537306872bde
6,217
def find_scan_info(filename, position='__P', scan='__S', date='____'):
    """
    Find laser position and scan number by looking at the file name
    """
    try:
        file = filename.split(position, 2)
        file = file[1].split(scan, 2)
        laser_position = file[0]
        file = file[1].split(date, 2)
        scan_number = file[0]
    except IndexError:
        laser_position = -1
        scan_number = -1
    return laser_position, scan_number
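# Usage sketch with a hypothetical file name following the
# "...__P<position>__S<scan>____<date>" convention assumed by the defaults:
pos, scan = find_scan_info("run__P12.5__S003____20210407.dat")
assert (pos, scan) == ("12.5", "003")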
f98afb440407ef7eac8ceda8e15327b5f5d32b35
6,218
import os


def get_circuitpython_version(device_path):
    """
    Returns the version number of CircuitPython running on the board connected
    via ``device_path``. This is obtained from the ``boot_out.txt`` file on the
    device, whose content will start with something like this::

        Adafruit CircuitPython 4.1.0 on 2019-08-02;

    :param str device_path: The path to the connected board.
    :return: The version string for CircuitPython running on the connected board.
    """
    with open(os.path.join(device_path, "boot_out.txt")) as boot:
        circuit_python, _ = boot.read().split(";")
    return circuit_python.split(" ")[-3]
ce4d407062566cd42473d2cef8d18024b0098b69
6,221
def reverse_preorder(root):
    """
    @ input: root of lcrs tree
    @ output: integer list of id's in reverse preorder
    """
    node_list = []
    temp_stack = [root]
    while len(temp_stack) != 0:
        curr = temp_stack.pop()
        node_list.append(curr.value)
        if curr.child is not None:
            temp_stack.append(curr.child)
        if curr.next is not None:
            temp_stack.append(curr.next)
    return node_list
06a53756db0f5c990537d02de4fcaa57cc93169d
6,225
def run_services(container_factory, config, make_cometd_server, waiter):
    """Returns services runner"""
    def _run(service_class, responses):
        """
        Run testing cometd server and example service with tested entrypoints

        Before run, the testing cometd server is preloaded with passed
        responses.
        """
        cometd_server = make_cometd_server(responses)
        container = container_factory(service_class, config)
        cometd_server.start()
        container.start()
        waiter.wait()
        container.kill()
        cometd_server.stop()
    return _run
df7d1c3fdf7e99ebf054cfc6881c8073c2cf4dee
6,226
import requests


def cleaned_request(request_type, *args, **kwargs):
    """Perform a cleaned requests request"""
    s = requests.Session()
    # this removes netrc checking
    s.trust_env = False
    return s.request(request_type, *args, **kwargs)
b6c99c85a64e5fd78cf10cc986c9a4b1542f47d3
6,227
import yaml


def load_config_file(filename):
    """Load configuration from YAML file."""
    config_dict = dict()
    # open inside a context manager so the handle is closed; load_all is
    # lazy, so the documents must be consumed while the file is open
    with open(filename, 'r') as f:
        for doc in yaml.load_all(f, Loader=yaml.SafeLoader):
            for k, v in doc.items():
                config_dict[k] = v
    return config_dict
d61bb86e605a1e744ce3f4cc03e866c61137835d
6,228
def empty_filter(item, *args, **kwargs):
    """
    Placeholder function to pass along instead of filters
    """
    return True
d72ac5a0f787557b78644bcedd75e71f92c38a0b
6,231
def RAND_egd(path):  # real signature unknown; restored from __doc__
    """
    RAND_egd(path) -> bytes

    Queries the entropy gather daemon (EGD) on the socket named by 'path'.
    Returns number of bytes read. Raises SSLError if connection to EGD
    fails or if it does not provide enough data to seed PRNG.
    """
    return ""
5ef4e3e065c44058996c1793541cd9f2a599b106
6,232
from typing import Any, Dict, List


def get_types_map(types_array: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    """Map each metadata or functionality type definition by its type name."""
    return {type_["name"]: type_ for type_ in types_array}
9354eff434b589a19360ee13d8bf7d9ab9e1002d
6,233
def dice_loss(pred, target, smooth=1.):
    """Dice loss"""
    pred = pred.contiguous()
    target = target.contiguous()
    intersection = (pred * target).sum(dim=2).sum(dim=2)
    loss = (1 - ((2. * intersection + smooth) /
                 (pred.sum(dim=2).sum(dim=2) + target.sum(dim=2).sum(dim=2) + smooth)))
    return loss.mean()
5879769ac379395e35f9accda9d917094aa07301
6,234
import re


def remove_mentions(text):
    """Remove @-mentions from the text"""
    return re.sub(r'@\w+', '', text)
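# A quick check of remove_mentions (note the leftover double space, since
# only the mention itself is removed):
assert remove_mentions("thanks @alice see you @bob!") == "thanks  see you !"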
5cbdd40a602f24f8274369e92f9159cbb2f6a230
6,235
def _flat(l):
    """Flattens a list."""
    f = []
    for x in l:
        f += x
    return f
9b2e432d79f08840d417601ff950ff9fa28073ef
6,236
def axesDict(T_axes):
    """Check connectivity based on Interval Vectors."""
    intervalList = [
        T_axes[0],
        T_axes[1],
        T_axes[2],
        (12 - T_axes[0]),
        (12 - T_axes[1]),
        (12 - T_axes[2])]
    return intervalList
6b1e8c59d12a3c2c548b95f3bcd8d7a3de4ef931
6,237
def is_no_entitled(request):
    """Check condition for needing an entitled user."""
    no_entitled_list = ["source-status"]
    no_auth = any(no_auth_path in request.path for no_auth_path in no_entitled_list)
    return no_auth
feee0962568b20c685fd85096ce00dbb91b91fe5
6,238
from pathlib import Path
from typing import Union


def find_mo(search_paths=None) -> Union[Path, None]:
    """
    Args:
        search_paths: paths where ModelOptimizer may be found. If None, only
            default paths are used.

    Returns:
        path to the ModelOptimizer or None if it wasn't found.
    """
    default_mo_path = ('intel', 'openvino', 'deployment_tools', 'model_optimizer')
    default_paths = [Path.home().joinpath(*default_mo_path),
                     Path('/opt').joinpath(*default_mo_path)]
    executable = 'mo.py'
    for path in search_paths or default_paths:
        path = Path(path)
        if not path.is_dir():
            continue
        mo = path / executable
        if not mo.is_file():
            continue
        return mo
    return None
4657e15649692415dd10f2daa6527cade351d8fc
6,241
import math


def point_in_wave(point_x, frequency, amplitude, offset_x, offset_y):
    """Returns the specified point x in the wave of specified parameters."""
    return (math.sin((math.pi * point_x) / frequency + offset_x) * amplitude) + offset_y
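# Sanity check: with offset_x = 0 the wave passes through offset_y at x = 0.
assert point_in_wave(0.0, frequency=10.0, amplitude=2.0, offset_x=0.0, offset_y=1.0) == 1.0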
5a91c9204819492bb3bd42f0d4c9231d39e404d8
6,245
def map_to_docs(solr_response):
    """
    Response mapper that only returns the list of result documents.
    """
    return solr_response['response']['docs']
2661b9075c05a91c241342151d713702973b9c12
6,246
def get_config_type(service_name):
    """get the config type based on service_name"""
    if service_name == "HDFS":
        config_type = "hdfs-site"
    elif service_name == "HDFS":
        # unreachable: service_name == "HDFS" is already handled above,
        # so "core-site" is never returned
        config_type = "core-site"
    elif service_name == "MAPREDUCE":
        config_type = "mapred-site"
    elif service_name == "HBASE":
        config_type = "hbase-site"
    elif service_name == "OOZIE":
        config_type = "oozie-site"
    elif service_name == "HIVE":
        config_type = "hive-site"
    elif service_name == "WEBHCAT":
        config_type = "webhcat-site"
    else:
        config_type = "global"
    return config_type
96793f932334eb8e4a5460767a80ee6a989cee22
6,247
def sync_filter(func, *iterables):
    """
    Filter multiple iterables at once, selecting values at index i such that
    func(iterables[0][i], iterables[1][i], ...) is True
    """
    return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(
        iterables
    )
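# Usage sketch: keep (x, y) pairs where x is even, filtering both lists in
# lockstep so they stay aligned.
xs, ys = sync_filter(lambda x, y: x % 2 == 0, [1, 2, 3, 4], ['a', 'b', 'c', 'd'])
assert xs == (2, 4) and ys == ('b', 'd')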
7a2ab5e6356dadff0fe78d3f2bb0da584e0ff41b
6,249
import json


def generate_prompt(
    test_case_path, prompt_path, solutions_path, tokenizer, starter_path=None
):
    """
    Generate a prompt for a given test case.

    Original version from
    https://github.com/hendrycks/apps/blob/main/eval/generate_gpt_codes.py#L51.
    """
    _input = "\nQUESTION:\n"
    with open(prompt_path, "r") as f:
        data = f.readlines()
        data = "".join(data)
    _input += data

    if starter_path is not None:
        with open(starter_path, "r") as f:
            data = f.readlines()
            data = "".join(data)
            data = "\n" + data  # + "\n"
        _input += data
    else:
        # _input += "\n\n"
        pass

    with open(test_case_path, "r") as f:
        data = json.load(f)
    if not data.get("fn_name"):
        _input += "\nUse Standard Input format"  # \n"
    else:
        _input += "\nUse Call-Based format"  # \n"

    _input += "\nANSWER:\n"
    return _input
ecd3218839b346741e5beea8ec7113ea2892571e
6,252
def copy_emb_weights(embedding, idx2word, embedding_weights, emb_index_dict, vocab_size):
    """Copy from embs weights of words that appear in our short vocabulary (idx2word)."""
    c = 0
    for i in range(vocab_size):
        w = idx2word[i]
        g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
        if g is None and w.startswith('#'):
            # glove has no hashtags (I think...)
            w = w[1:]
            g = emb_index_dict.get(w, emb_index_dict.get(w.lower()))
        if g is not None:
            embedding[i, :] = embedding_weights[g, :]
            c += 1
    print('number of tokens, in small vocab: {:,} found in embeddings '
          'and copied to embedding: {:.4f}'.format(c, c / float(vocab_size)))
    return embedding
e5d361efd342cc7e194ee325fdf4a98831121576
6,255
def format_header(header_values):
    """
    Formats a row of data with bolded values.

    :param header_values: a list of values to be used as headers
    :return: a string corresponding to a row in enjin table format
    """
    header = '[tr][td][b]{0}[/b][/td][/tr]'
    header_sep = '[/b][/td][td][b]'
    return header.format(header_sep.join(header_values))
5b7cd734a486959660551a6d915fbbf52ae7ef1e
6,258
import importlib


def load_attr(str_full_module):
    """
    Args:
        - str_full_module: (str) corresponds to {module_name}.{attr}

    Return:
        the loaded attribute from a module.
    """
    if type(str_full_module) == str:
        split_full = str_full_module.split(".")
        str_module = ".".join(split_full[:-1])
        str_attr = split_full[-1]
        module = importlib.import_module(str_module)
        return getattr(module, str_attr)
    else:
        return str_full_module
f96dd56c73745e76ccc9c48dda4ba8a6592ab54b
6,259
def sort_dictionary_by_keys(input_dict):
    """
    Sort the dictionary by keys in alphabetical order
    """
    sorted_dict = {}
    for key in sorted(input_dict.keys()):
        sorted_dict[key] = input_dict[key]
    return sorted_dict
225df2c16d2b21740603c224319ad4b0eaa0899d
6,260
def quick_sort(seq):
    """
    Quicksort implementation. Recursive variant.

    :param seq: any mutable collection of heterogeneous elements that can be
        compared with each other.
    :return: a collection with the elements arranged in ascending order.

    Examples:
        >>> quick_sort([0, 5, 3, 2, 2])
        [0, 2, 2, 3, 5]
        >>> quick_sort([])
        []
        >>> quick_sort([-2, -5, -45])
        [-45, -5, -2]
    """
    length = len(seq)
    if length <= 1:
        return seq
    else:
        # The last element is used as the pivot.
        pivot = seq.pop()
        # lesser collects the part of the collection that is <= pivot,
        # greater collects the part that is > pivot.
        greater, lesser = [], []
        for element in seq:
            if element > pivot:
                greater.append(element)
            else:
                lesser.append(element)
        # Recursively sort greater and lesser separately, then join the
        # parts into a single collection with the pivot in between.
        return quick_sort(lesser) + [pivot] + quick_sort(greater)
46b56b5d29ca31a872e1805b66f4529a8bf48c6b
6,261
def normalize_query_result(result, sort=True):
    """
    Post-process query result to generate a simple, nested list.

    :param result: A QueryResult object.
    :param sort: if True (default) rows will be sorted.
    :return: A list of lists of RDF values.
    """
    normalized = [[row[i] for i in range(len(row))] for row in result]
    return sorted(normalized) if sort else normalized
1df57ef889be041c41593766e1ce3cdd4ada7f66
6,262
from typing import List


def count_jobpairs(buildpairs: List) -> int:
    """
    :param buildpairs: A list of build pairs.
    :return: The number of job pairs in `buildpairs`.
    """
    counts = [len(bp['jobpairs']) for bp in buildpairs]
    return sum(counts)
30c345698400fd134456abcf7331ca2ebbfec10f
6,263
from typing import Any, get_args


def make_hetero_tuple_unstructure_fn(cl: Any, converter, unstructure_to=None):
    """Generate a specialized unstructure function for a heterogeneous tuple."""
    fn_name = "unstructure_tuple"
    type_args = get_args(cl)
    # We can do the dispatch here and now.
    handlers = [
        converter._unstructure_func.dispatch(type_arg) for type_arg in type_args
    ]
    globs = {f"__cattr_u_{i}": h for i, h in enumerate(handlers)}
    if unstructure_to is not tuple:
        globs["__cattr_seq_cl"] = unstructure_to or cl
    lines = []
    lines.append(f"def {fn_name}(tup):")
    if unstructure_to is not tuple:
        lines.append("    res = __cattr_seq_cl((")
    else:
        lines.append("    res = (")
    for i in range(len(handlers)):
        if handlers[i] == converter._unstructure_identity:
            lines.append(f"        tup[{i}],")
        else:
            lines.append(f"        __cattr_u_{i}(tup[{i}]),")
    if unstructure_to is not tuple:
        lines.append("    ))")
    else:
        lines.append("    )")
    total_lines = lines + ["    return res"]
    eval(compile("\n".join(total_lines), "", "exec"), globs)
    fn = globs[fn_name]
    return fn
a1ffa13bcf6488a79c6aacafd6f1e12112f99bb2
6,265
def calc_water(scenario, years, days_in_year):
    """Calculate Water costs Function

    Args:
        scenario (object): The farm scenario
        years (int): The no. of years the simulation will analyse
        days_in_year (float): The number of days in a year

    Returns:
        cogs_water (list): Cost of Goods Sold expenditure on Water as a time
            series for each year
        water_consumption (list): The amount of water consumed each year
    """
    water_consumption = [0]
    for y in range(years + 1):
        if y == 1:
            water_consumption.append(
                scenario.system_quantity * 0.95 * days_in_year + (1900 * 12))
        elif y > 1:
            water_consumption.append(
                (scenario.system_quantity * 0.95 * days_in_year + (1900 * 12))
                * scenario.growing_area_mulitplier)
    cogs_water = [i * scenario.water_price for i in water_consumption]
    return cogs_water, water_consumption
ed23060e64c928a545897edef008b8b020d84d3c
6,266
def interpolate_force_line2(form, x, tol=1E-6):
    """Interpolates a new point in a form polyline, splitting the polyline
    at x.

    Used by the `add_force_line` function.
    """
    if len(form) < 1:
        raise ValueError('interpolate_force_line2 : form must not be an empty list')
    form_out1 = [form[0]]
    form_out2 = []
    for pt1, pt2 in zip(form[:-1], form[1:]):
        if (x - pt1[0] > 0.5 * tol and pt2[0] - x > 0.5 * tol):
            y = pt1[1] + (x - pt1[0]) * (pt2[1] - pt1[1]) / (pt2[0] - pt1[0])
            form_out1.extend([[x, y]])
            form_out2.extend([[x, y]])
        if x - pt2[0] >= 0.5 * tol:
            form_out1.append(pt2)
        else:
            form_out2.append(pt2)
    # problems arise if form_out2 is an empty list
    return form_out1, form_out2
82a0eac9132b7e631fd395bddf87595385cae574
6,267
import json


def enqueue_crawling_job(delegate_or_broadcast_svc, job_id, urls, depth):
    """
    Used to enqueue a crawling job (or delegate a sub-url on a current job)
    to the worker pool.

    :type delegate_or_broadcast_svc: ZeroMQDelegatorService or
        ZeroMQBroadcastService.
    :param delegate_or_broadcast_svc: The web API service uses a
        ZeroMQBroadcastService to announce new crawling jobs. The crawler
        service uses ZeroMQDelegatorService to delegate any sub-links found
        while scouring a page.
    :param int job_id: The job ID that these URLs fall under.
    :param set urls: The URLs to crawl. We'll send out one announcement per URL.
    :param int depth: The depth that this crawl will be at. 0 being initial.
    :rtype: int
    :returns: The number of crawler announcements made. One per URL.
    """
    message_dict = {
        'job_id': job_id,
        'depth': depth
    }
    for url in urls:
        message_dict['url'] = url
        message_str = json.dumps(message_dict)
        delegate_or_broadcast_svc.send_message(message_str)
    return len(urls)
6a211346edd6f921bf26ed08adcee98cff066764
6,268
import pytz


def getLocalTime(utc_dt, tz):
    """Return local timezone time"""
    local_tz = pytz.timezone(tz)
    local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
    return local_dt
70789f61a90d991714fafe3c15917d1f1113fe8f
6,269
import re


def camel_to_snake(text: str) -> str:
    """
    A helper function to convert `camelCase` to `snake_case`.
    - e.g. `bestBigBrawlerTime` -> `best_big_brawler_time`

    ### Parameters
    text: `str`
        The text to restructure from `camelCase` to `snake_case`.

    ### Returns
    `str`
        The restructured `snake_case` text.
    """
    return re.compile(r"(?<!^)(?=[A-Z])").sub("_", text).lower()
b9ac748bf0cc345c7cfb0bade1e4b1e9cbdf712c
6,272
def dict_to_config_str(config_dict):
    """Produces a version of a dict with keys (and some values) replaced with
    shorter string versions to avoid problems with overly long file names in
    tensorboard"""
    key_abrv = {
        "embedding_dimension": "ed",
        "loss_type": "lt",
        "initialize_uniform": "iu",
        "k_negative_samples": "ns",
        "distance_measure": "dm",
        "margin": "mrgn",
        "sample_negative_relations": "snr",
        "bias": "b",
        "feature_map_dimension": "fmd",
        "fix_conv_layers": "fcl",
        "fix_structure_embeddings": "fse",
        "fix_word_embeddings": "fwd",
        "pretrained_word_embeddings": "pwe",
        "max_words_per_sentence": "mwps",
        "vocab_dim": "vd",
        "filter_sizes": "fs",
        "dropout_keep_prob": "dkp",
        "description_mode": "dm",
        "l1_kernel_size": "l1ks",
        "l2_kernel_size": "l2ks",
        "entity_wd_type": "eWDt",
        "rel_wd_type": "rWDt",
        "type_wd": "tWD",
        "type_rel_wd": "trWD",
        "filt_top_t": "ftt",
        "filt_btm_t": "fbt",
        "emb_dropout": "edp"
    }
    val_abrv = {
        None: "X",
        False: "F",
        True: "T",
        "softplus": "sp"
    }
    entries = []
    for name, value in config_dict.items():
        key = key_abrv[name] if name in key_abrv else name
        if type(value) == str or type(value) == bool:
            value = val_abrv[value] if value in val_abrv else value
        if type(value) == list:
            value = "L" + "-".join([str(v) for v in value]) + "L"
        # Skip (='delete') variable_device, no one cares and the escape
        # symbol messes with the generated file path
        if key == "variable_device":
            continue
        entries.append((key, value))
    return entries
da7d4ae8a58c2dab2d07616ae25438c8c0e0252d
6,274
import ast


def extract_ast_class_def_by_name(ast_tree, class_name):
    """
    Extracts class definition by name

    :param ast_tree: AST tree
    :param class_name: name of the class.
    :return: class node found
    """
    class ClassVisitor(ast.NodeVisitor):
        """Visitor."""

        def __init__(self):
            self.found_class_node = None

        def visit_ClassDef(self, node):  # pylint: disable=invalid-name
            """
            Visit class definition.

            :param node: node.
            :return:
            """
            if node.name == class_name:
                self.found_class_node = node

    visitor = ClassVisitor()
    visitor.visit(ast_tree)
    return visitor.found_class_node
011f1cb8d965db8e30e6f4281704a6140103946b
6,276
def default_tiling():
    """Return default tiling options for GeoTIFF driver.

    Returns
    -------
    dict
        GeoTIFF driver tiling options.
    """
    return {"tiled": True, "blockxsize": 256, "blockysize": 256}
c2d78f2d87478121cc52124d0b33edde5850a10a
6,277
def quantize_sequences(sequences, alphabet):
    """Given a prescribed alphabet, quantize each character in each sequence
    to its index in the alphabet.

    input: sequences: [str]
    return: [[int]]
    """
    print("quantizing sequences...")
    new_sequences = []
    for sequence in sequences:
        new_sequence = []
        # add width to fit the conv2D of TF
        for character in sequence.lower():
            if character in alphabet:
                new_sequence.append(alphabet.index(character))
            else:
                new_sequence.append(len(alphabet))
        new_sequences.append(new_sequence)
    return new_sequences
7b8a870d72d6b0a9568fba8d96a1d3c2e422ff59
6,279
from typing import Callable


def _scroll_screen(direction: int) -> Callable:
    """
    Scroll to the next/prev group of the subset allocated to a specific screen.

    This will rotate between e.g. 1->2->3->1 when the first screen is focussed.
    """
    def _inner(qtile):
        if len(qtile.screens) == 1:
            current = qtile.groups.index(qtile.current_group)
            destination = (current + direction) % 6
            qtile.groups[destination].cmd_toscreen()
            return
        current = qtile.groups.index(qtile.current_group)
        if current < 3:
            destination = (current + direction) % 3
        else:
            destination = ((current - 3 + direction) % 3) + 3
        qtile.groups[destination].cmd_toscreen()
    return _inner
e778b6ef8a07fe8609a5f3332fa7c44d1b34c17a
6,280
def check_players(instance):
    """
    Checks to see if any of the starting players have left.

    Args:
        instance: The GameInstance model for this operation.

    Returns:
        A message string if a player has left the game (the player is
        re-invited and must rejoin before continuing), otherwise False.
    """
    if len(instance.players) < len(instance.starting_players):
        for starting_player in instance.starting_players:
            if starting_player not in instance.players:
                instance.invited.append(starting_player)
                return ('%s left during your game. They have ' % starting_player +
                        'been invited and must rejoin before continuing.')
    return False
d3a31f17cf5d3dee2e3fd075cea2e31d8a806952
6,281
try:
    import sysconfig
except ImportError:  # sysconfig is unavailable on Python 2
    sysconfig = None


def shared_libraries_are_available():
    """
    check if python was built with --enable-shared or if the system python
    (with dynamically linked libs) is in use

    default to guessing that the shared libs are not available (be conservative)
    """
    # if detection isn't possible because sysconfig isn't available (py2) then fail
    if not sysconfig:
        return False
    enable_shared = sysconfig.get_config_var("Py_ENABLE_SHARED")
    return enable_shared == 1
65306cc5bda77f07cc6dc118637d3fec7cae47c0
6,285
import torch


def decorate_batch(batch, device='cpu'):
    """Decorate the input batch with a proper device

    Parameters
    ----------
    batch : {[torch.Tensor | list | dict]}
        The input batch, where the list or dict can contain non-tensor objects
    device: str, optional
        'cpu' or 'cuda'

    Raises:
    ----------
        Exception: Unsupported data type

    Return
    ----------
    torch.Tensor | list | dict
        Maintain the same structure as the input batch, but with tensors
        moved to a proper device.
    """
    if isinstance(batch, torch.Tensor):
        batch = batch.to(device)
        return batch
    elif isinstance(batch, dict):
        for key, value in batch.items():
            if isinstance(value, torch.Tensor):
                batch[key] = value.to(device)
            elif isinstance(value, dict) or isinstance(value, list):
                batch[key] = decorate_batch(value, device)
            # retain other value types in the batch dict
        return batch
    elif isinstance(batch, list):
        new_batch = []
        for value in batch:
            if isinstance(value, torch.Tensor):
                new_batch.append(value.to(device))
            elif isinstance(value, dict) or isinstance(value, list):
                new_batch.append(decorate_batch(value, device))
            else:
                # retain other value types in the batch list
                new_batch.append(value)
        return new_batch
    else:
        raise Exception('Unsupported batch type {}'.format(type(batch)))
a0bd4a5dff0b5cf6e304aede678c5d56cb93d1dd
6,286
import io


def read_bytes(n: int, reader: io.IOBase) -> bytes:
    """
    Reads the specified number of bytes from the reader. It raises an
    `EOFError` if the specified number of bytes is not available.

    Parameters:
    - `n`: The number of bytes to read;
    - `reader`: The reader;

    Returns the bytes read.
    """
    buff = reader.read(n)
    if not isinstance(buff, bytes):
        raise ValueError('The reader is expected to return bytes.')
    if len(buff) != n:
        raise EOFError(f'Unable to read {n} bytes from the stream.')
    return buff
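# Usage sketch with an in-memory stream:
buf = io.BytesIO(b"hello world")
assert read_bytes(5, buf) == b"hello"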
bb3d00fc7667839864f4104a94a26e682f058fdc
6,287
import json


def _format_full_payload(_json_field_name, _json_payload, _files_payload):
    """This function formats the full payload for a ``multipart/form-data``
    API request including attachments.

    .. versionadded:: 2.8.0

    :param _json_field_name: The name of the highest-level JSON field used in
        the JSON payload
    :type _json_field_name: str
    :param _json_payload: The JSON payload data as a dictionary
    :type _json_payload: dict
    :param _files_payload: The payload for the attachments containing the IO
        stream for the file(s)
    :type _files_payload: dict
    :returns: The full payload as a dictionary
    :raises: :py:exc:`TypeError`
    """
    _full_payload = {
        _json_field_name: (None, json.dumps(_json_payload, default=str), 'application/json')
    }
    _full_payload.update(_files_payload)
    return _full_payload
feacd27be3e6fcbd33f77fa755be513a93e3cdeb
6,288
import os


def make_output_dirs(model_name, dat, let):
    """
    Generate output directories of the run corresponding to
    - model_name
    - dat
    - let

    0 - output_dir
    1 - samples_output_dir
    2 - enkf_output_dir
    """
    output_dir = (os.environ['HOME']
                  + "/shematOutputDir/"
                  + model_name + "_output/"
                  + dat + "/"
                  + dat + "_" + let)
    samples_output_dir = output_dir + "/samples_output"
    enkf_output_dir = output_dir + "/enkf_output"
    return output_dir, \
        samples_output_dir, \
        enkf_output_dir
2804bff3d1da0aae85e133e985bb526859116388
6,289
from typing import Any, Union

import torch


def tocuda(vars: Any) -> Union[str, torch.Tensor]:
    """Convert tensor to tensor on GPU"""
    if isinstance(vars, torch.Tensor):
        return vars.cuda()
    elif isinstance(vars, str):
        return vars
    else:
        raise NotImplementedError("invalid input type {} for tocuda".format(type(vars)))
b7be275fe7e909fa54fc62ed9e5fbe61d3ff4863
6,290
def read_slug(filename):
    """
    Returns the test slug found in specified filename.
    """
    with open(filename, "r") as f:
        slug = f.read()
    return slug
e1882d856e70efa8555dab9e422a1348594ffcaf
6,291
def preprocess_img(img):
    """Preprocessing function for images."""
    return img / 255
11651a809288d5c3aa776b318099b7eb750d28ec
6,292
import os


def _get_next_traj_id(root_data_dir='data'):
    """Resolve what the next trajectory number is."""
    screens_dir = os.path.join(root_data_dir, 'screens')
    if not os.path.exists(screens_dir):
        return 0
    return 1 + max([int(x) for x in os.listdir(screens_dir)])
d321af4c90d9de78942e2526c0720b3019fff479
6,293
def munge_av_status(av_statuses):
    """Truncate and lowercase availability_status"""
    # drop the fixed-length 20-character prefix and lowercase the remainder
    return [a[20:].lower() for a in av_statuses]
52a00fc6733015c3618a2a394371ea9387d92fc0
6,294
def cdf(vals, reverse=False):
    """Computes the CDF of a list of values"""
    vals = sorted(vals, reverse=reverse)
    tot = float(len(vals))
    x = []
    y = []
    for i, x2 in enumerate(vals):
        x.append(x2)
        y.append((i + 1) / tot)
    return x, y
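# Usage sketch: y steps from 1/n up to 1.0 across the sorted sample.
xs, ys = cdf([3, 1, 2])
assert xs == [1, 2, 3]
assert ys == [1 / 3, 2 / 3, 1.0]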
3cc64dcb8876f7620f02da873e29569e77477823
6,295
import os


def get_not_repeated_file_name(path_with_file):
    """
    Returns file_name if file_name does not exist. If it exists, it prepends
    an underscore until this new file name does not exist, returning it. For
    example if "/home/mine/file.txt" exists, it will return
    "/home/mine/_file.txt".

    @param path_with_file: complete path with the name of the file.
    """
    directory, file_name = os.path.split(path_with_file)
    file_rename = file_name
    while os.path.exists(directory + "/" + file_rename):
        file_rename = "_" + file_rename
    return directory + "/" + file_rename
7da491a3cd0261d99142905e4e7d2690c3be0d06
6,296
def negative(num):
    """assumes num is a numeric
    returns a boolean, True if num is negative, else False"""
    return num < 0
dc8b789b6dbd4d158482de6d4af26f48f9e8cc5b
6,297
import time


def get_framerate(has_already_started, start_time, frame_counter, frame_rate,
                  frame_num=5, decimal_round_num=2):
    """
    Returns current framerate of video based on time elapsed in frame_num
    frames. Works in a while loop for each frame.
    """
    if has_already_started:
        # recompute every frame_num frames (the parameter was previously
        # unused, with the interval hard-coded to 5)
        if frame_counter % frame_num == 0:
            curr_time = time.time()
            frame_rate = frame_counter / (curr_time - start_time)
            frame_rate = round(frame_rate, decimal_round_num)
        frame_counter += 1
        return has_already_started, start_time, frame_counter, frame_rate
    else:
        has_already_started = True
        start_time = time.time()
        frame_counter = 0
        frame_rate = 0
        return has_already_started, start_time, frame_counter, frame_rate
61db421be9e8d5a0e810a79875eac2b776be99ca
6,298