| content (string, 35-416k chars) | sha1 (string, 40 chars) | id (int64, 0-710k) |
|---|---|---|
def repeat(s: str, n: int) -> str:
""" Return s repeated n times; if n is negative, return the empty string.
>>> repeat('yes', 4)
'yesyesyesyes'
>>> repeat('no', 0)
''
>>> repeat('no', -2)
''
>>> repeat('yesnomaybe', 3)
'yesnomaybeyesnomaybeyesnomaybe'
"""
return s * n
|
67c31e1d824442c5cc388f190e6dd222f1b2cf83
| 8,781
|
def set_or_callable(value):
"""Convert single str or None to a set. Pass through callables and sets."""
if value is None:
return frozenset()
if callable(value):
return value
if isinstance(value, (frozenset, set, list)):
return frozenset(value)
return frozenset([str(value)])
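# Illustrative usage (hypothetical values, not part of the original function):
assert set_or_callable(None) == frozenset()
assert set_or_callable("light.kitchen") == frozenset({"light.kitchen"})
assert set_or_callable(["a", "b"]) == frozenset({"a", "b"})
assert callable(set_or_callable(lambda state: True))  # callables pass through unchanged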
|
d7ef01016ea0ac679cdf13e78432f5a99d991522
| 8,784
|
def add_id(pos_list, image_id=0):
"""
Add id for gather feature, for inference.
"""
new_list = []
for item in pos_list:
new_list.append((image_id, item[0], item[1]))
return new_list
|
47cfdb55392eac141a796f74c539c4b12a39cdb9
| 8,785
|
def is_empty_tensor(t):
"""Returns whether t is an empty tensor."""
return len(t.size()) == 0
|
f0caf9a7b21c77a01dc088314f2d8fbbe49cf1f3
| 8,786
|
import numpy
def sinusoid( frequency, sampling_frequency=16000, duration=0.025 ):
"""Generate a sinusoid signal.
Args:
frequency (int): the frequency of the sinusoidal signal.
sampling_frequency (int, optional): sampling frequency in Hz. Defaults to 16000.
duration (float, optional): duration of the output sinusoid in seconds. Defaults to 0.025.
Returns:
numpy.array: a sinusoid.
"""
times = numpy.arange(int(sampling_frequency * duration))
return numpy.sin(2 * numpy.pi * frequency * times / sampling_frequency)
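# Illustrative usage (assumed values): a 25 ms, 440 Hz tone at the default 16 kHz rate
tone = sinusoid(440)
assert tone.shape == (400,)   # int(16000 * 0.025) samples
assert abs(tone[0]) < 1e-12   # the signal starts at sin(0) == 0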
|
b8c71c285b1e9f43806d7eb41d2e91278362b7d5
| 8,787
|
def isNotTrue (b) :
"""return True if b is not equal to True, return False otherwise
>>> isNotTrue(True)
False
>>> isNotTrue(False)
True
>>> isNotTrue("hello world")
True
"""
    # anything that does not compare equal to True yields True
    return b != True
|
91b7aa18d6e60f13f31e3826d520f5c4083ca089
| 8,788
|
def search(variable: str, target: str) -> str:
"""Search serice using mwapi on wikidata
Args:
variable (str): variable name (?film, ?director...)
target (str): value to search for
Returns:
str: service query
"""
if variable is None or target is None:
return ""
return f"""
SERVICE wikibase:mwapi {{
bd:serviceParam wikibase:api "EntitySearch" .
bd:serviceParam wikibase:endpoint "www.wikidata.org" .
bd:serviceParam mwapi:search "{target}" .
bd:serviceParam mwapi:language "en" .
{variable} wikibase:apiOutputItem mwapi:item .
}}
"""
|
3959a6c7d93e5f61f237a019ae941702df35eb31
| 8,789
|
def postprocess_question(text):
"""postprocess the output of question generation model for fair readable.
output.
Args:
text (text): generated question to be processed.
Returns:
str: clean readable text.
"""
output = text.replace("question: ", "")
output = output.strip()
return output
|
292a0aa92cc86e411b5700352028e3d5858b3511
| 8,790
|
def get_job_data(job, average, qubit_idx, scale_factor):
"""
Retrieve data from a job that has already run.
"""
job_results = job.result(timeout=120) # Timeout after 120 s
result_data = []
for i in range(len(job_results.results)):
if average: # Get avg data
result_data.append(job_results.get_memory(i)[qubit_idx] * scale_factor)
else: # Get single data
result_data.append(job_results.get_memory(i)[:, qubit_idx] * scale_factor)
return result_data
|
748871f547d15911c889d72e6969caedc773e18f
| 8,791
|
def expand_url(url, protocol):
"""
    Expands the given URL to a full URL by adding
    the Magento API parts
    :param url: URL to be expanded
    :param protocol: 'soap' or 'xmlrpc'; anything else falls back to the REST endpoint
"""
if protocol == 'soap':
ws_part = 'api/?wsdl'
elif protocol == 'xmlrpc':
ws_part = 'index.php/api/xmlrpc'
else:
ws_part = 'index.php/rest/V1'
return url.endswith('/') and url + ws_part or url + '/' + ws_part
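# Illustrative usage (hypothetical store URL):
assert expand_url('http://example.com/magento/', 'soap') == 'http://example.com/magento/api/?wsdl'
assert expand_url('http://example.com/magento', 'xmlrpc') == 'http://example.com/magento/index.php/api/xmlrpc'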
|
9cf96886dc6101c562a7c091ebeb12349ce52219
| 8,793
|
def inner_product(x, y):
"""Inner product."""
return x.dot(y)
|
aa56c71199863b5b8764ce8e96375c8cc61378d4
| 8,794
|
def to_millis(seconds):
"""
Converts the time parameter in seconds to milliseconds. If the given time is negative, returns the original value.
:param seconds: (Number), the given time in seconds.
    :return: (int), result of the conversion in milliseconds.
"""
if seconds >= 0:
return int(seconds * 1000)
return seconds
|
818409afa643dbb8de73c35348a08508227b75a3
| 8,795
|
from pathlib import Path
import re
def find_version():
"""Retrieve the version."""
constpy = Path("dataplaybook/const.py").read_text()
version_match = re.search(r"^VERSION = ['\"]([^'\"]+)['\"]", constpy, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
|
a7766a2a3977e0ba2ace33a4c4e9aad64930ace2
| 8,796
|
def escape_nl(msg):
"""
It's nice to know if we actually sent a complete line ending in
\n, so escape it for display.
"""
if msg != '' and msg[-1] == "\n":
return msg[:-1] + "\\n"
return msg
|
dc30ee05b9985eb69a4e22f2603f739788b22dc8
| 8,797
|
def noun(name: str, num: int) -> str:
"""
    This function returns a noun in its correct form (singular or plural) for a specific quantity
    :param name: singular form of the noun
    :param num: the quantity
    :return: the noun with an "s" appended when num is 0 or greater than 1, otherwise the singular form
"""
if num == 0 or num > 1:
return name + "s"
return name
|
ccf9fd3f459e8d8946aded4435368231d54cac9f
| 8,798
|
def get_assessor_dict(assessor_label, assessor_path):
"""
Generate the dictionary for an assessor from the folder in the queue
:param assessor_label: assessor label
:param assessor_path: assessor path on the station
    :return: dictionary describing the assessor (empty if the label has fewer than four parts)
"""
assessor_dict = dict()
keys = ['project_id', 'subject_label', 'session_label', 'label',
'proctype', 'path']
labels = assessor_label.split('-x-')
if len(labels) > 3:
values = [labels[0], labels[1], labels[2], assessor_label, labels[-1],
assessor_path]
assessor_dict = dict(list(zip(keys, values)))
return assessor_dict
|
b3fd02f730ef6d966ddec562f0ed40a6ac12ef78
| 8,799
|
import torch
def _rmse(y, y_hat):
"""RMSE"""
y_hat = y_hat.unsqueeze(1) if y_hat.dim() == 1 else y_hat
y = y.unsqueeze(1) if y.dim() == 1 else y
assert y.shape == y_hat.shape
assert y.dim() == 2
assert y.shape[-1] == 1
return torch.nn.functional.mse_loss(y, y_hat).pow(0.5)
|
b44402511f0e2acf48e9c9210ffa9f023c9a5ca0
| 8,801
|
import re
def build_tx_cigar(exons, strand):
"""builds a single CIGAR string representing an alignment of the
transcript sequence to a reference sequence, including introns.
The input exons are expected to be in transcript order, and the
resulting CIGAR is also in transcript order.
>>> build_tx_cigar([], 1) is None
True
"""
cigarelem_re = re.compile(r"\d+[=DIMNX]")
def _reverse_cigar(c):
return ''.join(reversed(cigarelem_re.findall(c)))
if len(exons) == 0:
return None
# flip orientation of all CIGARs if on - strand
if strand == -1:
cigars = [_reverse_cigar(e["cigar"]) for e in exons]
else:
cigars = [e["cigar"] for e in exons]
tx_cigar = [cigars[0]] # exon 1
for i in range(1, len(cigars)): # and intron + exon pairs thereafter
intron = str(exons[i]["alt_start_i"] - exons[i - 1]["alt_end_i"]) + "N"
tx_cigar += [intron, cigars[i]]
tx_cigar_str = "".join(tx_cigar)
return tx_cigar_str
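# Illustrative usage (hypothetical exon records on the + strand):
exons = [
    {"cigar": "100=", "alt_start_i": 0, "alt_end_i": 100},
    {"cigar": "50=", "alt_start_i": 200, "alt_end_i": 250},
]
assert build_tx_cigar(exons, 1) == "100=100N50="  # 100 bp intron between the exons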
|
a05211d4dbf04fa6cef8e1e595227985de8e7a36
| 8,803
|
def DictionaryofDate_valuetoArrays(Date_value):
"""Returns (array): date, value """
    date = sorted(Date_value.keys())  # dict views have no .sort() in Python 3
value = []
for d in date:
value.append(Date_value[d])
return date, value
|
d4ad630457fc03f9f193515ae51c52f438b0cd81
| 8,804
|
import os
def _in_load_test_mode():
"""Returns True if the default values should be used instead of the server
provided bot_config.py.
This also disables server telling the bot to restart.
"""
return os.environ.get('SWARMING_LOAD_TEST') == '1'
|
117907bf2bac25e66fdd58ce1fa5b48d68b7e0bb
| 8,805
|
import re
def cassini_time(time):
"""Parse Cassini time.
Parameters
----------
time: str, int or float
Cassini time.
Returns
-------
float
Parsed Cassini time as float.
Raises
------
ValueError
If the input time pattern is invalid.
Examples
--------
>>> cassini_time('v1487096932_1')
1487096932.0
>>> cassini_time(1483230358.172)
1483230358.172
"""
cassini_time = re.findall(r'(\d{10})(\.\d+)?', str(time))
if not cassini_time:
raise ValueError(f'Cassini time invalid: `{time}`')
return float(''.join(cassini_time[0]))
|
bc14c2803e04ed690fac75eb32d72b27a803f1ad
| 8,806
|
def pods_by_uid(pods):
"""Construct a dict of pods, keyed by pod uid"""
return {pod["metadata"]["uid"]: pod for pod in pods}
|
44b4167c561e494700e56a4967f731e0bef48aab
| 8,808
|
def _tags_conform_to_filter(tags, filter):
"""Mirrors Bazel tag filtering for test_suites.
This makes sure that the target has all of the required tags and none of
the excluded tags before we include them within a test_suite.
For more information on filtering inside Bazel, see
com.google.devtools.build.lib.packages.TestTargetUtils.java.
Args:
tags: all of the tags for the test target
filter: a struct containing excluded_tags and required_tags
Returns:
True if this target passes the filter and False otherwise.
"""
# None of the excluded tags can be present.
for exclude in filter.excluded_tags:
if exclude in tags:
return False
# All of the required tags must be present.
for required in filter.required_tags:
if required not in tags:
return False
# All filters have been satisfied.
return True
|
1db9528e11d1b513690af14f1d8453f8b0682d34
| 8,809
|
import optparse
def get_options():
"""define options for this script and interpret the command line"""
optParser = optparse.OptionParser()
optParser.add_option("--nogui", action="store_true",
default=False, help="run the commandline version of sumo")
options, args = optParser.parse_args()
return options
|
ce09dfd6de38781b31b1f07f8f52fd5236c850e5
| 8,811
|
def set_max_results(uri, max):
"""Set max-results parameter if it is not set already."""
max_str = str(max)
if uri.find('?') == -1:
return uri + '?max-results=' + max_str
else:
if uri.find('max-results') == -1:
return uri + '&max-results=' + max_str
else:
return uri
|
62793ad686c8abf0e2e750f65d9f709eee2c179e
| 8,812
|
import torch
def kronecker(mat1, mat2):
"""
kronecker product between 2 2D tensors
:param mat1: 2d torch.Tensor
:param mat2: 2d torch.Tensor
:return: kronecker product of mat1 and mat2
"""
s1 = mat1.size()
s2 = mat2.size()
return torch.ger(mat1.view(-1), mat2.view(-1)).reshape(*(s1 + s2)).permute([0, 2, 1, 3]).reshape(s1[0] * s2[0], s1[1] * s2[1])
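# Illustrative check with small assumed matrices; torch.kron (PyTorch >= 1.8) computes the same product
a = torch.tensor([[1., 2.], [3., 4.]])
b = torch.tensor([[0., 1.], [1., 0.]])
assert torch.equal(kronecker(a, b), torch.kron(a, b))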
|
930ac9827b92848656b6579c173b2d7675b7e657
| 8,813
|
def StringCompression(s: str) -> str:
"""Compresses the specified string using counts of repeated characters.
>>> StringCompression("")
''
>>> StringCompression("a")
'a'
>>> StringCompression("ab")
'ab'
>>> StringCompression("abc")
'abc'
>>> StringCompression("aba")
'aba'
>>> StringCompression("aa")
'a2'
>>> StringCompression("aab")
'a2b1'
>>> StringCompression("aabaa")
'a2b1a2'
>>> StringCompression("aabaabbcddeeef")
'a2b1a2b2c1d2e3f1'
>>> StringCompression("aabcccccaaa")
'a2b1c5a3'
"""
compressed_segments = []
i = 0
while i < len(s):
current_character = s[i]
count = 1
while i + 1 < len(s) and current_character == s[i + 1]:
i += 1
count += 1
compressed_segments.append((current_character, count))
i += 1
if all(map(lambda x: x[1] == 1, compressed_segments)):
return s
compressed_s = "".join(
[f"{character}{count}" for character, count in compressed_segments]
)
return compressed_s
|
e23e7f10b4259fed27b57dd3932be53373166821
| 8,815
|
import re
def possibly_fix_width(text):
"""Heuristic to possibly mark-up text as monospaced if it looks like
a URL, or an environment variable name, etc."""
if text in ['', '--']:
return text
# stringify the arguments
if type(text) not in [type('string'), type(u'Unicode')]:
text = "%r" % text
if text[0] in "$/" or "}" == text[-1] or re.match(r'^[A-Z_\${}:-]+$', text):
return '`%s`' % text
return text
|
36e72e4578aea9d4be735ecec518bd232e7d40db
| 8,816
|
def weighted_score(raw_earned, raw_possible, weight):
"""
Returns a tuple that represents the weighted (earned, possible) score.
If weight is None or raw_possible is 0, returns the original values.
When weight is used, it defines the weighted_possible. This allows
course authors to specify the exact maximum value for a problem when
they provide a weight.
"""
assert raw_possible is not None
cannot_compute_with_weight = weight is None or raw_possible == 0
if cannot_compute_with_weight:
return raw_earned, raw_possible
else:
return float(raw_earned) * weight / raw_possible, float(weight)
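# Worked example (assumed values): 2 of 4 raw points with weight 10 scales to (5.0, 10.0)
assert weighted_score(2, 4, 10) == (5.0, 10.0)
assert weighted_score(2, 4, None) == (2, 4)  # no weight: raw values pass through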
|
98ec27ebe606586811945650c18772801edd80a0
| 8,817
|
def addr_entry(key):
"""querries the user for address data, cleans it"""
# getting the input
rawline = input(
'please enter ' + key + ': '
)
# get rid of trailing whitespace
line = rawline.strip()
# replace tabs
line = line.replace('\t', ' ')
    # remove forbidden characters
    trans_tab = dict.fromkeys(
        map(ord, '\\:."\'!@#$/'), None
    )
line = line.translate(trans_tab)
if rawline != line:
print(
'Warning: string has been changed: \''
+ rawline
+ '\' to \''
+ line
+ '\'.'
)
return line
|
3edf6a39a276f9abb63ee5930a907b75ba3c623a
| 8,818
|
import argparse
def parse_arguments():
"""
Read in the config file specifying all of the parameters
"""
parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
parser.add_argument("-config_path", type=str, default=None, help="Load config file")
args = parser.parse_args()
return args
|
f3f443f4df33718903132b721869eb276dbe855b
| 8,819
|
def harmonic_epmi_score(pdict, wlist1, wlist2):
""" Calculate harmonic mean of exponentiated PMI over all word pairs
in two word lists, given pre-computed PMI dictionary
- If harmonic ePMI is undefined, return -inf
"""
total_recip_epmi = None
# Number of pairs for which PMI exists
N = 0
for word1 in wlist1:
for word2 in wlist2:
# Enforce alphabetical order in pair
pair = tuple(sorted([word1, word2]))
wi, wj = pair
if wi in pdict and wj in pdict[wi]:
if total_recip_epmi is None:
total_recip_epmi = 0
total_recip_epmi += 1/(2**pdict[wi][wj])
N += 1
if total_recip_epmi is not None:
return N/total_recip_epmi
else:
return float("-inf")
|
5aec36df72e22fecbb1dfdcbc6ec840944a40d8d
| 8,820
|
def optional_apply(f, value):
"""
If `value` is not None, return `f(value)`, otherwise return None.
>>> optional_apply(int, None) is None
True
>>> optional_apply(int, '123')
123
Args:
f: The function to apply on `value`.
value: The value, maybe None.
"""
if value is not None:
return f(value)
|
dfa5b6793d7226370a27d6a638c0a5bc975f78d4
| 8,822
|
def repl_func(m):
"""process regular expression match groups for word upper-casing problem"""
return m.group(1) + m.group(2).upper()
|
72ae8d2cdcec98ce4ae661dbe020dc244d47c8af
| 8,823
|
def prod(F, E):
"""Check that the factorization of P-1 is correct. F is the list of
factors of P-1, E lists the number of occurrences of each factor."""
x = 1
for y, z in zip(F, E):
x *= y**z
return x
|
401a5596b42b1299a07b3f621c996226474735f5
| 8,824
|
def sort_data(data, cols):
"""Sort `data` rows and order columns"""
return data.sort_values(cols)[cols + ['value']].reset_index(drop=True)
|
33acbfd9be36d187120564f1792147b644b6c394
| 8,825
|
import torch
def argval_subsample_idx(values, n, polarity="MAX"):
"""values is a list, n an int, polarity is MAX or MIN"""
assert n > 0
descending = {"MAX": True, "MIN": False}[polarity]
_, idxs = torch.sort(torch.Tensor(values), descending=descending)
return idxs.tolist()[:n]
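# Illustrative usage (assumed values): indices of the largest / smallest entries
assert argval_subsample_idx([0.1, 0.9, 0.5], 2, polarity="MAX") == [1, 2]
assert argval_subsample_idx([0.1, 0.9, 0.5], 1, polarity="MIN") == [0]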
|
dbf15c72554be1bc750b12ca47c98b0e654a3c2e
| 8,826
|
import logging
def sanitise_graphite_url(current_skyline_app, graphite_url):
"""
Transform any targets in the URL that need modifications like double encoded
forward slash and return whether the URL was sanitised and the url.
:param current_skyline_app: the Skyline app calling the function
:param graphite_url: the URL
:type current_skyline_app: str
:type graphite_url: str
:return: sanitised, url
:rtype: tuple
"""
sanitised_url = graphite_url
sanitised = False
current_logger = None
if '.%2F' in sanitised_url:
try:
sanitised_url = graphite_url.replace('.%2F', '.%252F')
sanitised = True
try:
current_skyline_app_logger = str(current_skyline_app) + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
except:
pass
if current_logger:
                current_logger.info('sanitise_graphite_url - transformed %s to %s' % (
                    graphite_url, str(sanitised_url)))
except:
pass
if '+' in graphite_url:
try:
new_sanitised_url = sanitised_url.replace('+', '%2B')
sanitised_url = new_sanitised_url
sanitised = True
try:
current_skyline_app_logger = str(current_skyline_app) + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
except:
pass
if current_logger:
current_logger.info('sanitise_graphite_url - transformed %s to %s' % (
graphite_url, str(sanitised_url)))
except:
pass
return sanitised, sanitised_url
|
8151a83861a3ff275694946f623f6beb42fc6bbd
| 8,827
|
def _union_all(iterables):
"""Return a set representing the union of all the contents of an
iterable of iterables.
"""
out = set()
for iterable in iterables:
out.update(iterable)
return out
|
673bc7493007c6cf781d84490023cea7139f1e93
| 8,828
|
from typing import List
def construct_relative_positions(pos: int, max_length: int) -> List[int]:
"""Construct relative positions to a specified pos
Args:
pos: the pos that will be `0`
max_length: max sequence length
Returns:
a list of relative positions
Raises:
ValueError: if pos is less than 0 or greater equal than max_length
"""
if pos < 0 or pos >= max_length:
raise ValueError(f"pos: {pos} is not in [0, {max_length})")
positions = list(range(0, max_length, 1))
positions = list(map(lambda x: abs(x - pos), positions))
return positions
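# Illustrative usage (assumed values): distances to position 2 in a length-5 sequence
assert construct_relative_positions(2, 5) == [2, 1, 0, 1, 2]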
|
152c59d288f797ef87f9e0dbf1b415b71f1fe9e7
| 8,829
|
import subprocess
import os
def score(molfile1, molfile2, path_to_lsalign='/data/rsg/chemistry/yangk/LSalign/src'):
"""
LSalign similarity score for two molecules, each in a separate molfile whose path is given as input.
"""
with open('tmp.txt', 'w') as f:
subprocess.call([os.path.join(path_to_lsalign, 'LSalign'), molfile1, molfile2, '-rf 1'], stdout=f)
with open('tmp.txt', 'r') as f:
lines = f.readlines()
score_row = lines[3].strip().split()
pc_score_avg = (float(score_row[2]) + float(score_row[3])) * 0.5
return pc_score_avg
|
42a12c1b88d14793f13068fcdd097a5fb51cddbf
| 8,831
|
import pathlib
def _match_path(p1, p2):
"""Compare two paths from right to left and return True if they could refer
to the same file.
As a special case, if the second argument is None, or empty, it is always
considered a match. This simplifies query logic when the target does not
have a path component.
    If p2 starts with "./" then the paths must match entirely. This is to allow
    addressing in the case where a path is a prefix of another.
"""
if not p2:
return True
part1 = pathlib.Path(p1).parts
part2 = pathlib.Path(p2).parts
if p2.startswith(".") and part2 and not part2[0].startswith("."):
minlen = 0
else:
minlen = min(len(part1), len(part2))
return part1[-minlen:] == part2[-minlen:]
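# Illustrative usage (hypothetical paths): comparison is right to left, an empty target always matches
assert _match_path("/home/user/project/file.txt", "project/file.txt")
assert _match_path("/home/user/project/file.txt", None)
assert not _match_path("/home/user/project/file.txt", "other/file.txt")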
|
7935e4312c444c9e2d0ee62611e1db5d6af210ad
| 8,832
|
import argparse
def create_parent_parser(prog_name):
"""
Create parent parser
Args:
prog_name (str): program name
Returns:
parser: parent argument parser
Raises:
DistributionNotFound: version of family not found
"""
parent_parser = argparse.ArgumentParser(prog=prog_name, add_help=False)
parent_parser.add_argument(
'-v', '--verbose',
action='count',
help='enable more verbose output')
return parent_parser
|
48cf982799d68ead1db28a95cf5d816a5cd2873c
| 8,833
|
import re
def has_text_an_image(text):
"""
    :param text: String that may contain base64 image data
    :return: a re.Match object if text contains base64 image data, otherwise None
"""
regex_to_extract = r'data:image.+\"'
return re.search(regex_to_extract, text)
|
c1d23738cc2f8f415a059a51b647d17157474655
| 8,835
|
def _extract_asm_mnemonic(asm):
"""
:param asm:
:type asm:
"""
return asm.split()[0].strip().upper()
|
058d92ceaa3fc6cae505c795c58a4e3f231bc849
| 8,838
|
def get_spaces(depth):
"""returns the required number of spaces
for indentation purpose"""
return ' ' * (depth * 4 - 2)
|
f59faaa963b8f1c16e20925b088eb1b7b8fda953
| 8,840
|
import re
def _clean(arr):
"""Convert ratio values for missing pieces of the tax into "unclassified"
and remove empty entries.
Rules:
- if all the cols are unclassified then remove row.
- if there is no classification for either genus, subfamily or family then remove row.
- collapse unclassified values from low to high
- example: Caudovirales unclassified unclassified unclassified
should be: Caudovirales
"""
converted_tax = []
for tax_value in arr[::-1]:
if tax_value == "" or \
tax_value == "\n" or \
re.match(r"^[+-]?\d(>?\.\d+)?$", tax_value):
converted_tax.append("unclassified")
else:
converted_tax.append(tax_value.strip())
# rule 1
if len(converted_tax) == converted_tax.count("unclassified"):
return tuple(("unclassified",))
# rule 2 and 3
result = []
for i in range(0, len(converted_tax) - 1):
if not (converted_tax[i] == "unclassified" and
converted_tax[i + 1] == "unclassified"):
result.append(converted_tax[i])
if converted_tax[-1] != "unclassified":
result.append(converted_tax[-1])
return tuple(result)
|
c898fe0b9ecf6afef47f76a8d4f74947a0df389f
| 8,841
|
from os import popen
def unixgetaddr(program):
"""Get the hardware address on a Unix machine."""
for line in popen(program):
words = line.lower().split()
if 'hwaddr' in words:
addr = words[words.index('hwaddr') + 1]
return int(addr.replace(':', ''), 16)
if 'ether' in words:
addr = words[words.index('ether') + 1]
return int(addr.replace(':', ''), 16)
|
aef054ae0f3d1d812dacce9da483d45aceb2bbfa
| 8,842
|
def sequence_id(sqlite_connection):
"""
Функция возвращает список из id всех остановок
"""
try:
cur = sqlite_connection.cursor()
cur.execute("SELECT _id FROM stopsker")
seq = list()
for elem in cur.fetchall():
seq.append(int(elem[0]))
except Exception as exp:
        print(exp)
exit()
else:
return seq
|
f73352b977c2c96a378985899f02674ab0c2cb14
| 8,844
|
def add_matches(flight_matches, flight_ids):
"""
Add new matches to the flight_ids dict.
Returns the number of newly matched flights.
"""
matches = 0
for i in flight_matches.index:
prev_id = flight_matches.loc[i, 'FLIGHT_ID_x']
next_id = flight_matches.loc[i, 'FLIGHT_ID_y']
if next_id not in flight_ids:
flight_ids[next_id] = prev_id
matches += 1
return matches
|
93d086cd580ac13622c4acaa359fa2a65b718ff3
| 8,845
|
def get_requirements_from_file(requirements_file):
"""
Get requirements from file.
    :param str requirements_file: Name of file to parse for requirements.
:return: List of requirements
:rtype: list(str)
"""
requirements = []
with open(requirements_file) as f:
for line in f:
line = line.strip()
if line and not line.startswith(("#", "-e")):
requirements.append(line)
return requirements
|
5ec6ad1f4c2b22aae1cfa7eb3888b3279ffeca31
| 8,846
|
from typing import Tuple
from typing import List
def extract_encoded_headers(payload: bytes) -> Tuple[str, bytes]:
"""This function's purpose is to extract lines that can be decoded using the UTF-8 decoder.
>>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\n".encode("utf-8"))
('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'')
>>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\nThat IS totally random.".encode("utf-8"))
('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'That IS totally random.')
"""
result: str = ""
lines: List[bytes] = payload.splitlines()
index: int = 0
for line, index in zip(lines, range(0, len(lines))):
if line == b"":
return result, b"\r\n".join(lines[index + 1 :])
try:
result += line.decode("utf-8") + "\r\n"
except UnicodeDecodeError:
break
return result, b"\r\n".join(lines[index + 1 :])
|
d1f3a371419b81b0e7ede1a7f90401cf7a89559f
| 8,847
|
def formatted_loss_components_string(components: dict) -> str:
"""
Formats the components returned by calc_LV_Lbeta
"""
total_loss = components['L_V']+components['L_beta']
fractions = { k : v/total_loss for k, v in components.items() }
fkey = lambda key: f'{components[key]:+.4f} ({100.*fractions[key]:.1f}%)'
s = (
'L_V+L_beta = {L:.4f}'
'\n L_V = {L_V}'
'\n L_V_attractive = {L_V_attractive}'
'\n L_V_repulsive = {L_V_repulsive}'
'\n L_beta = {L_beta}'
'\n L_beta_noise = {L_beta_noise}'
'\n L_beta_sig = {L_beta_sig}'
.format(L=total_loss,**{k : fkey(k) for k in components})
)
if 'L_beta_norms_term' in components:
s += (
'\n L_beta_norms_term = {L_beta_norms_term}'
'\n L_beta_logbeta_term = {L_beta_logbeta_term}'
.format(**{k : fkey(k) for k in components})
)
if 'L_p' in components:
s += f'\n L_p = {fkey("L_p")}'
for key in sorted(k for k in components.keys() if k.startswith('L_p_')):
s += f'\n {key:<20}= {fkey(key)}'
s += f'\nTOTAL: L_V + L_beta + L_p = {total_loss+components["L_p"]}'
return s
|
717419a5ddccc0c3b020b6412cd497e5d67c8602
| 8,849
|
def filter_ignore(annotations, filter_fns):
""" Set the ``ignore`` attribute of the annotations to **True** when they do not pass the provided filter functions.
Args:
annotations (dict or list): Dictionary containing box objects per image ``{"image_id": [box, box, ...], ...}`` or list of annotations
filter_fns (list or fn): List of filter functions that get applied or single filter function
Returns:
(dict or list): boxes after filtering
"""
if callable(filter_fns):
filter_fns = [filter_fns]
if isinstance(annotations, dict):
for _, values in annotations.items():
for anno in values:
if not anno.ignore:
for fn in filter_fns:
if not fn(anno):
anno.ignore = True
break
else:
for anno in annotations:
if not anno.ignore:
for fn in filter_fns:
if not fn(anno):
anno.ignore = True
break
return annotations
|
f59e6c481eb744245ae9503ae07ed88d1f3f8253
| 8,850
|
def update_dict_params_for_calibration(params):
"""
Update some specific parameters that are stored in a dictionary but are updated during calibration.
For example, we may want to update params['default']['compartment_periods']['incubation'] using the parameter
['default']['compartment_periods_incubation']
:param params: dict
contains the model parameters
:return: the updated dictionary
"""
if "n_imported_cases_final" in params:
params["data"]["n_imported_cases"][-1] = params["n_imported_cases_final"]
for location in ["school", "work", "home", "other_locations"]:
if "npi_effectiveness_" + location in params:
params["npi_effectiveness"][location] = params["npi_effectiveness_" + location]
for comp_type in [
"incubation",
"infectious",
"late",
"hospital_early",
"hospital_late",
"icu_early",
"icu_late",
]:
if "compartment_periods_" + comp_type in params:
params["compartment_periods"][comp_type] = params["compartment_periods_" + comp_type]
return params
|
8aaf9cb030076adfddb7c8d5740a2c8cc5c21c06
| 8,851
|
def counting_sort(numbers):
"""Sort given numbers (integers) by counting occurrences of each number,
then looping over counts and copying that many numbers into output list.
    Running time: O(n + k) where k is the range of numbers; a very large k dominates the run time.
    Memory usage: O(k) because the count array has one slot per value in the range."""
    # create a count array of zeros, one slot per value from 0 to max(numbers)
    temp_array = [0] * (max(numbers) + 1)
    # count the occurrences of each number
    for num in numbers:
        temp_array[num] += 1
numbers = []
# loop through the temp_array
for y in range(len(temp_array)):
        # while the count for this value is non-zero, append the value once per occurrence
while temp_array[y] != 0:
numbers.append(y)
temp_array[y] -= 1
return numbers
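# Illustrative usage (assumed values): intended for non-negative integers
assert counting_sort([3, 1, 2, 1, 0]) == [0, 1, 1, 2, 3]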
|
7123a215f685ff251c13cfd5210fe1887fe3795f
| 8,852
|
def _unit_fun(x):
"""unit function"""
return x
|
236dfe5d50b76549e601ef5c65c6a04cf8fcbeb8
| 8,856
|
import math
import statistics
def _get_average_da(streams_da: dict) -> dict:
"""Calculate average data availability among all data streams"""
total_results = {}
for k, v in streams_da.items():
for i, j in v.items():
if i not in total_results:
total_results[i] = []
total_results[i].append(j)
return {k: math.ceil(statistics.mean(v)) for k, v in total_results.items()}
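# Illustrative usage (hypothetical streams): per-key mean across streams, rounded up
streams = {"stream_a": {"2021-01": 90, "2021-02": 75},
           "stream_b": {"2021-01": 70}}
assert _get_average_da(streams) == {"2021-01": 80, "2021-02": 75}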
|
db2fde9e13b4cbb5ce43d5f3c2d2ff2abd30f487
| 8,857
|
import os
def basename(fullname):
"""os.path.basename"""
return os.path.basename(fullname)
|
316d7da5ed346b5923ee053b578be673f13f6c21
| 8,858
|
def fixture_extended():
"""Return a /forecast/extended/pollen/<ZIP> response."""
return {
"Type": "pollen",
"ForecastDate": "2018-06-12T00:00:00-04:00",
"Location": {
"ZIP": "80238",
"City": "DENVER",
"State": "CO",
"periods": [
{"Period": "2018-06-12T13:47:12.897", "Index": 6.60},
{"Period": "2018-06-13T13:47:12.897", "Index": 6.30},
{"Period": "2018-06-14T13:47:12.897", "Index": 7.60},
{"Period": "2018-06-15T13:47:12.897", "Index": 7.60},
{"Period": "2018-06-16T13:47:12.897", "Index": 7.30},
],
"DisplayLocation": "Denver, CO",
},
}
|
eff02c6271bc985473c86203cb7a69f4e673c827
| 8,859
|
def is_sequencing(lane_info):
"""
Determine if we are just sequencing and not doing any follow-up analysis
"""
    return lane_info['experiment_type'] in ('De Novo', 'Whole Genome')
|
822125f8603969a4624e07188874aae40f8752d3
| 8,860
|
import string
def _split_punc(source):
"""Split leading or trailing punctuation."""
tokens = source.split(" ")
new_tokens = []
for token in tokens:
if all(char in string.punctuation for char in token):
new_tokens.append(token)
continue
leading_punc = None
for punc in string.punctuation:
if token.startswith(punc):
leading_punc = punc
token = token.lstrip(punc)
break
trailing_punc = None
for punc in string.punctuation:
if token.endswith(punc):
trailing_punc = punc
token = token.rstrip(punc)
break
if leading_punc:
new_tokens.append(leading_punc)
if token:
new_tokens.append(token)
if trailing_punc:
new_tokens.append(trailing_punc)
return " ".join(new_tokens)
|
79f49cdfe0663b19f634c749c4bb5d515568fc05
| 8,861
|
def get_cluster_name(tags):
"""
Get the cluster name from the list of specified tags
:param tags: tags
:type tags: [str]
:returns: cluster name
:rtype: str
"""
for tag in tags:
if tag.startswith("storm.cluster.name:"):
return tag.replace("storm.cluster.name:", "")
return None
|
2b811f32d5c61bb093d6a68fcaecddbdce3be057
| 8,862
|
from typing import BinaryIO
import io
import wave
import struct
def fixture_two_chunk_plain_wav() -> BinaryIO:
"""Creates a fixture WAVE file with two distinct sections.
    The audio is mono at 100 Hz. Each section is 10 samples long. Samples in the first
    alternate between +/-(1 << 5) and in the second between +/-(1 << 10).
Returns:
File-like object with the bytes of the fixture WAVE file, positioned at the
beginning.
"""
sample_rate = 100
chunk_duration_samples = 10
plain_wav_io = io.BytesIO()
with wave.open(plain_wav_io, 'wb') as writer:
writer.setnchannels(1)
writer.setsampwidth(2)
writer.setframerate(sample_rate)
signs = [pow(-1, i) for i in range(chunk_duration_samples)]
for magnitude in [(1 << 5), (1 << 10)]:
writer.writeframes(
struct.pack('<%dh' % len(signs), *[magnitude * s for s in signs]))
plain_wav_io.seek(0)
return plain_wav_io
|
3c6d06409b40228348c3e5697b8fdc1b9bc73c90
| 8,863
|
def _detailed_parse_choice(choice):
"""Return Selected Choice's Full Name string as per its codename. Choices are based as per our server.
:param choice: str
The code name of the choice
:return: str
Return Selected Choice's Full Name string as per its codename
"""
# Return Choice Full Name string
if choice == "gensim-sum":
return "Text Rank Algorithm Based (Gensim)"
elif choice == "spacy-sum":
return "Frequency Based (Spacy)"
elif choice == "nltk-sum":
return "Frequency Based (NLTK)"
elif choice == "sumy-lsa-sum":
return "Latent Semantic Analysis Based (Sumy)"
elif choice == "sumy-luhn-sum":
return "Luhn Algorithm Based (Sumy)"
elif choice == "sumy-text-rank-sum":
return "Text Rank Algorithm Based (Sumy)"
|
3b7fe18c322426e478b5243ec75e3881d1bd690e
| 8,865
|
def compute_mass_list(dialog_idx, sen_idx):
"""
Most confusing step...
dialog_idx = [41, 81, 134, ...]
means the first dialogue of the subtitle is plain_text[0:41]
the second dialogue of the subtitle is plain_text[41:81]
the third dialogue of the subtitle is plain_text[81:134]
sen_idx = [81, 204, ...]
means the first sentence of the subtitle is plain_text[0:81]
the second sentence of the subtitle is plain_text[81:204]
    Usually len(dialog_idx) is larger than len(sen_idx), because one sentence may be so long
    that the video maker has to split it into multiple dialogues.
    What this function does is figure out which dialogues each sentence belongs to.
For example:
Sentence: Coding has been the bread and butter for developers since the dawn of computing. [(5, 41), (6, 81)]
means the "Coding has been the bread and butter for"(length equals to 41) is the 5th dialogue of the subtitle,
"developers since the dawn of computing"(from position 41 to 81) is the 6th dialogue of the subtitle.
    mass_list = [[(1, a), (2, b)], [(3, c)], [(4, d), (5, e), (6, f)]]
    means the subtitle includes 3 sentences (the length of the list record_each_sentence, len(record_each_sentence)).
    In the first sentence there are 2 dialogues: the first dialogue is first_sentence[0:a]
    and the second dialogue is first_sentence[a:b].
    In the second sentence there is 1 dialogue: the third dialogue of the whole subtitle is second_sentence[0:c].
:param dialog_idx:
:param sen_idx:
:return: record_each_sentence
"""
i = 0
j = 1
mass_list = []
one_sentence = []
while i < len(dialog_idx):
if dialog_idx[i] > sen_idx[j]:
mass_list.append(one_sentence)
one_sentence = []
j += 1
else:
one_sentence.append((i + 1, dialog_idx[i] - sen_idx[j - 1]))
i += 1
mass_list.append(one_sentence)
return mass_list
|
9745fbebde8302a1fa3b4fb4d94cc19eb3316458
| 8,866
|
import re
def do_parse_pod_name(text):
"""Find the pod name from the failure and return the pod name."""
p = re.search(r' pod (\S+)', text)
if p:
return re.sub(r'[\'"\\:]', '', p.group(1))
else:
return ""
|
01c5999a4b735973ef25fc9c0e563cfbd5e70610
| 8,868
|
def _testProduct_to_dict(product_obj, ctx):
"""
Returns testProduct instance in dict format.
Args:
product_obj (_TestProduct): testProduct instance.
ctx (SerializationContext): Metadata pertaining to the serialization
operation.
Returns:
dict: product_obj as a dictionary.
"""
return {"productId": product_obj.product_id,
"productName": product_obj.name,
"price": product_obj.price,
"tags": product_obj.tags,
"dimensions": product_obj.dimensions,
"warehouseLocation": product_obj.location}
|
7db80ae68cb6966273e53a4f0fb2d9aad52fa119
| 8,869
|
def hxltm_hastag_de_csvhxlated(csv_caput: list) -> list:
"""hxltm_hastag_de_csvhxlated [summary]
Make this type of conversion:
- 'item__conceptum__codicem' => '#item+conceptum+codicem'
- 'item__rem__i_ara__is_arab' => '#item+rem+i_ara+is_arab'
- '' => ''
Args:
csv_caput (list): Array of input items
Returns:
[list]:
"""
resultatum = []
for item in csv_caput:
if len(item):
resultatum.append('#' + item.replace('__', '+').replace('?', ''))
else:
resultatum.append('')
return resultatum
|
1ab1503c26c86c969e699236f97842ae74ae0ae5
| 8,870
|
def preprocessing(df, attribute):
"""
    This is the base preprocessing for French.
    It escapes apostrophes and replaces all occurrences of '-' with spaces.
    :df: pandas data frame.
    :attribute: attribute of df.
"""
return getattr(df, attribute).map(lambda sent: sent.lower().replace('\'', '\\\' ').replace('-', ' '))
|
6ca98d434ba5a43667b2dfc1501bf152f151c209
| 8,873
|
import pickle
def load_pickle(file, decompress=True):
"""
Load a .pickle file.
:param file: file .pickle to load.
    :param decompress: whether to unpickle the file; if False, return the raw bytes.
:return: loaded data.
"""
with open(file, "rb") as f:
if decompress:
data = pickle.load(f)
else:
data = f.read()
return data
|
ce86a034c87ddd3a74de40465d60cb2f55d1089c
| 8,877
|
def latex(df):
"""Converte o DF fornecido para tabela LaTeX"""
return print(df.to_latex())
|
6ab524733ac1f9040699f349564cf4321ae6e909
| 8,878
|
def get_accountinfo(msg: dict) -> str:
"""
Returns a dictionary containing the
account id and an array of prowler group checks.
"""
if msg == "":
raise IndexError
else:
try:
account_id = msg['Id']
return account_id
except KeyError as err:
raise err
|
496c3c1f0c64ecb8627f51bce69e6d5672406344
| 8,879
|
import os
def add_suffix(img_file,suffix):
"""
    Add a suffix to a given file name without changing the file extension.
:param img_file: img file, e.g. "xxx.jpg"
:param suffix: "——abcde"
:return: "xxx——abcde.jpg"
"""
name = os.path.splitext(img_file)[0]
type_ = os.path.splitext(img_file)[1]
file_name = name + suffix + type_
return file_name
|
06311bab61d084595b7c06fb09750a3d0c0a0abc
| 8,881
|
def shp2geojsonOgr(layer):
"""Shapefile to Geojson conversion using ogr."""
cmd = 'ogr2ogr -f GeoJSON -t_srs'\
+ ' crs:84'\
+ ' {layer}.geojson'\
+ ' {layer}.shp'
cmd = cmd.format(layer=layer)
return cmd
|
a1bbf42d83cf9d26542c02eb1a16da971a7d0a9e
| 8,882
|
def lut_canonical_potential_edge(potential_edge):
"""Returns a canonical name of a potential edge, with respect to LUT height.
Parameters
----------
potential_edge : str
Instantiated name of the potential edge to be canonicized.
Returns
-------
str
A canonical potential edge.
"""
prefix, u, v = potential_edge.split("__")
lut_span = int(v.split('_')[1]) - int(u.split('_')[1])
if lut_span < 0:
offset_str = "lutm%d_" % abs(lut_span)
else:
offset_str = "lutp%d_" % abs(lut_span)
canonical = "__".join([prefix, '_'.join(u.split('_')[2:]),\
offset_str + '_'.join(v.split('_')[2:])])
return canonical
|
ccae7b98de4aa18a2ffa72c0faf6b0fe7b001db0
| 8,883
|
def encode_boolean(value):
"""
Returns 1 or 0 if the value is True or False.
None gets interpreted as False.
Otherwise, the original value is returned.
"""
if value is True:
return 1
if value is False or value is None:
return 0
return value
|
4a6442438d3a7e85597ac76d2f48ce44ba505be2
| 8,885
|
def validate_sami_id(candidate_sami_id):
"""Dummy validation function for SAMI IDs, always returns true."""
return True
|
302df887aeafb7e93437e5103a8e0b77fea72b6e
| 8,886
|
def get_workloads(month):
"""Calls the same method on the Month model."""
return month.get_workloads()
|
0b71297c0e244c07e8c832c69037f6b428b60316
| 8,887
|
def associate_node_id(tr, node=""):
"""
Returns a dictionary with key 'id' and value as the ID associated
with the node string.
"""
return {"id": tr.get_uml_id(name=node)}
|
5e6eb1076cdeed9abc8b00d1de60a255f6292dd3
| 8,891
|
import argparse
def parse_arguments(args):
"""
Parse the arguments from the user
"""
parser = argparse.ArgumentParser(
description= "Filter UniRef EC list\n",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"-i","--input",
help="the UniRef EC triplet file [map_EC_to_triplet_AC_U50_U90_Swissprot_and_Trembl.txt]\n",
required=True)
parser.add_argument(
"-o","--output",
help="the filtered UniRef file to write\n",
required=True)
parser.add_argument(
"-u","--uniref",
help="the UniRef type\n",
choices=["50","90"],
required=True)
parser.add_argument(
"-m","--min-ec-level",
help="the minimum EC level for filtering\n",
choices=["1","2","3","4"],
required=True)
return parser.parse_args()
|
b1803e6d8d0c1101831838a69711770f9f27c131
| 8,892
|
import os
def extract_games(world_folder):
""" Locate the games in a world folder. """
envs = []
for filename in os.listdir(world_folder):
if filename.endswith(".ulx"):
envs.append(world_folder + "/" + filename)
return envs
|
a0f60fadf0662dc4d420edb213fd42bca654c800
| 8,893
|
def clause_words(clause_tree):
""" Returns the constants in a clause. These will be everything that is not a keyword. """
if clause_tree == []:
return []
elif clause_tree[0] == 'func':
assert(len(clause_tree) == 3)
if clause_tree[2] == []:
return [clause_tree[1]]
else:
return [clause_tree[1]] + [clause_words(c) for c in clause_tree[2]]
elif clause_tree[0] == 'var':
return []
else:
return [clause_words(c) for c in clause_tree[1:]]
|
5f72cb17397acb9313db5625afd6b4b073f4172f
| 8,894
|
import re
def find_nonAscii(text):
""" Return the first appearance of a non-ASCII character (in a `Match` object), or `None`. """
regex = re.compile(r'([^\x00-\x7F])+')
return re.search(regex, text)
|
bc299752eab5088214f9e1f62add388bf0721153
| 8,895
|
def linear_search(arr, x):
"""
Performs a linear search
:param arr: Iterable of elements
:param x: Element to search for
:return: Index if element found else None
"""
l = len(arr)
for i in range(l):
if arr[i] == x:
return i
return None
|
2cb03eef6c9bb1d63df97c1e387e9cbfe703769a
| 8,896
|
import argparse
def arg_parser() -> dict:
"""Parse CLI arguments.
Returns:
dict: parsed arguments in dictionary
"""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("words", nargs="+", type=str, help="Sentence that will be translated")
return parser.parse_args().__dict__
|
8962a16da185f145f8b5db335a65a2fd5d247f3c
| 8,899
|
def out_labels(G, q):
"""Returns a list of each of the labels appearing on the edges
starting at `q` in `G`.
Parameters
----------
G : labeled graph
q : vertex in `G`
Examples
--------
>>> G = nx.MultiDiGraph()
>>> G.add_edge(1, 2, label="a")
>>> G.add_edge(1, 3, label="a")
>>> G.add_edge(1, 1, label="b")
>>> sd.out_labels(G, 1)
['a', 'a', 'b']
"""
return [label for (_, _, label) in G.out_edges(q, data="label")]
|
9849b96b562c74259b631907335a40f807e11709
| 8,900
|
def convert_tf_config_to_jax_bert(config):
"""Convert TF BERT model config to be compatible with JAX BERT model.
Args:
config: dictionary of TF model configurations
Returns:
dictionary of param names and values compatible with JAX BERT model
"""
unnecessary_keys = ['initializer_range', 'backward_compatible',
'embedding_size']
for key in unnecessary_keys:
if key in config:
config.pop(key)
# change TF parameter names to match JAX parameter names
mapping = {
'attention_dropout_rate': 'attention_probs_dropout_prob',
'hidden_activation': 'hidden_act',
'dropout_rate': 'hidden_dropout_prob',
'emb_dim': 'hidden_size',
'mlp_dim': 'intermediate_size',
'max_len': 'max_position_embeddings',
'num_heads': 'num_attention_heads',
'num_layers': 'num_hidden_layers'
}
for jax_key, tf_key in mapping.items():
config[jax_key] = config.pop(tf_key)
return config
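# Illustrative usage (a trimmed, assumed TF-style config; every mapped key must be present):
tf_config = {
    'attention_probs_dropout_prob': 0.1, 'hidden_act': 'gelu', 'hidden_dropout_prob': 0.1,
    'hidden_size': 768, 'intermediate_size': 3072, 'max_position_embeddings': 512,
    'num_attention_heads': 12, 'num_hidden_layers': 12, 'initializer_range': 0.02,
}
jax_config = convert_tf_config_to_jax_bert(tf_config)
assert jax_config['emb_dim'] == 768 and 'hidden_size' not in jax_config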
|
2e527dbdbef404ebf3015eca2aa9eea2b9d892e0
| 8,902
|
def find_sequences_before(context, strip):
"""
Returns a list of sequences that are before the strip in the current context
"""
return [s for s in context.sequences if s.frame_final_end <= strip.frame_final_start]
|
d49a950c06c2a92d076d9790055c21d30afdd627
| 8,903
|
import math
def update_one_contribute_score(user_total_click_num):
"""
item cf update sim contribution score by user
"""
return 1/math.log10(1+user_total_click_num)
|
3e80ae2f85a53737d0e155ff5f97910f73c9053f
| 8,904
|
def get_percent_alloc(values):
"""
Determines a portfolio's allocations.
Parameters
----------
values : pd.DataFrame
Contains position values or amounts.
Returns
-------
allocations : pd.DataFrame
Positions and their allocations.
"""
return values.divide(
values.sum(axis='columns'),
axis='rows'
)
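# Illustrative usage (assumed position values; requires pandas). Each row is divided by its total:
#   values = pd.DataFrame({'AAPL': [1.0, 3.0], 'MSFT': [1.0, 1.0]})
#   get_percent_alloc(values)
#   #    AAPL  MSFT
#   # 0  0.50  0.50
#   # 1  0.75  0.25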
|
7f4ec48b2adbdb812292930e7fda50038b6d5e96
| 8,906
|
import argparse
def parse_args():
"""Argument parser."""
parser = argparse.ArgumentParser(description='Argument parser for AUPR-in/AUPR-out evaluations.')
parser.add_argument('--algo_name', type=str, default='e3outlier-0.1')
parser.add_argument('--positive', type=str, default='inliers', choices=['inliers', 'outliers'],
help='Whether the positive class is inliers or outliers')
parser.add_argument('--results_dir', type=str, default='./results')
parser.add_argument('--dataset', type=str, default='cifar10')
return parser.parse_args()
|
fe555da78a56ebd57ccca5fc160debabd0310c3d
| 8,907
|
def format_latex(ss):
"""
Formats a string so that it is compatible with Latex.
:param ss: The string to format
:type ss: string
:return: The formatted string
:rtype: string
"""
tt = (str(ss).replace('_', ' ')
          .replace('%', r'\%')
)
return tt
|
5081e65375faf592f2f1fb52d11b0dcee99fa85f
| 8,909
|
def MatchingFileType(file_name, extensions):
"""Returns true if the file name ends with one of the given extensions."""
    return any(file_name.lower().endswith(ext) for ext in extensions)
|
5fe5121d270cdfc13f6f9f3c72471fc3572b0efe
| 8,911
|
def escape_path(value: bytes) -> str:
"""
Take a binary path value, and return a printable string, with special
characters escaped.
"""
def human_readable_byte(b: int) -> str:
if b < 0x20 or b >= 0x7F:
return "\\x{:02x}".format(b)
elif b == ord(b"\\"):
return "\\\\"
return chr(b)
return "".join(human_readable_byte(b) for b in value)
|
07a0c28cd531d8e3bd4330afe1d4d51265cd80c4
| 8,912
|
def raw_reward_threshold(threshold):
"""Return a reward processor that cut off at a threshold."""
def fn(metadata):
if metadata['raw_reward'] > threshold:
return 1.
elif metadata['raw_reward'] > 0:
return -1
return metadata['raw_reward']
return fn
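# Illustrative usage (assumed threshold and metadata):
clip = raw_reward_threshold(10)
assert clip({'raw_reward': 15}) == 1.0
assert clip({'raw_reward': 5}) == -1   # positive but below the threshold
assert clip({'raw_reward': 0}) == 0    # non-positive rewards pass through unchanged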
|
1efbd90c352d99c6e65b05214d8ccb82bb155606
| 8,915
|
import socket
def get_hostname() -> str:
"""
Get the current hostname, or fall back to localhost.
"""
try:
return socket.getfqdn()
    except Exception:
return 'localhost'
|
c53bd9fae0fbbae0c0b4f84e64064d7bfd2fd61e
| 8,917
|
def SerializeEntries(entries):
"""Serializes given triplets of python and wire values and a descriptor."""
output = []
for python_format, wire_format, type_descriptor in entries:
if wire_format is None or (python_format and
type_descriptor.IsDirty(python_format)):
wire_format = type_descriptor.ConvertToWireFormat(python_format)
output.extend(wire_format)
return "".join(output)
|
fe89382e2be003bd6dce25b973d8bd3ad403c492
| 8,918
|
import socket
import subprocess
import json
def get_instance_identification():
"""
Gets an identifier for an instance. Gets EC2 instanceId if possible, else local hostname
"""
instance_id = socket.gethostname()
try:
# "special tactics" for getting instance data inside EC2
instance_data = subprocess.check_output(
["curl", "--silent", "http://169.254.169.254/latest/dynamic/instance-identity/document"])
# convert from json to dict
if "bytes" in str(type(instance_data)): instance_data = instance_data.decode()
instance_data = json.loads(instance_data)
# get the instanceId
if 'instanceId' in instance_data:
instance_id = instance_data['instanceId']
except Exception as e:
raise Exception("{}\nFailed to get instance identification. "
"Check if you are actually on an aws EC2 server.".format(e))
return instance_id
|
13528289a0db7337e3b7c074b617906f3ad23dbf
| 8,920
|
from datetime import datetime
def get_submission_data_from_pushshift(raw_submission):
"""Creates a submission object from a Pushshift Submission JSON
Parameters:
raw_submission (dict): Pushshift Submission instance
Returns:
dict: object with information about a submission, like body, author or URL
"""
if (not 'selftext' in raw_submission) or ('selftext' in raw_submission and raw_submission["selftext"] == "") or \
('selftext' in raw_submission and raw_submission["selftext"] is not None and raw_submission["selftext"].strip() == "") or \
raw_submission["selftext"] == "[deleted]" or raw_submission["selftext"] == "[removed]":
return None
date = datetime.fromtimestamp(raw_submission["created_utc"]) if 'created_utc' in raw_submission else None
return {
"author": raw_submission["author"] if 'author' in raw_submission else None,
"created_utc": raw_submission["created_utc"] if 'created_utc' in raw_submission else None,
"date": date.strftime('%Y-%m-%d %H:%M:%S') if date is not None else None,
"id": raw_submission["id"] if 'id' in raw_submission else None,
"is_original_content": raw_submission["is_original_content"] if 'is_original_content' in raw_submission else None,
"is_text_only": raw_submission["is_self"] if 'is_self' in raw_submission else None,
"locked": raw_submission["locked"] if 'locked' in raw_submission else None,
"num_comments": raw_submission["num_comments"] if 'num_comments' in raw_submission else None,
"over_18": raw_submission["over_18"] if 'over_18' in raw_submission else None,
"permalink": raw_submission["permalink"] if 'permalink' in raw_submission else None,
"score": raw_submission["score"] if 'score' in raw_submission else None,
"body": raw_submission["selftext"] if 'selftext' in raw_submission else None,
"spoiler": raw_submission["spoiler"] if 'spoiler' in raw_submission else None,
"stickied": raw_submission["stickied"] if 'stickied' in raw_submission else None,
"subreddit_id": raw_submission["subreddit_id"] if 'subreddit_id' in raw_submission else None,
"subreddit_name": raw_submission["subreddit"] if 'subreddit' in raw_submission else None,
"title": raw_submission["title"] if 'title' in raw_submission else None,
"upvote_ratio": raw_submission["upvote_ratio"] if 'upvote_ratio' in raw_submission else None,
"url": raw_submission["url"] if 'url' in raw_submission else None
}
|
0fa00524408623127d32fb6db89398285953ec9b
| 8,921
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.