content
stringlengths 35
416k
| sha1
stringlengths 40
40
| id
int64 0
710k
|
---|---|---|
import json
def read_json(file_path: str) -> dict:
    """Load a JSON document from *file_path*.

    Args:
        file_path (str): Location of the file.

    Returns:
        dict: The parsed JSON content (a dict in most cases).
    """
    with open(file_path, "r") as handle:
        return json.load(handle)
def str_is_none(source):
    """Return True when *source* is empty or a null-like marker.

    Treats '', 'NULL', 'None' and None itself as "no value".
    """
    return source is None or source in ('', 'NULL', 'None')
def get_weather_units(units):
    """Return the measurement-unit symbol for the given system of units.

    'metric' -> '°C', 'kelvin' -> 'K', anything else (e.g. 'imperial') -> '°F'.

    :param str units: the system of units (case-insensitive).
    :return: a str representation of the units of measurement.
    """
    degree_symbol = '\u00b0'
    system = units.lower()  # normalize once instead of per comparison
    if system == 'metric':
        return f'{degree_symbol}C'
    if system == 'kelvin':
        return 'K'
    return f'{degree_symbol}F'
from typing import Callable
from typing import Iterable
from typing import Dict
from typing import Any
def groupby_many_reduce(key: Callable, reducer: Callable, seq: Iterable):
    """Group a collection by a key function whose value is built by a reducer.

    Parameters:
        key (Callable): Maps an element to an iterable of group keys.
        reducer (Callable): Called as reducer(accumulated_or_None, element)
            to produce the new accumulated value for each group.
        seq (Iterable): Collection to group.

    Returns:
        Dict[Any, Any]: group key -> reduced value.
    """
    grouped: Dict[Any, Any] = {}
    for item in seq:
        for group in key(item):
            # .get() yields None on first sight, matching the reducer contract.
            grouped[group] = reducer(grouped.get(group), item)
    return grouped
def categorize(state):
    """Classify a 3x3 tic-tac-toe state.

    Cell values: 2 = our mark, 0 = opponent's mark, 1 = empty.

    Returns:
        0 if player 2 has three in a row (game won),
        1 if player 0 has three in a row (game lost),
        2 if the board is full with no winner (tie),
        3 otherwise (incomplete game).
    """
    def _has_line(player):
        # Rows and columns.
        for i in range(3):
            if all(state[i][j] == player for j in range(3)):
                return True
            if all(state[j][i] == player for j in range(3)):
                return True
        # Main diagonal, then anti-diagonal.
        if all(state[i][i] == player for i in range(3)):
            return True
        return all(state[i][2 - i] == player for i in range(3))

    # Won is checked before lost, matching the original precedence.
    if _has_line(2):
        return 0  # GAME WON
    if _has_line(0):
        return 1  # GAME LOST
    # Any empty cell (value 1) means the game is still in progress.
    if any(state[i][j] == 1 for i in range(3) for j in range(3)):
        return 3
    return 2  # TIE
def _FetchAllFiles(input_api, white_list, black_list):
"""Hack to fetch all files."""
# We cannot use AffectedFiles here because we want to test every python
# file on each single python change. It's because a change in a python file
# can break another unmodified file.
# Use code similar to InputApi.FilterSourceFile()
def Find(filepath, filters):
if input_api.platform == 'win32':
filepath = filepath.replace('\\', '/')
for item in filters:
if input_api.re.match(item, filepath):
return True
return False
files = []
path_len = len(input_api.PresubmitLocalPath())
for dirpath, dirnames, filenames in input_api.os_walk(
input_api.PresubmitLocalPath()):
# Passes dirnames in black list to speed up search.
for item in dirnames[:]:
filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
if Find(filepath, black_list):
dirnames.remove(item)
for item in filenames:
filepath = input_api.os_path.join(dirpath, item)[path_len + 1:]
if Find(filepath, white_list) and not Find(filepath, black_list):
files.append(filepath)
return files | 99f3a88be278af877ab4dec2a164ce4093a16e82 | 9,602 |
def store(src, rel, dst):
    """Build the SQL to store an edge in the backing store.

    :param src: The source node.
    :param rel: The relation (used as the table name).
    :param dst: The destination node.
    :returns: (statement, parameters) tuple ready for cursor.execute().
    """
    statement = 'INSERT INTO %s (src, dst) VALUES (?, ?)' % rel
    return statement, (src, dst)
def irpf(base, porcentaje=12.5, prorrateado=False):
    """Compute the IRPF (income-tax) withholding for a salary.

    Args:
        base: gross amount; must be a float for a result to be produced.
        porcentaje: percentage to withhold (default 12.5); must be a float.
        prorrateado: kept for backward compatibility; it currently has no
            effect on the result (the original computed base/6 when True
            but never used that value — dead code removed).

    Returns:
        The withheld amount, or None when base/porcentaje are not floats.
    """
    if isinstance(base, float) and isinstance(porcentaje, float):
        return (base / 100) * porcentaje
    return None
def get_by_index(items, index):
    """Template filter: return ``items[index]`` or None when out of range.

    Usage in template::

        {% load list_to_columns %}
        {{ items|get_by_index:0 }}

    Args:
        ``items`` (list): A list of elements.
        ``index`` (int): The position of the element to be returned.

    Returns:
        The element at *index*, or None if the index is out of range.
    """
    result = None
    try:
        result = items[index]
    except IndexError:
        pass
    return result
def bubble_sort(array):
    """Sort a list of integers in place using bubble sort and return it.

    Input: A list of integers.
    Output: The same list, sorted in ascending order.

    The original used three nested loops (accidentally O(n^3)); this is the
    standard single pass-exchange bubble sort — O(n^2) worst case, O(n) when
    the input is already sorted thanks to the early exit.
    """
    for unsorted_end in range(len(array) - 1, 0, -1):
        swapped = False
        for i in range(unsorted_end):
            if array[i] > array[i + 1]:
                array[i], array[i + 1] = array[i + 1], array[i]
                swapped = True
        if not swapped:
            break  # no swaps on a full pass: the list is sorted
    return array
from typing import Dict
from typing import Any
def _read_info(info_filename: str) -> Dict[str, Any]:
"""read info into dictionary"""
with open(info_filename, "r") as info_file:
lines = info_file.readlines()
info = {}
for line in lines:
key, val = line.strip().split("\t")
if key == "stroke":
if val is not None:
info[key] = float(val)
else:
info[key] = int(val)
return info | 884a0a309380e1b077c5d1830ca3b6803804644b | 9,609 |
import importlib
def load_connector(connector_info):
    """Import the connector module named by *connector_info* and return an
    instance of its ``Connector`` class."""
    module_path = f"bookwyrm.connectors.{connector_info.connector_file}"
    module = importlib.import_module(module_path)
    return module.Connector(connector_info.identifier)
import requests
def get_globally_rescoped_token(globaltoken, defaultid):
    """Get a globally-rescoped, project-scoped auth token.

    Args:
        globaltoken (str): valid global token.
        defaultid (str): default project id to scope to.

    Returns:
        requests.Response with the scoped token on success, or the string
        "Global Rescope Token Error" when the HTTP request fails (string
        return kept for backward compatibility with existing callers).
    """
    identity_url = 'https://identity.gls.cloud.global.fujitsu.com/v3/auth/tokens'
    payload = {
        "auth": {
            "identity": {
                "methods": ["token"],
                "token": {"id": globaltoken},
            },
            "scope": {"project": {"id": defaultid}},
        }
    }
    try:
        # Narrowed from a bare 'except': only network/HTTP failures are
        # expected here; programming errors now propagate instead of being
        # silently swallowed.
        return requests.post(
            identity_url,
            headers={'Content-Type': 'application/json',
                     'Accept': 'application/json'},
            json=payload,
        )
    except requests.exceptions.RequestException:
        return "Global Rescope Token Error"
def get_output_metadata(packer, sample_dim_name):
    """Build xarray-style metadata for a packer's values, assuming arrays
    are [sample(, z)].

    Single-feature variables get only the sample dimension; multi-feature
    variables additionally get a "z" dimension.
    """
    def _dims(name):
        count = packer.feature_counts[name]
        return [sample_dim_name] if count == 1 else [sample_dim_name, "z"]

    return tuple(
        {"dims": _dims(name), "units": "unknown"}
        for name in packer.pack_names
    )
import re
def find_vendor(macs: str, neigh: list):
    """Annotate neighbour entries with the NIC vendor from an IEEE OUI dump.

    Args:
        macs: raw IEEE OUI database text (lines like
            ``AA-BB-CC   (hex)\\t\\tVendor Name``).
        neigh: list of neighbour dicts; each must have "type" and "mac"
            (dash-separated) keys.  Mutated in place: a "vendor" key is set
            on every entry (None when no match or not ethernet).

    Returns:
        The same *neigh* list, with "vendor" filled in.
    """
    oui_db = []
    # Build the OUI -> vendor table from the IEEE dump.
    for entry in macs.splitlines():
        # Raw string fixes the invalid '\-' escape of the original pattern
        # (same match behavior, no DeprecationWarning).
        if re.match(r"^[A-Z0-9]+-", entry):
            oui_db.append({
                "oui": entry.split(" ")[0],
                "vendor": entry.split("\t\t")[1],
            })
    # Search for vendors based on each neighbour's MAC prefix.
    for entry in neigh:
        entry.update({"vendor": None})
        if entry["type"] == "ethernet":
            # First three MAC groups form the OUI.
            oui = "-".join(entry["mac"].split("-")[0:3])
            for record in oui_db:
                if record["oui"] == oui:
                    entry.update({"vendor": record["vendor"]})
    return neigh
import itertools
def euler38():
    """Solution for problem 38 (largest 1-9 pandigital concatenated product).

    '123456789' decomposes into at least two elements, the smallest being at
    most 4 characters long, so candidates below 10000 suffice.
    """
    target = {str(d) for d in range(1, 10)}
    best = 0
    for base in range(10000):
        concat = ""
        multiplier = 1
        while len(concat) < len(target):
            concat += str(base * multiplier)
            multiplier += 1
        if len(concat) == len(target) and set(concat) == target:
            best = max(best, int(concat))
    return best
def builtin_swap(a, b):
    """Stack effect ( a b -- b a ): return the two values reversed."""
    return b, a
def check_set_dimension():
    """Validate a SetDimension and return all errors and warnings.

    :returns: (errors, warnings) — two lists, both currently always empty.
    """
    errors, warnings = [], []
    return errors, warnings
from dateutil import tz
def nordic2Arrival(data, arrival_id):
    """Convert a NordicData observation into a CSS-style Arrival row string.

    :param NordicData data: NordicData object to be converted
    :param int arrival_id: arrival id of the assoc
    :returns: formatted arrival string

    Changes vs the original: dateutil's tzutc() is replaced with the
    equivalent stdlib ``datetime.timezone.utc``; the dead ``per = -1.0``
    that clobbered max_amplitude_period is removed; the azimuth variable is
    passed to format() instead of a duplicated -1.0 literal.
    """
    from datetime import timezone  # stdlib replacement for dateutil.tz.tzutc()

    station_code = data.station_code
    # Interpret the observation time as UTC and take its unix timestamp.
    ar_time = data.observation_time.replace(tzinfo=timezone.utc).timestamp()
    jdate = data.observation_time.date()
    station_assoc_id = -1
    channel_id = -1
    if data.sp_component is not None:
        channel = data.sp_component.lower()
    else:
        channel = '-'
    if channel == 'h':
        channel = 'z'
    iphase = data.phase_type if data.phase_type is not None else '-'
    stype = "-"
    deltime = -1.0
    azimuth = -1.0
    delaz = -1.0
    if data.apparent_velocity is not None:
        slow = 110.7 / data.apparent_velocity
    else:
        slow = -1.0
    delslo = -1.0
    ema = -1.0
    rect = -1.0
    amp = data.max_amplitude if data.max_amplitude is not None else -1.0
    # BUG FIX: the original unconditionally reset 'per' to -1.0 right after
    # computing it from max_amplitude_period, discarding the value; the
    # conditional now takes effect.  TODO(review): confirm the reset was not
    # a deliberate way to disable the field.
    if data.max_amplitude_period is not None:
        per = data.max_amplitude_period
    else:
        per = -1.0
    logat = -1.0
    clip = '-'
    fm = '-'
    snr = -1.0
    if data.quality_indicator is not None:
        qual = data.quality_indicator.lower()
    else:
        qual = '-'
    auth = '-'
    commid = 1
    lddate = '-'
    a_format = (
        "{sta:6s} {ar_time:17.5f} {arid:8d} {jdate:8d} {stassid:8d} "
        "{chanid:8d} {chan:8s} {iphase:8s} {stype:1s} {deltim:6.3f} "
        "{azimuth:7.2f} {delaz:7.2f} {slow:7.2f} {delslo:7.2f} "
        "{ema:7.2f} {rect:7.3f} {amp:10.1f} {per:7.2f} {logat:7.2f} "
        "{clip:1s} {fm:2s} {snr:10.2f} {qual:1s} {auth:15s} {commid:8d} "
        "{lddate:17s}"
    )
    return a_format.format(
        sta=station_code,
        ar_time=ar_time,
        arid=arrival_id,
        jdate=int(jdate.strftime("%Y%j")),
        stassid=station_assoc_id,
        chanid=channel_id,
        chan=channel,
        iphase=iphase,
        stype=stype,
        deltim=deltime,
        azimuth=azimuth,  # was a hard-coded -1.0 literal
        delaz=delaz,
        slow=slow,
        delslo=delslo,
        ema=ema,
        rect=rect,
        amp=amp,
        per=per,
        logat=logat,
        clip=clip,
        fm=fm,
        snr=snr,
        qual=qual,
        auth=auth,
        commid=commid,
        lddate=lddate,
    )
def readFASTA(text, results=None):
    """Parse FASTA-format text into {sequence_name: sequence}.

    @param text: text in FASTA format
    @param results: dict to fill with entries; a fresh dict is created when
        omitted.  (The original used a mutable default argument, so every
        call without *results* shared — and polluted — the same dict.)
    @return: dictionary with new entries like {sequence_name: sequence_str}
    @note: call this function only if biopython can't be used instead
    """
    if results is None:
        results = {}
    name = ''
    seq_parts = []
    for line in text.split('\n'):
        if not line:
            continue
        if line[0] == '>':
            # Flush the previously accumulated record, if any.
            if seq_parts:
                results[name] = ''.join(seq_parts)
                seq_parts = []
            name = line[1:]
        else:
            seq_parts.append(line)
    # Flush the last record.
    if seq_parts:
        results[name] = ''.join(seq_parts)
    return results
import subprocess
import shlex
def RunSingleCommand(command_and_env):
    """Run one command and return (returncode, combined stdout/stderr text).

    *command_and_env* is a (command string, environment dict) pair; failures
    are reported as (-1, message) rather than raised.
    """
    command, env = command_and_env
    try:
        proc = subprocess.Popen(
            shlex.split(command),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            env=env)
        stdout, _ = proc.communicate()
    except OSError as exc:
        return -1, 'Exception found in command {0}. Exception is: {1}.'.format(
            repr(command), str(exc))
    except Exception as exc:
        return -1, 'Unhandled exception in command {0}. Exception is: {1}.'.format(
            repr(command), str(exc))
    return proc.returncode, stdout.decode('utf-8')
def get_sample_interval(info, chan_info):
    """Return the sampling interval (in seconds) for one channel."""
    # File-format versions before 5 (system_id 1..5) store a per-channel
    # divisor combined with microsecond timing fields.
    if info['system_id'] in [1, 2, 3, 4, 5]:
        ticks = chan_info['divide'] * info['us_per_time'] * info['time_per_adc']
        return ticks * 1e-6
    return chan_info['l_chan_dvd'] * info['us_per_time'] * info['dtime_base']
def get_input_artifact_location(job):
    """Return (bucket, key) of the job's first input artifact on S3."""
    s3_location = job["data"]["inputArtifacts"][0]["location"]["s3Location"]
    return s3_location["bucketName"], s3_location["objectKey"]
import os
def read_query(query_filename):
    """Read a named query from this project's ``queries`` folder.

    Parameters
    ----------
    query_filename: str
        Base name of the query (looked up as ``../queries/<name>.rq``
        relative to this module).

    Returns
    -------
    query: str
        The query text, with placeholders for parameters, ready to be
        formatted.
    """
    queries_dir = os.path.join(os.path.dirname(__file__), "../queries")
    with open(os.path.join(queries_dir, "{}.rq".format(query_filename))) as handle:
        return handle.read()
def hanoi(n, a, b, c):
    """Return the list of (source, target) moves that transfer *n* disks
    from peg *a* to peg *b*, using *c* as the spare peg.

    >>> hanoi(2, "a", "b", "c")
    [('a', 'c'), ('a', 'b'), ('c', 'b')]
    """
    if n == 0:
        return []
    moves = hanoi(n - 1, a, c, b)
    moves.append((a, b))
    moves.extend(hanoi(n - 1, c, b, a))
    return moves
def get_all(log):
    """Return every document from *log* (delegates to ``log.all()``)."""
    return log.all()
def addParams(params, subparser):
    """Register each param on *subparser* as a command-line argument.

    Each param contributes a ``--name`` flag (plus ``-alt`` when set); the
    argument style depends on param.choices / param.accepts.
    Returns the subparser for chaining.
    """
    if params is None:
        return subparser
    for param in params:
        flags = ["--{}".format(param.name)]
        if param.alt is not None:
            flags.append("-{}".format(param.alt))
        if param.choices:
            subparser.add_argument(*flags, choices=param.choices,
                                   default=param.default, help=param.help)
        elif param.accepts == "boolean":
            subparser.add_argument(*flags, action='store_true', help=param.help)
        elif param.accepts == "list":
            subparser.add_argument(*flags, nargs='*', default=param.default,
                                   help=param.help)
        else:
            subparser.add_argument(*flags, default=param.default,
                                   help=param.help)
    return subparser
def weighted_rating(clean_anime, quantile, mean):
    """IMDB-style weighted rating: blend the item's own rating with the
    global *mean*, weighted by how many members rated it.

    :var term: fraction of the weight carried by the item's own rating.
    """
    term = clean_anime['members'] / (quantile + clean_anime['members'])
    own_part = clean_anime['rating'] * term
    global_part = (1 - term) * mean
    return own_part + global_part
from typing import Any
def _print_card(card: Any) -> str:
"""helper for ``_validate_msg``"""
try:
return card.write_card(size=8)
except RuntimeError:
return '' | 0d85f2b7bfec942b1ff6e5d0c80facd1bb4824a0 | 9,633 |
import hashlib
def getImageHash(img):
    """Return the hex MD5 digest of a Pillow image's raw pixel bytes."""
    return hashlib.md5(img.tobytes()).hexdigest()
from typing import Counter
def character_replacement(s: str, k: int) -> int:
    """https://leetcode.com/problems/longest-repeating-character-replacement

    Sliding window: a window is feasible when all but its most frequent
    character can be replaced using at most *k* edits.
    """
    counts = Counter()
    window_start = 0
    best = 0
    top_count = 0  # highest per-character count seen in any window so far
    for window_end, ch in enumerate(s):
        counts[ch] += 1
        if counts[ch] > top_count:
            top_count = counts[ch]
        window = window_end - window_start + 1
        if window > top_count + k:
            # Too many replacements needed: slide the left edge forward.
            counts[s[window_start]] -= 1
            window_start += 1
            window -= 1
        if window > best:
            best = window
    return best
import ssl
def create_ssl_context(verify=True, cafile=None, capath=None):
    """Set up and return an SSL context, portably across Python versions.

    Parameters:
        verify: when True, require and verify server certificates.
        cafile: optional CA bundle file used for verification.
        capath: optional directory of CA certificates.

    Returns:
        A configured ssl.SSLContext, or None when the running Python is so
        old that ssl.SSLContext does not exist (2.7.8 / 3.1 or older).
    """
    # This is somewhat tricky to do it right and still keep it
    # compatible across various Python versions.
    try:
        # The easiest and most secure way.
        # Requires either Python 2.7.9 or 3.4 or newer.
        context = ssl.create_default_context(cafile=cafile, capath=capath)
        if not verify:
            # The default context verifies; explicitly relax both hostname
            # and certificate checks when the caller opts out.
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
    except AttributeError:
        # ssl.create_default_context() is not available.
        try:
            # PROTOCOL_SSLv23 means "negotiate the best protocol", not
            # SSLv3 — the insecure v2/v3 protocols are disabled just below.
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        except AttributeError:
            # We don't even have the SSLContext class. This smells
            # Python 2.7.8 or 3.1 or older. Bad luck.
            return None
        context.options |= ssl.OP_NO_SSLv2
        context.options |= ssl.OP_NO_SSLv3
        if verify:
            context.verify_mode = ssl.CERT_REQUIRED
            if cafile or capath:
                context.load_verify_locations(cafile, capath)
            else:
                # No explicit CAs given: fall back to the system store.
                context.set_default_verify_paths()
        else:
            context.verify_mode = ssl.CERT_NONE
    return context
def compressnumbers(s):
    """Collapse *s* down to its digit runs, dropping each run's leading
    zeros — e.g. 'usb-0000:00:14.0-2' -> '142'.

    Useful for deriving short, mostly-unique keys from number-heavy strings
    such as USB device paths.
    """
    runs = []
    current = ''
    for ch in s:
        if ch in '0123456789':
            # Skip leading zeros of the current run.
            if current or ch != '0':
                current += ch
        else:
            runs.append(current)
            current = ''
    runs.append(current)
    return ''.join(runs)
def _get_tile_output_shape(shape, multiples):
"""compute output shape of tile"""
if multiples is None:
return shape
if not isinstance(shape, (list, tuple)):
raise TypeError("Input shape of Tile must be of type list or tuple")
if not isinstance(multiples, (list, tuple)):
raise TypeError("multiples of Tile must be of type list or tuple")
shape = list(shape)
multiples = list(multiples)
diff_len = len(multiples) - len(shape)
if diff_len < 0:
raise ValueError("Dimensions of multiples{} < dimensions of input{} in Tile".format(multiples, shape))
if diff_len > 0:
for _ in range(diff_len):
shape.insert(0, 1)
shape_compatible = True
output_shape = []
input_reshape = []
output_reshape = []
for sh, mul in list(zip(shape, multiples)):
dim = sh * mul
output_shape.append(dim)
if sh == 1 or mul == 1:
input_reshape.append(sh)
output_reshape.append(dim)
else:
shape_compatible = False
input_reshape.append(1)
input_reshape.append(sh)
output_reshape.append(mul)
output_reshape.append(sh)
return output_shape, input_reshape, output_reshape, shape_compatible | ca787b55bed7591c6cf46aa6908d2c8d7053e755 | 9,639 |
import csv
def LoadVNSIM(nsim_csv):
    """Map degraded-file basename -> NSIM value, read from a CSV file.

    The CSV rows are (reference path, degraded path, nsim value), with an
    optional header whose third column is 'vnsim'.

    Args:
        nsim_csv: Path to the CSV file with NSIM values.

    Returns:
        Dictionary keyed by degraded file basename (matching the keys of
        the MOS dictionary) with the NSIM value as a string.
    """
    nsim_by_file = {}
    with open(nsim_csv, 'r') as csvfile:
        for row in csv.reader(csvfile):
            if row[2] == 'vnsim':  # header row
                continue
            # Key on the basename so it matches the MOS dictionary keys.
            nsim_by_file[row[1].split('/')[-1]] = row[2]
    return nsim_by_file
def getCharOverlapCount(from1, to1, from2, to2):
    """Return the number of characters shared by the two inclusive ranges
    [from1, to1] and [from2, to2].

    The ranges may be given in either order; 0 is returned when they do not
    overlap.  Replaces the original's manual three-variable swap and nested
    branches with the standard clamped interval-intersection formula.
    """
    # Inclusive-interval intersection length, clamped at zero.
    overlap = min(to1, to2) - max(from1, from2) + 1
    return max(0, overlap)
import os
def rename_to_text(file_path):
    """Ensure the file at *file_path* has a ``.txt`` extension, renaming it
    on disk when needed (some output files have no extension at all).

    :param file_path: input file path (forward-slash separated)
    :return: (possibly-updated file path, original base file name)

    NOTE(review): the returned *filename* is the base name *before* any
    rename; this matches the original behavior but may not be the intent —
    confirm with callers before changing it.
    """
    basename = file_path.split('/')[-1]
    if not basename.endswith('.txt'):
        new_path = file_path + '.txt'
        os.rename(file_path, new_path)
        file_path = new_path
    return file_path, basename
import argparse
def get_args():
    """Parse the command-line arguments for the Ms. Pac-Man player.

    Returns:
        Parsed command-line arguments (argparse.Namespace).
    """
    parser = argparse.ArgumentParser(description="plays Ms. Pac-Man")
    option_specs = [
        (["--no-learn"],
         dict(default=False, action="store_true",
              help="play without training")),
        (["--episodes"],
         dict(default=1, type=int, help="number of episodes to run")),
        (["--learning-rate"],
         dict(default=0.01, type=float, help="learning rate")),
        (["--no-display"],
         dict(action="store_false", default=True, dest="display",
              help="do not display the game on the screen (faster)")),
        (["--map-display"],
         dict(action="store_true", default=False,
              help="whether to display the map on screen or not")),
        (["--seed"],
         dict(default=None, type=int,
              help="seed for random number generator to use")),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
import re
def clean_python_name(s):
    """Sanitize *s* into a valid, lowercase Python identifier.

    Intended for dataframe column names: strips leading characters until a
    letter or underscore, replaces every other invalid character with '_',
    and lowercases the result (case-insensitive column-name convention).

    Based on
    https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python

    Example:
    .. code:: python
        df.rename(columns=clean_python_name)

    Args:
        - s (str): string to be converted

    Returns:
        str: cleaned string
    """
    # Drop leading characters until a letter or underscore; trim spaces first.
    stripped = re.sub('^[^a-zA-Z_]+', '', s.strip())
    # Replace every remaining invalid character with an underscore.
    sanitized = re.sub('[^0-9a-zA-Z_]', '_', stripped)
    return sanitized.lower()
import pickle
import os
def open_pickle_jar(directory, filename):
    """Load and return the pickled object stored at ``directory/filename``.

    The original leaked the file handle by passing ``open(...)`` directly to
    ``pickle.load``; a ``with`` block now guarantees it is closed.
    """
    with open(os.path.join(directory, filename), 'rb') as handle:
        return pickle.load(handle)
def get_best_of_n_avg(seq, n=3):
    """Average of the *n* smallest values of ``seq``.

    The divisor is always *n*, even when *seq* has fewer elements.
    """
    smallest = sorted(seq)[:n]
    return sum(smallest) / n
def count_distinct_col(curs, table_name, col='y'):
    """Return the number of distinct values in *col* of *table_name*.

    Args:
        curs (sqlite3.Cursor): cursor to the database
        table_name (str): name of table to query
        col (str): column to count distinct values for

    Returns:
        (int) number of distinct values for col
    """
    query = "SELECT COUNT(DISTINCT {}) FROM {};".format(col, table_name)
    return curs.execute(query).fetchone()[0]
from pathlib import Path
import os
def does_file_exist(file_path: Path) -> bool:
    """Check whether *file_path* refers to an existing regular file.

    Accepts either a Path or a str; uses pathlib (already imported in this
    module) instead of the redundant os.path call.
    """
    print(f"... Checking if file [{file_path}] exists")
    if Path(file_path).is_file():
        print("...... File exists...")
        return True
    return False
def reduce_dataset_by_column_value(df, colname, values):
    """Return *df* restricted to rows whose *colname* value is in *values*,
    with an added '<colname>_index' column mapping each remaining value to a
    dense 0..n-1 index.

    BUG FIX: the original filtered on a hard-coded 'locus_tag' column
    instead of *colname*, contradicting its own docstring; it now filters on
    *colname*.  Also copies the slice (avoiding pandas SettingWithCopy
    warnings) and drops the dead pre-filter nvals computation.
    """
    reduced = df.loc[df[colname].isin(values)].copy()
    # Dense index for the values that survived the filter.
    new_ids = reduced[colname].unique()
    lookup = dict(zip(new_ids, range(len(new_ids))))
    reduced['{0}_index'.format(colname)] = reduced[colname].replace(lookup).values
    return reduced
import struct
def encode_string(input_string):
    """Encode *input_string* as UTF-8 bytes, prefixed with the encoded
    length packed as a little-endian unsigned short (so max 65535 bytes —
    valuable info when decoding later on)."""
    payload = input_string.encode("utf-8")
    header = struct.pack("<H", len(payload))
    return header + payload
import argparse
def handle_args():
    """Parse the command-line options for cmdmock and return them."""
    parser = argparse.ArgumentParser(
        description="Autogenerates a script to mock the output of a command",
        epilog="Example: cmdmock sensors -u")
    parser.add_argument(
        '-f', '--file', dest='training_file',
        help='Optionally specify a training file with line-separated invocations')
    parser.add_argument(
        '-v', '--verbose', action='store_true', dest='verbose', default=False,
        help='Print informative messages')
    return parser.parse_args()
def parse_vmin_vmax(container, field, vmin, vmax):
    """Return (vmin, vmax), filling missing values from the field's
    valid_min/valid_max metadata, defaulting to -6 and 100."""
    field_meta = container.fields[field]
    if vmin is None:
        vmin = field_meta['valid_min'] if 'valid_min' in field_meta else -6
    if vmax is None:
        vmax = field_meta['valid_max'] if 'valid_max' in field_meta else 100
    return vmin, vmax
def float2bin(afloat: float):
    """Convert a real number (possibly with a fractional part) to binary.

    Args:
        afloat: the real number to convert.

    Returns:
        The binary representation as a float (no '0b' prefix),
        e.g. 2.5 -> 10.1.
    """
    whole = int(afloat)
    fraction = afloat - whole
    frac_digits = "."  # binary digits of the fractional part
    # Repeated doubling peels off one binary digit per step; terminates
    # because a Python float has a finite binary expansion.
    while fraction != 0.0:
        fraction *= 2
        bit = int(fraction)
        frac_digits += str(bit)
        fraction -= bit
    return float(bin(whole)[2:] + frac_digits)
def fib_mem(n, computed={0: 0, 1: 1}):
    """Return the n-th Fibonacci number using memoized recursion.

    Note: *computed* deliberately defaults to a shared dict so results are
    cached across calls; pass your own dict to isolate the cache.
    """
    if n in computed:
        return computed[n]
    value = fib_mem(n - 1, computed) + fib_mem(n - 2, computed)
    computed[n] = value
    return value
def get_hash_tuple(a_dict, included_keys=None):
    """Flatten *a_dict* into a hashable tuple of its keys followed by its
    values, optionally restricted to *included_keys* — useful for feeding a
    dictionary to hash()."""
    if included_keys is not None:
        # Keep only the requested keys (and their insertion order).
        a_dict = {key: a_dict[key] for key in included_keys}
    return tuple(list(a_dict.keys()) + list(a_dict.values()))
import sys
def _check_python_ok_for_pygame():
"""If we're on a Mac, is this a full Framework python?
There is a problem with PyGame on Macs running in a virtual env.
If the Python used is from the venv, it will not allow full window and
keyboard interaction. Instead, we need the original framework Python
to get PyGame working properly.
The problem doesn't occur on Linux and Windows.
"""
if sys.platform == 'darwin': # This is a Mac
return 'Library/Frameworks' in sys.executable
else:
return True | f3b8b0db2c853a38bb4ce35fdaf4737732378e4a | 9,663 |
def get_cfg_option(cfg, sec, opt, verbose=False):
    """Return ``cfg[sec][opt]``, or None when the section or option is
    missing.

    Parameters
    ----------
    cfg : configparser.ConfigParser()
        Configuration as retrieved by read_cfg_file().
    sec : str
        The section in which the option is located.
    opt : str
        The option that should be retrieved.
    verbose : bool, optional
        Print info if either section or option could not be found.

    Returns
    -------
    str or None
        Value of the option, or None when absent.
    """
    if sec not in cfg:
        if verbose:
            print("Section '%s' is not in configuration '%s'" % (sec, cfg))
        return None
    if opt not in cfg[sec]:
        if verbose:
            print("Option '%s' is not in section '%s'" % (opt, sec))
        return None
    return cfg[sec][opt]
import sys
def fn_name(depth=0):
    """Return a function name from the call stack.

    Args:
        depth: stack depth to inspect — 0 = caller, 1 = caller's caller, etc.

    Returns:
        The code-object name of the frame at that depth.
    """
    # +1 skips fn_name's own frame.
    frame = sys._getframe(depth + 1)
    return frame.f_code.co_name
def members_to_rep(otus):
    """Invert a {rep: [members]} mapping into a {member: rep} lookup.

    BUG FIX: the original called Python 2's ``dict.iteritems()``, which is
    an AttributeError on Python 3; switched to ``items()``.
    """
    return {member: rep
            for rep, members in otus.items()
            for member in members}
def num_2_byte_list(num):
    """Split a non-negative integer into a big-endian list of byte values.

    :param num: integer to convert
    :return: list of ints, most significant byte first ([] when num <= 0)
    """
    out = []
    while num > 0:
        out.append(num & 0xff)  # take the lowest byte's value
        num >>= 8               # drop the lowest byte
    out.reverse()
    return out
def update_accel_time(driver, at):
    """Update the driver's accel time.

    :param driver: driver
    :param at: new accel time
    :type driver: DriverProfile
    :return: updated driver profile
    """
    return driver.update_accel_time(at)
def _simplify(shp, tol=0.05):
"""
Generate a simplified shape, within a specified tolerance.
"""
simp = None
for thresh in [0.001, 0.0005, 0.0004, 0.0003, 0.0002, 0.0001]:
simp = shp.simplify(thresh)
if shp.difference(simp).area / shp.area < tol:
break
return simp | bf4ba42ce612477aa9606d7c710a242fbcf9dd61 | 9,669 |
import re
def _match(pattern, value):
"""User-defined function to implement SQL MATCH/STIX MATCHES"""
return bool(re.match(pattern, value)) | e644a5bf45cf298dec3c94b0782ab6fe606b8fdf | 9,670 |
from typing import Callable
def check_callback(callback):
    """Normalize *callback* into a list of callables.

    Accepts None (-> []), a single callable (-> [callback]), or a list of
    callables (returned as-is); anything else raises ValueError.
    """
    if callback is None:
        return []
    if isinstance(callback, Callable):
        return [callback]
    if isinstance(callback, list) and all(
            isinstance(item, Callable) for item in callback):
        return callback
    raise ValueError("callback should be either a callable or "
                     "a list of callables.")
import re
def compress_json(json):
    """Shrink json output by removing the quotes around object keys.

    The result is no longer valid JSON (keys must be double-quoted) but is
    still valid JavaScript.
    """
    key_pattern = re.compile(r'"(\w+)":')
    return key_pattern.sub(r'\1:', json)
import builtins
def is_bytes(value):
    """Indicate if object is bytes-like.

    Re-implemented locally because future.utils.isbytes is deprecated, per
    that project's own recommendation.
    """
    return isinstance(value, bytes)
import os
def find_languages(folders):
    """Find the languages in the translations folders.

    Languages are stored as subfolders named with a two-letter language
    code.

    :param folders: folders to look through
    :return: list of two-letter language codes found
    """
    # Any directory entry whose name is exactly two characters long is
    # treated as a language code.
    return [entry
            for folder in folders
            for entry in os.listdir(folder)
            if len(entry) == 2]
def fasta_name_seq(s):
    """
    Interprets a string as a FASTA record. Does not make any
    assumptions about wrapping of the sequence string.

    :param s: string holding one FASTA record (">name" header line followed
        by one or more sequence lines)
    :return: (name, sequence) tuple
    :raises ValueError: if the string is not a valid FASTA record
    """
    DELIMITER = ">"
    lines = s.splitlines()
    # Explicit validation instead of assert (asserts vanish under -O), and
    # an empty header line no longer escapes as an uncaught IndexError.
    if len(lines) < 2 or not lines[0] or lines[0][0] != DELIMITER:
        raise ValueError("String not recognized as a valid FASTA record")
    name = lines[0][1:]
    sequence = "".join(lines[1:])
    return (name, sequence)
def get_commit_timestamps(commits):
    """Get all commit timestamps for the given ebuild.

    Args:
        commits (list[Commit]): The commits in question.

    Returns:
        list[int]: The uprev commit unix timestamps, in order.
    """
    return [int(c.timestamp) for c in commits]
def commonprefix(m):
    """Given a list of pathnames, returns the longest common leading component without trailing /"""
    if not m:
        return ""
    # Compare whole path components, not characters, so "a/bc" and "a/bd"
    # share "a" rather than "a/b".
    split_paths = [p.rstrip("/").split("/") for p in m]
    # The lexicographic min and max differ first at the point where ANY two
    # paths differ, so comparing just those two is sufficient.
    lo, hi = min(split_paths), max(split_paths)
    common = lo
    for i, (a, b) in enumerate(zip(lo, hi)):
        if a != b:
            common = lo[:i]
            break
    return "/".join(common)
def edgecolor_by_source(G, node_colors):
    """ Returns a list of colors to set as edge colors based on the source node for each edge.

    Parameters
    ----------
    G : graph.
       A networkx graph.
    node_colors : list
       List of node colors, in G.nodes() order.

    Example
    --------
    >>> colormap = {'male':'b','female':'r'}
    >>> node_colors = set_node_color(G,"gender",colormap)
    >>> edge_colors = edgecolor_by_source(G,node_colors)

    Returns
    -------
    list
       list of colors for each edge of the graph color set by the source node.
    """
    # Map each node to its color, then look up the source node of each edge.
    color_of = dict(zip(G.nodes(), node_colors))
    return [color_of[edge[0]] for edge in G.edges()]
def _ignore_filter(referrers, ignore, extraids, getid=id):
""" Ignore objects on the referrers list if ignore(x) is true or if x is in extra """
r = []
for ref in referrers:
if ignore is not None:
extraids.update(set([getid(o) for o in ignore(ref)]))
if getid(ref) in extraids: continue
r.append(ref)
return r | e13a28ba1610de9d6bc835a03cf02a0f6752f7b2 | 9,683 |
import numpy as np
def deserialize_numpy(serialized_np, shape):
    """
    Deserializes a numpy array from a big-endian float64 byte buffer.

    from https://stackoverflow.com/questions/30698004/how-can-i-serialize-a-numpy-array-while-preserving-matrix-dimensions#30699208

    Parameters
    ----------
    serialized_np : bytes
        A serialized numpy array (big-endian float64 raw bytes)
    shape : tuple of ints
        The shape of the serialized array

    Returns
    -------
    np_array : numpy.ndarray
        The deserialized numpy array
    """
    # BUG FIX: dtype.newbyteorder() returns a NEW dtype (dtypes are
    # immutable); the original code discarded that result, so the buffer was
    # read with native byte order instead of the intended big-endian order.
    dt = np.dtype('float').newbyteorder('>')  # big-endian float64
    np_array = np.frombuffer(serialized_np, dtype=dt)
    np_array = np_array.reshape(shape)
    return np_array
def build_tuple_for_feet_structure(quantity):
    """
    Builds the tuple required to create a FeetAndInches object.

    :param quantity: sequence of strings (feet, inches, fractional inches),
        e.g. ("5", "3", "1/2")
    :return: tuple of (feet, inches, fractional inches evaluated as a float)
    """
    # The third element is a fraction like "1/2"; evaluate it numerically.
    numerator, denominator = quantity[2].split('/')
    return float(quantity[0]), float(quantity[1]), int(numerator) / int(denominator)
def _all(itr):
"""Similar to Python's all, but returns the first value that doesn't match."""
any_iterations = False
val = None
for val in itr:
any_iterations = True
if not val:
return val
return val if any_iterations else True | bb1145abaaaa1c6910371178ca5ebe68600bb287 | 9,686 |
def no_init(_data, weights):
    """
    Return the entered weights unchanged.

    Parameters
    ----------
    _data: ndarray
        Ignored; present only so this matches the two-argument signature of
        the other weight-init functions.
    weights: ndarray
        Previous weight values.

    Returns
    -------
    weights: ndarray
        The same weight values that were passed in.
    """
    return weights
def primeFactors(someInt):
    """
    return a list of the prime factors of
    someInt.
    e.g.
       primeFactors(24)=[2,2,2,3]
       primeFactors(23)=[23]
       primeFactors(25)=[5,5]
    """
    # BUG FIX: this was an unimplemented stub returning the string "stub".
    # Trial division: repeatedly divide out each candidate factor. Any
    # composite candidate cannot divide n because its prime parts were
    # already removed, so only primes end up in the result.
    factors = []
    n = someInt
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        # Leftover n is prime (no factor <= sqrt(n) divides it).
        factors.append(n)
    return factors
import uuid
def is_valid_uuid(val):
    """
    Check if a value parses as a valid UUID.

    :param val: candidate UUID (any object; converted with str())
    :return: True if str(val) is a valid UUID, else False
    """
    try:
        uuid.UUID(str(val))
    except ValueError:
        return False
    return True
def convert_to_hexadecimal(bits, padding):
    """
    Converts bits to a zero-padded, upper-case hexadecimal string.

    E.g.
    Converts [False, False, False, True], 0 to "1".
    Converts [True, False, False, False], 2 to "08"

    Args:
        bits: List of boolean values, most significant bit first.
        padding: Integer of number of 0 padded places.

    Returns:
        string: Zero padded hexadecimal number.
    """
    # Interpret the booleans as a base-2 number, then format directly in
    # hex with zero-fill to `padding` digits (no "0x" prefix to strip).
    value = int("".join("1" if bit else "0" for bit in bits), 2)
    return format(value, "0{}x".format(padding)).upper()
import string
def _sanitize_title(title):
""" Remove all non alphanumeric characters from title and lowercase """
alphanumeric = string.ascii_lowercase + string.digits + ' '
title = title.lower()
title = "".join(filter(lambda x: x in alphanumeric, title))
return title | 6f0d1818140bc2a50b160f73b8b4590be8f31891 | 9,691 |
def IsBefore(version, major, minor, revision):
    """Decide if a given version is strictly before a given version.

    @param version: (major, minor, revision) or None, with None being
        before all versions
    @type version: (int, int, int) or None
    @param major: major version
    @type major: int
    @param minor: minor version
    @type minor: int
    @param revision: revision
    @type revision: int
    """
    # None sorts before everything; otherwise rely on tuple ordering.
    return version is None or version < (major, minor, revision)
def handle_trace_length(state_trace_length):
    """
    Expand each trace length into the running positions 1..length.

    e.g. [2, 3] -> [1, 2, 1, 2, 3]

    :param state_trace_length: iterable of lengths (coerced with int())
    :return: flattened list of 1-based positions
    """
    return [step + 1
            for length in state_trace_length
            for step in range(int(length))]
def no_auth(request):
    """
    Authentication no-op: returns the request untouched.

    Use this if no auth is desired.
    """
    return request
def total_size(metainfo):
    """
    Compute the sum of all files' sizes in a metainfo dictionary.

    :param metainfo: decoded metainfo dict; may contain a 'files' list whose
        entries may each carry a 'length' key
    :return: total length in bytes, or None when there is no 'files' list
    """
    # BUG FIX: dict.has_key() was removed in Python 3; use the `in`
    # operator instead.
    if 'files' not in metainfo:
        return None
    total = 0
    for infile in metainfo['files']:
        if 'length' in infile:
            total += infile['length']
    return total
import random
import string
def makeHTMLword(body, fontsize):
    """take words and fontsize, and create an HTML word in that fontsize.

    :param body: the word text to wrap
    :param fontsize: font size in pixels
    :return: an HTML <span> string with a random RGB text color
    """
    # Random color for every tag.
    color = 'rgb(%s, %s, %s)' % (str(random.randint(0, 255)),
                                 str(random.randint(0, 255)),
                                 str(random.randint(0, 255)))
    wordStr = '<span style=\"font-size:%spx;color:%s;float:left;\">%s</span>'
    return wordStr % (str(fontsize), color, body)

# BUG FIX: the body of this half-deleted function was left live at module
# level (an indented docstring -> IndentationError, plus calls to the
# undefined pytagcloud helpers make_tags/get_tag_counts/create_tag_image).
# It is now fully commented out.
# def generatetagcloud(string, filename):
#     """accept a string and generate a tag cloud using pytagcloud"""
#     tags = make_tags(get_tag_counts(string), minsize=10, maxsize=120)
#     create_tag_image(tags, filename.split('.')[0] + '.' + 'png', background=(0, 0, 0), size=(800, 600), fontname='Droid Sans', rectangular=False)
import os
import subprocess
import sys
def exec_command(command, cwd=None, stdout=None, env=None):
    """Returns True if the command was executed successfully (exit code 0).

    :param command: argv list, or a whitespace-separated command string
    :param cwd: working directory for the child process
    :param stdout: file object receiving the child's stdout
    :param env: extra environment variables merged over os.environ
    """
    argv = command if isinstance(command, list) else command.split()
    merged_env = os.environ.copy()
    if env:
        merged_env.update(env)
    try:
        subprocess.check_call(argv, stdout=stdout, cwd=cwd, env=merged_env)
    except subprocess.CalledProcessError as err:
        print(err, file=sys.stderr)
        return False
    return True
def ts_grismc_sim(pixels):
    """
    Simple analytic wavelength calibration for Simulated GRISMC data
    """
    dispersion = 0.0010035       # microns per pixel (toward positive X in raw detector pixels, used in pynrc)
    undeviated_wavelength = 4.0  # undeviated wavelength
    undeviated_pixel = 1638.33
    return (pixels - undeviated_pixel) * dispersion + undeviated_wavelength
def get_profile_avatar_upload_to(instance, filename):
    """ Returns a valid upload path for the avatar associated with a forum profile. """
    # The profile instance owns the path-building logic; just delegate.
    upload_path = instance.get_avatar_upload_to(filename)
    return upload_path
def parse_bool_token(token):
    """
    Parses a string token to convert it to its equivalent boolean value ignoring
    the case of the string token or leaves the token intact if it cannot.

    :param token:
        String to convert to ``True`` or ``False``.
    :type token:
        ``str``
    :return:
        ``True`` or ``False`` or the token itself if not converted.

    Usage::

        >>> parse_bool_token('FAlse')
        False
        >>> parse_bool_token('FalS')
        'FalS'
        >>> parse_bool_token('true')
        True
        >>> parse_bool_token('TRUE')
        True
    """
    lowered = token.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return token
def video_content_to_dict(vid_info_list):
    """Convert YouTube metadata list to a dict keyed by video title.

    Falsy entries (e.g. None placeholders) are skipped.
    """
    return {entry["title"]: {"id": entry["id"], "duration": entry["duration"]}
            for entry in vid_info_list
            if entry}
def print_symbol_table_node_to_dot(node, cur_id):
    """
    Render one symbol-table node as a Graphviz DOT node whose label is an
    HTML-like table: a header row holding the table's name, then one row per
    symbol showing its type (left cell) and name (right cell).

    :param node: symbol table; exposes .name and a .symbols mapping of
        symbol name -> type object (type has isFunction()/toString())
    :param cur_id: identifier used for the DOT node
    :return: dot string for this node
    """
    # shape=plaintext so Graphviz renders the HTML-like <table> label.
    dot = "\t{} [\n\t shape=plaintext \n \tlabel=< <table border=\'0\' cellborder=\'1\' cellspacing=\'0\'\n\t>".format(
        cur_id)
    # Header row spans both columns and shows the symbol table's name.
    dot += "\t<tr><td colspan=\"2\"> {} </td></tr>\n".format(node.name)
    for symbol, sym_type in node.symbols.items():
        dot += "\t<tr>"
        if sym_type.isFunction():
            # toString(True) is used for function types -- presumably a more
            # verbose rendering (e.g. with the argument list); confirm
            # against the type class.
            dot += "\t<td>{}</td>\n".format(sym_type.toString(True))
        else:
            dot += "\t<td>{}</td>\n".format(sym_type.toString())
        dot += "\t<td>{}</td>\n".format(symbol)
        dot += "\t</tr>\n"
    # Close the HTML label (>) and the DOT attribute list (];).
    dot += "\t</table> >];\n"
    return dot
def create_getter(var_name):
    """ Given the string name of a variable, creates a getter function for that
    variable and returns it.

    Args:
        var_name: the name of the variable for which a getter should be created.
    """
    def _getter(instance):
        # Read the named attribute off whatever object the getter is bound to.
        return getattr(instance, var_name)
    return _getter
def predict_by_moving_avg_growth(stock, s, **_):
    """Returns predicted value of a stock.

    Predicts the next price by extrapolating the s-period moving average and
    its latest growth; the extrapolation spans s/2 + 0.5 periods because an
    uncentered moving average lags by that much.

    Parameters
    ----------
    stock : :obj:`stock`
        Stock to be predicted (must expose .price_history).
    s : int
        Number of data points used to calculate a moving average.

    Returns
    -------
    float or None
        Predicted next price (never negative), or None when there is not
        enough history (fewer than s + 1 points).
    """
    history = stock.price_history
    if len(history) < s + 1:
        return None
    moving_avg = sum(history[-s:]) / s
    growth = moving_avg - sum(history[-s - 1:-1]) / s
    prediction = moving_avg + (s / 2 + 0.5) * growth
    # Prices cannot go below zero; clamp a negative extrapolation.
    return prediction if prediction > 0 else 0
import zlib
import base64
def _compress(s: bytes) -> bytes:
    """
    Compresses bytes for the payload.

    Produces a raw DEFLATE stream (negative wbits -> no zlib header or
    checksum) and base64-encodes it.
    """
    co = zlib.compressobj(wbits=-zlib.MAX_WBITS)
    b = co.compress(s) + co.flush()
    # NOTE(review): ''.join(map(chr, b)).encode() UTF-8-encodes each byte
    # VALUE, so any byte >= 0x80 expands into two bytes before base64. If
    # the consumer expects the raw deflate bytes this is a bug (it would be
    # base64.b64encode(b)); confirm the peer's decode path before changing.
    return base64.b64encode(''.join(map(chr, b)).encode())
def _token_to_int(t, token_list, token_cache, size_limit=float('inf')):
    """Return the int which represents a token, with caching.

    Throws a ValueError if token t is not in the token_list. There MUST
    be a _UNK token at the beginning of your vocab, or this may not halt.

    :param t: token string to look up
    :param token_list: vocabulary list, presumably ordered most-frequent
        first (so indices >= size_limit mean "too rare") -- confirm upstream
    :param token_cache: memo dict shared across calls; mutated in place
    :param size_limit: vocabulary indices at or above this map to _UNK
    """
    if t not in token_cache:
        if t == '!RET':
            # NOTE(review): caches the two-character literal r'\n' but
            # returns a real newline, so a cache hit for '!RET' yields a
            # different value than the first call. Looks deliberate (perhaps
            # for display), but confirm.
            token_cache[t] = r'\n'
            return '\n'
        # Raises ValueError when t is absent from the vocabulary.
        token = token_list.index(t)
        if token >= size_limit:  # too infrequent to include, >= for 0-index
            # Resolve (and cache) the _UNK index instead; _UNK must sit
            # below size_limit or this recursion would not terminate.
            token = _token_to_int('_UNK', token_list, token_cache)
        token_cache[t] = token  # cache this token
    else:
        token = token_cache[t]
    return token
def select_data(df, countries_list, regions_list, ages_list, genders_list):
    """Extracts from the dataset the rows matching all the given criteria.

    Parameters:
    -----------
    df : Pandas DataFrame
        dataset
    countries_list : list of str
        countries to be selected
    regions_list : list of str
        regions to be selected
    ages_list : list of int
        age ranges to be selected
    genders_list : list of str
        genders to be selected

    Returns:
    -----------
    The corresponding dataset (pandas DataFrame)
    """
    # Combine all four criteria into a single boolean mask; equivalent to
    # filtering the frame four times in sequence.
    mask = (df['Country'].isin(countries_list)
            & df['Region'].isin(regions_list)
            & df['Age'].isin(ages_list)
            & df['Sex'].isin(genders_list))
    return df[mask]
import os
def is_subdir(child, parent):
    """
    Determine if "child" is a subdirectory of "parent".
    If child == parent, returns True.

    :param child: candidate child path
    :param parent: candidate ancestor path
    :return: True when the resolved child path lies at or below the
        resolved parent path
    """
    child_path = os.path.realpath(child)
    parent_path = os.path.realpath(parent)
    # BUG FIX: the original compared paths character by character, so
    # "/a/bc" was wrongly reported as a subdirectory of "/a/b". Comparing
    # whole path components via commonpath avoids that.
    try:
        return os.path.commonpath([parent_path, child_path]) == parent_path
    except ValueError:
        # Mixed absolute/relative paths or different drives (Windows).
        return False
def textops_rawtexttolines(text, linedelimiter="\n"):
    """
    <Purpose>
      Converts raw text (a string) into lines that can be processed by the
      functions in this module.

    <Arguments>
      text:
          The text to convert into lines (basically, a sequence of strings).

      linedelimiter (optional, defaults to "\n"):
          The string denoting an EOL ("\n" on Unix, "\r\n" on Windows).

    <Exceptions>
      TypeError on bad parameters.

    <Side Effects>
      None.

    <Returns>
      A sequence of strings; each element is a line, with newlines removed.
    """
    lines = text.split(linedelimiter)
    # A trailing delimiter produces one empty tail entry; drop it so the
    # result has exactly one element per real line.
    if lines and lines[-1] == '':
        lines.pop()
    return lines
def _velocity_factor(velocity_units_in: str, velocity_units_out: str) -> float:
"""helper method for convert_velocity"""
factor = 1.0
if velocity_units_in == 'm/s':
factor /= 0.3048
elif velocity_units_in == 'ft/s':
pass
elif velocity_units_in == 'in/s':
factor /= 12.
elif velocity_units_in == 'knots':
factor *= 1.68781
else:
raise RuntimeError(f'velocity_units_in={velocity_units_in} is not valid; use '
'[ft/s, m/s, in/s, knots]')
if velocity_units_out == 'm/s':
factor *= 0.3048
elif velocity_units_out == 'ft/s':
pass
elif velocity_units_out == 'in/s':
factor *= 12.
elif velocity_units_out == 'knots':
factor /= 1.68781
else:
raise RuntimeError(f'velocity_units_out={velocity_units_out!r} is not valid; use '
'[ft/s, m/s, in/s, knots]')
return factor | 6f2acab5bf14c61f44ac4c2a260d9d367462a5a1 | 9,713 |
import torch
def image_grid(img, row, col):
    """
    Tile a batch of images into a single collage.

    img: tensor of shape (N, h, w, c); requires N >= row * col.
    Returns a (row*h, col*w, c) tensor on img's device where image i sits
    at grid cell (i // col, i % col).
    """
    _, h, w, c = img.shape
    collage = torch.zeros(h * row, w * col, c).to(img.device)
    for r in range(row):
        for k in range(col):
            collage[r * h:(r + 1) * h, k * w:(k + 1) * w] = img[r * col + k]
    return collage
def percentage(value, precision=2):
    """Convert `float` to #.##% notation as `str`.

    A value of 1 = `"100.00%"`; 0.5 = `"50.00%"`"""
    return format(value, f".{precision}%")
import re
def findAlphanumeric(line):
    """Return a list holding the leading run of word characters in *line*
    (empty list when the line does not start with a word character)."""
    # The anchored pattern can match at most once, so the list has 0 or 1
    # elements.
    return re.findall(r'^\w+', line)
import os
def dir_size(path):
    """
    Get the size of the directory represented by path recursively.

    Symbolic links are skipped entirely; directory entries themselves
    contribute nothing — only regular files' sizes are summed.

    :param path: Path to the dir whose size needs to be calculated
    :return: size in bytes of the dir
    """
    total = 0
    for entry in os.scandir(path):
        if entry.is_symlink():
            continue
        if entry.is_file():
            total += entry.stat(follow_symlinks=False).st_size
        elif entry.is_dir():
            # Recurse directly on the public function.
            total += dir_size(entry.path)
    return total
def rep(data):
    """Checks if all labels are represented
    in the dataset `data`.

    :param data: iterable of rows, each a mapping with a "label" key
    :return: True once both labels 0 and 1 have been seen, else False
    """
    # BUG FIX: the original popped labels by *index* (list.pop(label)),
    # which raised IndexError when label 1 was found after label 0 had been
    # removed, and its range(len(data) - 1) loop skipped the last row, so
    # even [0, 1] was reported as not represented.
    remaining = {0, 1}
    for row in data:
        remaining.discard(row["label"])
        if not remaining:
            # Both labels seen; no need to scan further.
            return True
    return False
import re
def quota_size(value):
    """
    Convert a human readable quota size into a number of bytes.

    Accepts plain numbers ("10"), decimal SI scales ("1k", "2MB") and
    binary scales ("1ki", "2MiB"); raises ValueError for anything else.
    """
    # BUG FIX: the pattern must be a raw string — '\s'/'\d' in a plain
    # literal are invalid escape sequences (SyntaxWarning on modern Python).
    _UNITS_RE = re.compile(r'^\s*(?P<value>\d+(\.(\d*)?)?)\s*(?P<scale>[kMGTP]i?)?B?$')
    _UNITS_SCALES = {None: 1,
                     'k' : 1000,    'ki': 1024,
                     'M' : 1000**2, 'Mi': 1024**2,
                     'G' : 1000**3, 'Gi': 1024**3,
                     'T' : 1000**4, 'Ti': 1024**4,
                     'P' : 1000**5, 'Pi': 1024**5
                     }
    value = str(value)
    mtch = _UNITS_RE.match(value)
    if mtch is None:
        raise ValueError("Cannot interpret '%s' as a quota size" % value)
    value = float(mtch.group('value'))
    value *= _UNITS_SCALES[mtch.group('scale')]
    return int(value)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.