content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
def removeBots(gdf, bot_list):
"""
A Function for removing Twitter bots.
Parameters
----------
gdf: <gpd.GeoDataFrame>
A GeoDataFrame from which Twitter bots should be removed.
bot_list: <list>
A list of likely-bot records (dicts with a 'userid' key), e.g. the
'home_unique_days' or 'home_unique_weeks' bot list.
Returns
-------
<gpd.GeoDataFrame>
A processed GeoDataFrame with likely bots removed.
"""
copy = gdf  # keep a reference to the original frame so len(copy) stays constant for progress reporting
for index, row in gdf.iterrows():
userid = str(row['user']['id'])
for item in bot_list:
bot_id = item['userid']
if bot_id == userid:
gdf = gdf.drop(index)
print("A bot dropped: ID", userid, ". Length of GDF now: ", len(gdf))
print("Processing: ", index, "/", len(copy))
return(gdf) | e938f46bcf5c87dfa81db96f127c88d948f061db | 8,300 |
def getinput(prompt):
""">> getinput <prompt>
Get input, store it in '__input__'.
"""
local_dict = get_twill_glocals()[1]
inp = input(prompt)
local_dict['__input__'] = inp
return inp | db26e8361518f1728edfb15c6417586f8c3ca73d | 8,301 |
import os
from functools import partial
import numpy as np
import torch
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
def plot_qualitative_with_kde(
named_trainer,
dataset,
named_trainer_compare=None,
n_images=8,
percentiles=None, # if None uses uniform linspace from n_images
figsize=DFLT_FIGSIZE,
title=None,
seed=123,
height_ratios=[1, 3],
font_size=12,
h_pad=-3,
x_lim={},
is_smallest_xrange=False,
kdeplot_kwargs={},
n_samples=1,
upscale_factor=1,
**kwargs,
):
"""
Plot qualitative samples using `plot_posterior_samples` but select the samples and mask to plot
given the score at test time.
Parameters
----------
named_trainer : list [name, NeuralNet]
Trainer (the model resulting from training) and the name under which it should be displayed.
dataset :
named_trainer_compare : list [name, NeuralNet], optional
Like `named_trainer` but for a model against which to compare.
n_images : int, optional
Number of images to plot (at uniform interval of log like). Only used if `percentiles` is None.
percentiles : list of float, optional
Percentiles of log likelihood of the main model for which to select an image. The length
of the list will correspond to the number of images.
figsize : tuple, optional
title : str, optional
seed : int, optional
height_ratios : int iterable of length = nrows, optional
Height ratios of the rows.
font_size : int, optional
h_pad : int, optional
Padding between kde plot and images
x_lim : dict, optional
Dictionary containing one (or both) of "left", "right" corresponding to the x limit of the kde plot.
is_smallest_xrange : bool, optional
Whether to rescale the x axis based on the range of percentiles.
kdeplot_kwargs : dict, optional
Additional arguments to `sns.kdeplot`
upscale_factor : float, optional
Factor by which to upscale the image (i.e. extrapolate). Only used if the grid is not uniform.
kwargs
!VERY DIRTY
"""
kwargs["n_samples"] = n_samples
kwargs["is_plot_std"] = False
kwargs["is_add_annot"] = False
if percentiles is not None:
n_images = len(percentiles)
plt.rcParams.update({"font.size": font_size})
fig, axes = plt.subplots(
2, 1, figsize=figsize, gridspec_kw={"height_ratios": height_ratios}
)
# a dictionary that has "upscale_factor", which is needed for downscaling when plotting
# (only used when the data is not gridded)
CntxtTrgtDictUpscale = partial(CntxtTrgtDict, upscale_factor=upscale_factor)
def _plot_kde_loglike(name, trainer):
chckpnt_dirname = dict(trainer.callbacks_)["Checkpoint"].dirname
test_eval_file = os.path.join(chckpnt_dirname, EVAL_FILENAME)
test_loglike = np.loadtxt(test_eval_file, delimiter=",")
sns.kdeplot(
test_loglike, ax=axes[0], shade=True, label=name, cut=0, **kdeplot_kwargs
)
sns.despine()
return test_loglike
def _grid_to_points(selected_data):
cntxt_trgt_getter = GridCntxtTrgtGetter(upscale_factor=upscale_factor)
for i in range(n_images):
X = selected_data["Y_cntxt"][i]
X_cntxt, Y_cntxt = cntxt_trgt_getter.select(
X, None, selected_data["X_cntxt"][i]
)
X_trgt, Y_trgt = cntxt_trgt_getter.select(
X, None, selected_data["X_trgt"][i]
)
yield CntxtTrgtDictUpscale(
X_cntxt=X_cntxt, Y_cntxt=Y_cntxt, X_trgt=X_trgt, Y_trgt=Y_trgt
)
def _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer):
is_uniform_grid = isinstance(trainer.module_, GridConvCNP)
kwargs["img_indcs"] = []
kwargs["is_uniform_grid"] = is_uniform_grid
kwargs["is_return"] = True
if not is_uniform_grid:
if is_grided_trainer:
grids = [
plot_posterior_samples(
dataset, data, trainer.module_.cpu(), **kwargs
)
for i, data in enumerate(_grid_to_points(selected_data))
]
else:
grids = [
plot_posterior_samples(
dataset,
CntxtTrgtDictUpscale(
**{k: v[i] for k, v in selected_data.items()}
),
trainer.module_.cpu(),
**kwargs,
)
for i in range(n_images)
]
# images are padded by 2 pixels in between each, but here you concatenate => would pad twice
# => remove all the left padding for each besides the first
grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
return torch.cat(grids, axis=-1)
elif is_uniform_grid:
if not is_grided_trainer:
grids = []
for i in range(n_images):
_, X_cntxt = points_to_grid(
selected_data["X_cntxt"][i],
selected_data["Y_cntxt"][i],
dataset.shape[1:],
background=torch.tensor([0.0] * dataset.shape[0]),
)
Y_trgt, X_trgt = points_to_grid(
selected_data["X_trgt"][i],
selected_data["Y_trgt"][i],
dataset.shape[1:],
background=torch.tensor([0.0] * dataset.shape[0]),
)
grids.append(
plot_posterior_samples(
dataset,
dict(
X_cntxt=X_cntxt,
Y_cntxt=Y_trgt, # Y_trgt is all X because no masking for target (assumption)
X_trgt=X_trgt,
Y_trgt=Y_trgt,
),
trainer.module_.cpu(),
**kwargs,
)
)
grids = [g[..., 2:] if i != 0 else g for i, g in enumerate(grids)]
return torch.cat(grids, axis=-1)
else:
return plot_posterior_samples(
dataset,
{k: torch.cat(v, dim=0) for k, v in selected_data.items()},
trainer.module_.cpu(),
**kwargs,
)
name, trainer = named_trainer
test_loglike = _plot_kde_loglike(name, trainer)
if named_trainer_compare is not None:
left = axes[0].get_xlim()[0]
_ = _plot_kde_loglike(*named_trainer_compare)
axes[0].set_xlim(left=left) # left bound by first model to not look strange
if len(x_lim) != 0:
axes[0].set_xlim(**x_lim)
if percentiles is not None:
idcs = []
values = []
for i, p in enumerate(percentiles):
# value closest to percentile
percentile_val = np.percentile(test_loglike, p, interpolation="nearest")
idcs.append(np.argwhere(test_loglike == percentile_val).item())
values.append(percentile_val)
sorted_idcs = list(np.sort(idcs))[::-1]
if is_smallest_xrange:
axes[0].set_xlim(left=values[0] - 0.05, right=values[-1] + 0.05)
else:
# find indices such that same space between all
values = np.linspace(test_loglike.min(), test_loglike.max(), n_images)
idcs = [(np.abs(test_loglike - v)).argmin() for v in values]
sorted_idcs = list(np.sort(idcs))[::-1]
axes[0].set_ylabel("Density")
axes[0].set_xlabel("Test Log-Likelihood")
selected_data = []
set_seed(seed) # make sure same order and indices for cntxt and trgt
i = -1
saved_values = []
queue = sorted_idcs.copy()
next_idx = queue.pop()
for data in trainer.get_iterator(dataset, training=False):
Xi, yi = unpack_data(data)
for cur_idx in range(yi.size(0)):
i += 1
if next_idx != i:
continue
selected_data.append(
{k: v[cur_idx : cur_idx + 1, ...] for k, v in Xi.items()}
)
if len(queue) == 0:
break
else:
next_idx = queue.pop()
# put back into the original (non-sorted) order
selected_data = [selected_data[sorted_idcs[::-1].index(idx)] for idx in idcs]
selected_data = {k: v for k, v in tuple_cont_to_cont_tuple(selected_data).items()}
for v in values:
axes[0].axvline(v, linestyle=":", alpha=0.7, c="tab:green")
axes[0].legend(loc="upper left")
if title is not None:
axes[0].set_title(title, fontsize=18)
is_grided_trainer = isinstance(trainer.module_, GridConvCNP)
grid = _plot_posterior_img_selected(name, trainer, selected_data, is_grided_trainer)
middle_img = dataset.shape[1] // 2 + 1 # half height
y_ticks = [middle_img, middle_img * 3]
y_ticks_labels = ["Context", name]
if named_trainer_compare is not None:
grid_compare = _plot_posterior_img_selected(
*named_trainer_compare, selected_data, is_grided_trainer
)
grid = torch.cat(
(grid, grid_compare[:, grid_compare.size(1) // (n_samples + 1) + 1 :, :]),
dim=1,
)
y_ticks += [middle_img * (3 + 2 * n_samples)]
y_ticks_labels += [named_trainer_compare[0]]
axes[1].imshow(grid.permute(1, 2, 0).numpy())
axes[1].yaxis.set_major_locator(ticker.FixedLocator(y_ticks))
axes[1].set_yticklabels(y_ticks_labels, rotation="vertical", va="center")
remove_axis(axes[1])
if percentiles is not None:
axes[1].xaxis.set_major_locator(
ticker.FixedLocator(
[
(dataset.shape[2] // 2 + 1) * (i * 2 + 1)
for i, p in enumerate(percentiles)
]
)
)
axes[1].set_xticklabels(["{}%".format(p) for p in percentiles])
else:
axes[1].set_xticks([])
fig.tight_layout(h_pad=h_pad) | f8b7c93c399c63df9cacc766da8fee99b6620ee8 | 8,302 |
import subprocess
def get_git_doc_ref():
"""Return the revision used for linking to source code on GitHub."""
global _head_ref
if not _head_ref:
try:
branch = git_get_nearest_tracking_branch('.')
_head_ref = _run_git(['rev-parse', branch]).strip()
except subprocess.CalledProcessError:
_head_ref = None
return _head_ref | cf4cbd6dcf1a95dc56e25ebf996e05abc35e4c86 | 8,303 |
def update_trails(force=False, offline=False):
"""
Update trails from feeds
"""
success = False
trails = {}
duplicates = {}
try:
if not os.path.isdir(USERS_DIR):
os.makedirs(USERS_DIR, 0755)
except Exception, ex:
exit("[!] something went wrong during creation of directory '%s' ('%s')" % (USERS_DIR, ex))
_chown(USERS_DIR)
if config.UPDATE_SERVER:
print "[i] retrieving trails from provided 'UPDATE_SERVER' server..."
content = retrieve_content(config.UPDATE_SERVER)
if not content or content.count(',') < 2:
print "[x] unable to retrieve data from '%s'" % config.UPDATE_SERVER
else:
with _fopen(TRAILS_FILE, "w+b") as f:
f.write(content)
trails = load_trails()
else:
trail_files = set()
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, "trails"))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if config.CUSTOM_TRAILS_DIR:
for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(ROOT_DIR, os.path.expanduser(config.CUSTOM_TRAILS_DIR)))) :
for filename in filenames:
trail_files.add(os.path.abspath(os.path.join(dirpath, filename)))
if not trails and (force or not os.path.isfile(TRAILS_FILE) or (time.time() - os.stat(TRAILS_FILE).st_mtime) >= config.UPDATE_PERIOD or os.stat(TRAILS_FILE).st_size == 0 or any(os.stat(_).st_mtime > os.stat(TRAILS_FILE).st_mtime for _ in trail_files)):
if not config.no_updates:
print "[i] updating trails (this might take a while)..."
else:
print "[i] checking trails..."
if not offline and (force or config.USE_FEED_UPDATES):
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails", "feeds"))
if _ not in sys.path:
sys.path.append(_)
filenames = sorted(glob.glob(os.path.join(_, "*.py")))
else:
filenames = []
_ = os.path.abspath(os.path.join(ROOT_DIR, "trails"))
if _ not in sys.path:
sys.path.append(_)
filenames += [os.path.join(_, "static")]
filenames += [os.path.join(_, "custom")]
filenames = [_ for _ in filenames if "__init__.py" not in _]
if config.DISABLED_FEEDS:
filenames = [filename for filename in filenames if os.path.splitext(os.path.split(filename)[-1])[0] not in re.split(r"[^\w]+", config.DISABLED_FEEDS)]
for i in xrange(len(filenames)):
filename = filenames[i]
try:
module = __import__(os.path.basename(filename).split(".py")[0])
except (ImportError, SyntaxError), ex:
print "[x] something went wrong during import of feed file '%s' ('%s')" % (filename, ex)
continue
for name, function in inspect.getmembers(module, inspect.isfunction):
if name == "fetch":
print(" [o] '%s'%s" % (module.__url__, " " * 20 if len(module.__url__) < 20 else ""))
sys.stdout.write("[?] progress: %d/%d (%d%%)\r" % (i, len(filenames), i * 100 / len(filenames)))
sys.stdout.flush()
if config.DISABLED_TRAILS_INFO_REGEX and re.search(config.DISABLED_TRAILS_INFO_REGEX, getattr(module, "__info__", "")):
continue
try:
results = function()
for item in results.items():
if item[0].startswith("www.") and '/' not in item[0]:
item = [item[0][len("www."):], item[1]]
if item[0] in trails:
if item[0] not in duplicates:
duplicates[item[0]] = set((trails[item[0]][1],))
duplicates[item[0]].add(item[1][1])
if not (item[0] in trails and (any(_ in item[1][0] for _ in LOW_PRIORITY_INFO_KEYWORDS) or trails[item[0]][1] in HIGH_PRIORITY_REFERENCES)) or (item[1][1] in HIGH_PRIORITY_REFERENCES and "history" not in item[1][0]) or any(_ in item[1][0] for _ in HIGH_PRIORITY_INFO_KEYWORDS):
trails[item[0]] = item[1]
if not results and "abuse.ch" not in module.__url__:
print "[x] something went wrong during remote data retrieval ('%s')" % module.__url__
except Exception, ex:
print "[x] something went wrong during processing of feed file '%s' ('%s')" % (filename, ex)
try:
sys.modules.pop(module.__name__)
del module
except Exception:
pass
# custom trails from remote location
if config.CUSTOM_TRAILS_URL:
print(" [o] '(remote custom)'%s" % (" " * 20))
for url in re.split(r"[;,]", config.CUSTOM_TRAILS_URL):
url = url.strip()
if not url:
continue
url = ("http://%s" % url) if not "//" in url else url
content = retrieve_content(url)
if not content:
print "[x] unable to retrieve data (or empty response) from '%s'" % url
else:
__info__ = "blacklisted"
__reference__ = "(remote custom)" # urlparse.urlsplit(url).netloc
for line in content.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
line = re.sub(r"\s*#.*", "", line)
if '://' in line:
line = re.search(r"://(.*)", line).group(1)
line = line.rstrip('/')
if line in trails and any(_ in trails[line][1] for _ in ("custom", "static")):
continue
if '/' in line:
trails[line] = (__info__, __reference__)
line = line.split('/')[0]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", line):
trails[line] = (__info__, __reference__)
else:
trails[line.strip('.')] = (__info__, __reference__)
for match in re.finditer(r"(\d+\.\d+\.\d+\.\d+)/(\d+)", content):
prefix, mask = match.groups()
mask = int(mask)
if mask > 32:
continue
start_int = addr_to_int(prefix) & make_mask(mask)
end_int = start_int | ((1 << 32 - mask) - 1)
if 0 <= end_int - start_int <= 1024:
address = start_int
while start_int <= address <= end_int:
trails[int_to_addr(address)] = (__info__, __reference__)
address += 1
# basic cleanup
for key in trails.keys():
if key not in trails:
continue
if config.DISABLED_TRAILS_INFO_REGEX:
if re.search(config.DISABLED_TRAILS_INFO_REGEX, trails[key][0]):
del trails[key]
continue
try:
_key = key.decode("utf8").encode("idna")
if _key != key: # for domains with non-ASCII letters (e.g. phishing)
trails[_key] = trails[key]
del trails[key]
key = _key
except:
pass
if not key or re.search(r"\A(?i)\.?[a-z]+\Z", key) and not any(_ in trails[key][1] for _ in ("custom", "static")):
del trails[key]
continue
if re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key):
if any(_ in trails[key][0] for _ in ("parking site", "sinkhole")) and key in duplicates:
del duplicates[key]
if trails[key][0] == "malware":
trails[key] = ("potential malware site", trails[key][1])
if trails[key][0] == "ransomware":
trails[key] = ("ransomware (malware)", trails[key][1])
if key.startswith("www.") and '/' not in key:
_ = trails[key]
del trails[key]
key = key[len("www."):]
if key:
trails[key] = _
if '?' in key:
_ = trails[key]
del trails[key]
key = key.split('?')[0]
if key:
trails[key] = _
if '//' in key:
_ = trails[key]
del trails[key]
key = key.replace('//', '/')
trails[key] = _
if key != key.lower():
_ = trails[key]
del trails[key]
key = key.lower()
trails[key] = _
if key in duplicates:
_ = trails[key]
others = sorted(duplicates[key] - set((_[1],)))
if others and " (+" not in _[1]:
trails[key] = (_[0], "%s (+%s)" % (_[1], ','.join(others)))
read_whitelist()
for key in trails.keys():
if check_whitelisted(key) or any(key.startswith(_) for _ in BAD_TRAIL_PREFIXES):
del trails[key]
elif re.search(r"\A\d+\.\d+\.\d+\.\d+\Z", key) and (bogon_ip(key) or cdn_ip(key)):
del trails[key]
else:
try:
key.decode("utf8")
trails[key][0].decode("utf8")
trails[key][1].decode("utf8")
except UnicodeDecodeError:
del trails[key]
try:
if trails:
with _fopen(TRAILS_FILE, "w+b") as f:
writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)
for trail in trails:
writer.writerow((trail, trails[trail][0], trails[trail][1]))
success = True
except Exception, ex:
print "[x] something went wrong during trails file write '%s' ('%s')" % (TRAILS_FILE, ex)
print "[i] update finished%s" % (40 * " ")
if success:
print "[i] trails stored to '%s'" % TRAILS_FILE
return trails | 2ff83a3681899d6dffa8bdcedcbb7e9839bbc919 | 8,304 |
def bq_to_rows(rows):
"""Reformat BigQuery's output to regular pnguin LOD data
Reformat BigQuery's output format so we can put it into a DataFrame
Args:
rows (dict): A nested list of key-value tuples that need to be converted into a list of dicts
Returns:
list: A list of dictionaries based on the input x
"""
def _reformat(x):
pairs = x.items()
row = {}
for pair in pairs:
key, value = pair
row[key] = value
return row
return [_reformat(x) for x in rows] | 9ff842d1c41d7ebe5c822d4c07b2f26b5524b0fe | 8,305 |
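# Usage sketch (not part of the snippet above): a minimal, hedged example of
# bq_to_rows, assuming each BigQuery row behaves like a mapping with .items().
rows = [{"name": "a", "value": 1}, {"name": "b", "value": 2}]
print(bq_to_rows(rows))  # -> [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]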
def network_config(session, args):
"""network config functions"""
cmd = pluginlib.exists(args, 'cmd')
if not isinstance(cmd, basestring):
msg = "invalid command '%s'" % str(cmd)
raise pluginlib.PluginError(msg)
return
if cmd not in ALLOWED_NETWORK_CMDS:
msg = "Dom0 execution of '%s' is not permitted" % cmd
raise pluginlib.PluginError(msg)
return
cmd_args = pluginlib.exists(args, 'args')
return ALLOWED_NETWORK_CMDS[cmd](cmd_args) | d2a551166e7d5c445f1cba6404a3e526f4e7ecdd | 8,306 |
from DB import dbget
from DB import dbput
def persistent_property(name,default_value=0.0):
"""A propery object to be used inside a class"""
def get(self):
class_name = getattr(self,"name",self.__class__.__name__)
if not "{name}" in name:
if class_name: dbname = class_name+"."+name
else: dbname = name
else: dbname = name.replace("{name}",class_name)
##debug("persistent_property.get: %s: %r, %r: %r" % (name,self,class_name,dbname))
t = dbget(dbname)
if type(default_value) == str and default_value.startswith("self."):
def_val = getattr(self,default_value[len("self."):])
else: def_val = default_value
dtype = type(def_val)
try: from numpy import nan,inf,array # for "eval"
except: pass
try: import wx # for "eval"
except: pass
try: t = dtype(eval(t))
except: t = def_val
return t
def set(self,value):
class_name = getattr(self,"name",self.__class__.__name__)
if not "{name}" in name:
if class_name: dbname = class_name+"."+name
else: dbname = name
else: dbname = name.replace("{name}",class_name)
##debug("persistent_property.set: %s: %r, %r: %r" % (name,self,class_name,dbname))
dbput(dbname,repr(value))
return property(get,set) | 44ed6d8a20b4d84c8c4d27410ba28a17d37f7ef1 | 8,307 |
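# Usage sketch (not part of the snippet above; kept as comments because it assumes a
# live key-value store reachable through the DB.dbget/DB.dbput helpers imported above):
# class Motor(object):
#     name = "motor1"                              # used as the prefix of the DB key
#     speed = persistent_property("speed", 0.0)    # stored under "motor1.speed"
# m = Motor()
# m.speed = 2.5   # persisted via dbput("motor1.speed", "2.5")
# m.speed         # read back via dbget("motor1.speed") and eval'd to a float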
def album_id(items, sp_album):
"""Iterate through results to find correct Discogs album id."""
try:
artist = sp_album['artists'][0].lower().replace(" ", "")
except IndexError:
artist = ""
owners = -1
discogs_id = -1
similarity = 0
title = sp_album['name'].lower().replace(" ", "")
for album in items:
# title format: artist - title
index = album['title'].rfind(" - ")
disc_artist = album['title'][:index].lower().replace(" ", "")
disc_title = album['title'][index+3:].lower().replace(" ", "")
# calculate string similarity for artist spelling deviations
jw_similarity = jellyfish.jaro_winkler_similarity(artist, disc_artist)
# comparison for use of symbols in titles (& vs and)
if jellyfish.match_rating_comparison(disc_title, title):
# If they are basically the same, then match the best artist
if jellyfish.match_rating_comparison(artist, disc_artist):
if album['community']['have'] > owners:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
# If they are the same and this release is more popular
elif (jw_similarity == similarity and
album['community']['have'] > owners):
owners = album['community']['have']
discogs_id = album['id']
# If a better artist candidate is found
elif jw_similarity > similarity:
owners = album['community']['have']
discogs_id = album['id']
similarity = jw_similarity
# we haven't found the artist if the name is not similar enough
if similarity < 0.85:
return -1
return discogs_id | 1c8f0f870c1a0c6c71de115ae6a0d15cf235af6f | 8,308 |
def css_defaults(name, css_dict):
"""Находит первое значение по-умолчанию
background -> #FFF
color -> #FFF
content -> ""
"""
cur = css_dict.get(name) or css_dict.get(name[1:-1])
if cur is None:
return None
default = cur.get('default')
if default is not None:
return default
for v in cur['values']:
if v.startswith('<') and v.endswith('>'):
ret = css_defaults(v, css_dict)
if ret is not None:
return ret | 8418af5e27dfc85a3ec70dea2e7416595ee86a1f | 8,309 |
def yn_zeros(n,nt):
"""Compute nt zeros of the Bessel function Yn(x).
"""
return jnyn_zeros(n,nt)[2] | 384ebc8fec6109de36d3c17d265b53c01a2195b6 | 8,310 |
def get_chebi_parents(chebi_ent):
"""
Get parents of ChEBI entity
:param chebi_ent:
:return:
"""
if hasattr(chebi_ent, 'OntologyParents'):
return [ent.chebiId for ent in chebi_ent.OntologyParents if
(ent.type == 'is a')]
else:
return [] | bfdf3cbfae45c07a9f5f97a85f1c64f680ac49fc | 8,311 |
def average_saccades_time(saccades_times):
"""
:param saccades_times: a list of tuples with (start_time_inclusive, end_time_exclusive)
:return: returns the average time of saccades
"""
return sum([saccade_time[1] - saccade_time[0] for saccade_time in saccades_times]) / len(saccades_times) | a22a5d89ddd4317fa10ed6f5d920f17560028514 | 8,312 |
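# Usage sketch (not part of the snippet above): two saccades lasting 10 and 20
# time units average to 15.
print(average_saccades_time([(0, 10), (5, 25)]))  # -> 15.0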
from typing import Optional
from typing import List
from typing import Tuple
from timeit import default_timer
import logging
import numpy as np
def solve_tsp_local_search(
distance_matrix: np.ndarray,
x0: Optional[List[int]] = None,
perturbation_scheme: str = "two_opt",
max_processing_time: Optional[float] = None,
log_file: Optional[str] = None,
) -> Tuple[List, float]:
"""Solve a TSP problem with a local search heuristic
Parameters
----------
distance_matrix
Distance matrix of shape (n x n) with the (i, j) entry indicating the
distance from node i to j
x0
Initial permutation. If not provided, it starts with a random path
perturbation_scheme {"ps1", "ps2", "ps3", "ps4", "ps5", "ps6", ["two_opt"]}
Mechanism used to generate new solutions. Defaults to "two_opt"
max_processing_time {None}
Maximum processing time in seconds. If not provided, the method stops
only when a local minimum is obtained
log_file
If not `None`, creates a log file with details about the whole
execution
Returns
-------
A permutation of nodes from 0 to n - 1 that produces the least total
distance obtained (not necessarily optimal).
The total distance the returned permutation produces.
Notes
-----
Here are the steps of the algorithm:
1. Let `x`, `fx` be an initial solution permutation and its objective
value;
2. Perform a neighborhood search in `x`:
2.1 For each `x'` neighbor of `x`, if `fx'` < `fx`, set `x` <- `x'`
and stop;
3. Repeat step 2 until all neighbors of `x` are tried and there is no
improvement. Return `x`, `fx` as solution.
"""
x, fx = setup(distance_matrix, x0)
max_processing_time = max_processing_time or np.inf
if log_file:
fh = logging.FileHandler(log_file)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
logger.setLevel(logging.INFO)
tic = default_timer()
stop_early = False
improvement = True
while improvement and (not stop_early):
improvement = False
for n_index, xn in enumerate(neighborhood_gen[perturbation_scheme](x)):
if default_timer() - tic > max_processing_time:
logger.warning("Stopping early due to time constraints")
stop_early = True
break
fn = compute_permutation_distance(distance_matrix, xn)
logger.info(f"Current value: {fx}; Neighbor: {n_index}")
if fn < fx:
improvement = True
x, fx = xn, fn
break # early stop due to first improvement local search
return x, fx | f1b77b7fb3d1b83d18a7f2ba99d4a266e98f8462 | 8,313 |
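# Usage sketch (not part of the snippet above; assumes the module-level helpers it
# relies on -- setup, neighborhood_gen, compute_permutation_distance, logger -- exist,
# as in the python_tsp-style module this snippet appears to come from):
import numpy as np
distance_matrix = np.array([
[0, 5, 4, 10],
[5, 0, 8, 5],
[4, 8, 0, 3],
[10, 5, 3, 0],
])
permutation, distance = solve_tsp_local_search(distance_matrix)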
def split(self, split_size_or_sections, dim=0, copy=True):
"""Return the split chunks along the given dimension.
Parameters
----------
split_size_or_sections : Union[int, Sequence[int]]
The number or size of chunks.
dim : int, optional, default=0
The dimension to split.
copy : bool, optional, default=True
Copy or create the views of input.
Returns
-------
Sequence[dragon.vm.torch.Tensor]
The output tensors.
See Also
--------
`torch.split(...)`_
"""
return array_ops.split(self, split_size_or_sections, dim, copy) | cd6725af62fc0f5cde758e23add206a2ddb7c0af | 8,314 |
def HighFlowSingleInletTwoCompartmentGadoxetateModel(xData2DArray, Ve: float,
Kbh: float, Khe: float,
dummyVariable):
"""This function contains the algorithm for calculating how concentration varies with time
using the High Flow Single Inlet Two Compartment Gadoxetate Model model.
Input Parameters
----------------
xData2DArray - time and AIF concentration 1D arrays stacked into one 2D array.
Ve - Plasma Volume Fraction (decimal fraction)
Kbh - Biliary Efflux Rate (mL/min/mL)
Khe - Hepatocyte Uptake Rate (mL/min/mL)
Returns
-------
modelConcs - list of calculated concentrations at each of the
time points in array 'time'.
"""
try:
# Logging and exception handling function.
exceptionHandler.modelFunctionInfoLogger()
# In order to use lmfit curve fitting, time and concentration must be
# combined into one function input parameter, a 2D array, then separated into individual
# 1 D arrays
times = xData2DArray[:,0]
AIFconcentrations = xData2DArray[:,1]
Th = (1-Ve)/Kbh
modelConcs = []
modelConcs = (Ve*AIFconcentrations + Khe*Th*tools.expconv(Th, times, AIFconcentrations, 'HighFlowSingleInletTwoCompartmentGadoxetateModel'))
return(modelConcs)
# Exception handling and logging code.
except ZeroDivisionError as zde:
exceptionHandler.handleDivByZeroException(zde)
except Exception as e:
exceptionHandler.handleGeneralException(e) | 18684058926b9362b7a6b495cf1f48fd8c3188e4 | 8,315 |
import struct
import numpy
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
"""Read CZ_LSMINFO tag from file and return as dict."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError('invalid CZ_LSMINFO structure')
fh.seek(-8, 1)
if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
# adjust structure according to structure_size
lsminfo = []
size = 0
for name, dtype in TIFF.CZ_LSMINFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
lsminfo.append((name, dtype))
else:
lsminfo = TIFF.CZ_LSMINFO
lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
lsminfo = recarray2dict(lsminfo)
# read LSM info subrecords at offsets
for name, reader in TIFF.CZ_LSMINFO_READERS.items():
if reader is None:
continue
offset = lsminfo.get('Offset' + name, 0)
if offset < 8:
continue
fh.seek(offset)
try:
lsminfo[name] = reader(fh)
except ValueError:
pass
return lsminfo | 1bcf4d22315503e2f21fcbacd6a0797fc2fd16a7 | 8,316 |
from numpy import zeros
def mi_alignment(
alignment,
mi_calculator=mi,
null_value=DEFAULT_NULL_VALUE,
excludes=DEFAULT_EXCLUDES,
exclude_handler=None,
):
"""Calc mi over all position pairs in an alignment
alignment: the full alignment object
mi_calculator: a function which calculated MI from two entropies and
their joint entropy -- see mi and normalized_mi for examples
null_value: the value to be returned if mi cannot be calculated (e.g.,
if mi_calculator == normalized_mi and joint_h = 0.0)
excludes: iterable objects containing characters that require special
handling -- by default, if a position contains an exclude, null_value
will be returned. For non-default handling, pass an exclude_handler
exclude_handler: a function which takes a position and returns it
with exclude characters processed in some way.
"""
aln_length = len(alignment)
# Create result matrix
result = zeros((aln_length, aln_length), float)
# Compile positional entropies for each position in the alignment
# I believe I started using this rather than alignment.uncertainties
# b/c the latter relies on converting a ArrayAlignment to an Alignment --
# need to check into this.
positional_entropies = alignment.entropy_per_pos()
# Calculate pairwise MI between all pairs of alignment positions and
# store the results in the lower triangle of the result matrix.
for i in range(aln_length):
for j in range(i + 1):
result[i, j] = mi_pair(
alignment,
pos1=i,
pos2=j,
h1=positional_entropies[i],
h2=positional_entropies[j],
mi_calculator=mi_calculator,
null_value=null_value,
excludes=excludes,
exclude_handler=exclude_handler,
)
# copy the lower triangle to the upper triangle to make
# the matrix symmetric
ltm_to_symmetric(result)
return result | f576b8c4df018bba787c7c46091e52b70badd9de | 8,317 |
def Jaccard3d(a, b):
"""
This will compute the Jaccard Similarity coefficient for two 3-dimensional volumes
Volumes are expected to be of the same size. We are expecting binary masks -
0's are treated as background and anything else is counted as data
Arguments:
a {Numpy array} -- 3D array with first volume
b {Numpy array} -- 3D array with second volume
Returns:
float
"""
if len(a.shape) != 3 or len(b.shape) != 3:
raise Exception(f"Expecting 3 dimensional inputs, got {a.shape} and {b.shape}")
if a.shape != b.shape:
raise Exception(f"Expecting inputs of the same shape, got {a.shape} and {b.shape}")
# TASK: Write implementation of Jaccard similarity coefficient. Please do not use
# the Dice3D function from above to do the computation ;)
# <YOUR CODE GOES HERE>
overlap = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
for k in range(a.shape[2]):
if (a[i, j, k] != 0 and b[i, j, k] != 0):
overlap += 1
all_together = 0
for i in range(a.shape[0]):
for j in range(a.shape[1]):
for k in range(a.shape[2]):
if (a[i, j, k] != 0 or b[i, j, k] != 0):
all_together += 1
return overlap/all_together | a4452e523e484db50b99d36f9ee67c3508678ea6 | 8,318 |
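# Usage sketch (not part of the snippet above): with the loop-variable fix, two
# 1x1x3 binary masks overlapping in one voxel out of three give a Jaccard of 1/3.
import numpy as np
a = np.array([[[1, 1, 0]]])
b = np.array([[[0, 1, 1]]])
print(Jaccard3d(a, b))  # -> 0.333...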
def get_pod_obj(name, namespace=None):
"""
Returns the pod obj for the given pod
Args:
name (str): Name of the resources
Returns:
obj : A pod object
"""
ocp_obj = OCP(api_version='v1', kind=constants.POD, namespace=namespace)
ocp_dict = ocp_obj.get(resource_name=name)
pod_obj = Pod(**ocp_dict)
return pod_obj | 464aa15574ee65672f7963e6e5426753ff98ee72 | 8,319 |
import sys
import subprocess
def _input_password() -> str:
"""
Get password input by masking characters.
Similar to getpass() but works with cygwin.
"""
sys.stdout.write("Password :\n")
sys.stdout.flush()
subprocess.check_call(["stty", "-echo"])
password = input()
subprocess.check_call(["stty", "echo"])
return password | 8d3dbc3f6221f3a2558dab5617227b2f6e4940ca | 8,320 |
import os
def file_size(file_path):
"""Return the file size."""
if os.path.isfile(file_path):
file_info = os.stat(file_path)
return convert_bytes(file_info.st_size) | ed28923767f0a7e708d66cb06fba7ad8120b69d7 | 8,321 |
def median_rank(PESSI_SORT, OPTI_SORT, A):
"""
Calculates the median rank of each action.
:param PESSI_SORT: Dictionary containing the actions classified according to the pessimistic procedure.
:param OPTI_SORT: Dictionary containing the actions classified according to the optimistic procedure.
:param A: List containing the names of the actions as strings.
:return med_rank: Dictionary containing the median rank of each action. The keys are the names of the actions
and the values are the median ranks.
"""
med_rank = {}
for a in A:
med_rank[a] = (OPTI_SORT[1][a] + PESSI_SORT[1][a]) / 2
return med_rank | 7f760847ae2a69edf07a593a6ebfb84dce4c4103 | 8,322 |
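# Usage sketch (not part of the snippet above), assuming PESSI_SORT/OPTI_SORT keep
# the per-action ranks at index 1, as the function expects:
PESSI_SORT = {1: {'a1': 3, 'a2': 1}}
OPTI_SORT = {1: {'a1': 2, 'a2': 1}}
print(median_rank(PESSI_SORT, OPTI_SORT, ['a1', 'a2']))  # -> {'a1': 2.5, 'a2': 1.0}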
from typing import Optional
import json
def get_token(
event: ApiGatewayEvent,
_context: LambdaContext,
node_api: Optional[NodeApi] = None
) -> dict:
"""Get token details given a token uid.
*IMPORTANT: Any changes on the parameters should be reflected on the `cacheKeyParameters` for this method.
"""
node_api = node_api or NodeApi()
id = event.query.get("id")
if id is None:
raise ApiError("invalid_parameters")
response = node_api.get_token(id)
return {
"statusCode": 200,
"body": json.dumps(response or UNKNOWN_ERROR_MSG),
"headers": {
"Content-Type": "application/json"
}
} | 7be9e80aef60ad7f5befa3f21b597541eb79c4d0 | 8,323 |
import re
import rmgpy.molecule
import rmgpy.molecule.group
import rmgpy.data.base
def update_dictionary_entries(old_entries, need_to_add):
"""
Expects dictionary of species entries and
unique list of species (as SMILES) that need to be added
Creates new entries for the species that need to be added
Returns old and new entries
"""
need_to_add = list(set(need_to_add))  # deduplicate; the bare expression had no effect
for j, species in enumerate(need_to_add):
molecule = rmgpy.molecule.Molecule(smiles=species)
adjlist = molecule.to_adjacency_list()
multiplicity = None
if re.search('(?<=multiplicity ).*', adjlist):
multiplicity = int(
re.search('(?<=multiplicity ).*', adjlist).group(0))
adjlist = re.sub(r'multiplicity .*',
f'multiplicity [{multiplicity}]', adjlist)
group = rmgpy.molecule.group.Group()
group.from_adjacency_list(adjlist)
atom_counts = {}
rel_label = ''
for atom in ['C', 'H', 'O']:
count = species.count(atom)
if count > 0:
rel_label = rel_label + atom + str(count)
assert rel_label != ''
"""
3 Scenarios:
No old -> no need for ID number: max_ID = -1
Only one old -> needs to have ID of 1: max_ID = 0
Multiple old -> needs to have a unique ID: max_ID > 0
"""
new_ID = None
max_ID = -1
duplicate = False
for old_label in old_entries:
old_entry = old_entries[old_label]
if group.is_isomorphic(old_entry.item):
duplicate = True
print(f'{old_entry} found to be duplicate')
continue
if rel_label not in old_label:
continue
if rel_label == old_label and max_ID == -1:
# At least one with the same label
max_ID = 0
if old_label.find('-') > 0:
old_label, ID_str = old_label.split('-')
ID = int(ID_str)
if old_label == rel_label and ID > max_ID:
# Multiple existing labels
max_ID = ID
if max_ID > -1:
# Existing label
new_ID = max_ID + 1
rel_label = rel_label + '-' + str(new_ID)
if not duplicate:
entry = rmgpy.data.base.Entry()
entry.label = rel_label
entry.item = group
assert rel_label not in list(old_entries.keys())
old_entries[rel_label] = entry
entry_labels = [old_entries[key].label for key in old_entries]
assert len(entry_labels) == len(list(set(entry_labels))
), 'Non-unique labels in dictionary'
return old_entries | 9182c42349b76a7e72c3c1c134cb347ed0bd2a2d | 8,324 |
import optparse
import os
import sys
def ParseOptions():
"""Parses the options passed to the program.
@return: Options and arguments
"""
parser = optparse.OptionParser(usage="%prog [--no-backup]",
prog=os.path.basename(sys.argv[0]))
parser.add_option(cli.DEBUG_OPT)
parser.add_option(cli.VERBOSE_OPT)
parser.add_option(cli.YES_DOIT_OPT)
parser.add_option("--no-backup", dest="backup", default=True,
action="store_false",
help="Whether to create backup copies of deleted files")
(opts, args) = parser.parse_args()
return VerifyOptions(parser, opts, args) | f697bc76956b30fbe9836cd8ba4fce40d726968c | 8,325 |
import numpy as np
from scipy.spatial import distance
def four_rooms(dims, doorway=1.):
"""
Args:
dims: [dimx, dimy] dimensions of rectangle
doorway: size of doorway
Returns:
adjmat: adjacency matrix
xy: xy coordinates of each state for plotting
labels: empty []
"""
half_x, half_y = (dims[0]*.5, dims[1]*.5)
quarter_x, quarter_y = (dims[0]*.25, dims[1]*.25)
threequarter_x, threequarter_y = (dims[0]*.75, dims[1]*.75)
adj, xy, _ = rectangle_mesh(dims)
room = np.array([xy[:,0] < half_x, xy[:,1] < half_y], dtype=np.float32).T
mask = np.array(distance.squareform(distance.pdist(room, "euclidean")) == 0, dtype=np.float32)
labels = np.sum(room * np.array([[1, 2]]), 1)
doorsx = [quarter_x, threequarter_x, half_x, half_x]
doorsy = [half_y, half_y, quarter_y, threequarter_y]
doors = np.array([doorsx, doorsy]).T
inds = []
for d in doors:
dist_to_door = np.sum(np.abs(xy - d[None, :]), 1)
ind = np.where(dist_to_door == np.min(dist_to_door))[0]
if len(ind) > 1: ind = ind[0]
mask[ind, :] = 1
mask[:, ind] = 1
adj = adj * mask
return adj, xy, labels | 0744ab4b38ab0b5d0b96c53d45c88dc1e37f932e | 8,326 |
from urllib import parse
def get_verse_url(verse: str) -> str:
"""Creates a URL for the verse text."""
node = CONNECTIONS[verse]
volume = scripture_graph.VOLUMES_SHORT[node['volume']].lower()
if volume == 'bom':
volume = 'bofm'
elif volume == 'd&c':
volume = 'dc-testament'
elif volume == 'pogp':
volume = 'pgp'
book = node['book'].lower()
book_replacements = {
' ': '-',
'.': '',
'&': '',
'—': '-',
}
for old, new in book_replacements.items():
book = book.replace(old, new)
if book == 'd&c':
book = 'dc'
chapter = node['chapter']
i = node['verse']
return parse.urljoin(URL_BASE,
f'{volume}/{book}/{chapter}.{i}?lang=eng#p{i}#{i}') | 37ce47aa6e18e3f550e9adacb3bf16affb6154f8 | 8,327 |
from typing import cast
from typing import List
def get_ws_dependency_annotation(state: GlobalState) -> WSDependencyAnnotation:
""" Returns the world state annotation
:param state: A global state object
"""
annotations = cast(
List[WSDependencyAnnotation],
list(state.world_state.get_annotations(WSDependencyAnnotation)),
)
if len(annotations) == 0:
annotation = WSDependencyAnnotation()
state.world_state.annotate(annotation)
else:
annotation = annotations[0]
return annotation | ba44455594c4a1f63dac5adec95b2efe6a4b2af6 | 8,328 |
def get_gin_confg_strs():
"""
Obtain both the operative and inoperative config strs from gin.
The operative configuration consists of all parameter values used by
configurable functions that are actually called during execution of the
current program, and inoperative configuration consists of all parameter
configured but not used by configurable functions. See
``gin.operative_config_str()`` and ``gin_utils.inoperative_config_str`` for
more detail on how the config is generated.
Returns:
tuple:
- md_operative_config_str (str): a markdown-formatted operative str
- md_inoperative_config_str (str): a markdown-formatted inoperative str
"""
operative_config_str = gin.operative_config_str()
md_operative_config_str = _markdownify_gin_config_str(
operative_config_str,
'All parameter values used by configurable functions that are actually called'
)
md_inoperative_config_str = gin_utils.inoperative_config_str()
if md_inoperative_config_str:
md_inoperative_config_str = _markdownify_gin_config_str(
md_inoperative_config_str,
"All parameter values configured but not used by program. The configured "
"functions are either not called or called with explicit parameter values "
"overriding the config.")
return md_operative_config_str, md_inoperative_config_str | 9f9081aafa6a4a43be37edd4002ee17ac518f5d4 | 8,329 |
def L(x, c, gamma):
"""Return c-centered Lorentzian line shape at x with HWHM gamma"""
return gamma / (np.pi * ((x - c) ** 2 + gamma ** 2)) | 853ba2c978a50f9f43915342caebed2e3d5ead8d | 8,330 |
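# Usage sketch (not part of the snippet above): a Lorentzian line shape integrates
# to ~1 over the real line, which a quick numerical check illustrates.
import numpy as np
x = np.linspace(-1000, 1000, 2_000_001)
area = np.trapz(L(x, c=0.0, gamma=2.0), x)
print(round(area, 3))  # close to 1.0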
import socket
import logging
def request_data_from_weather_station():
"""
Send a command to the weather station to get current values.
Returns
-------
bytes
received data, 0 if error occurred
"""
sock = socket.create_connection((WEATHER_HOST, WEATHER_PORT), GRAPHITE_TIMEOUT)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
data = 0
try:
sock.send(CMD_ACT)
data = sock.recv(1024)
except:
logging.error('Error getting data from weather station!')
finally:
sock.close()
if check_crc(data):
return data
logging.error('CRC failed! \r\n Data: %s', data)
return 0 | 0bf28c3cf5db1f14446aa9866b9854b435378439 | 8,331 |
def solution2(arr):
"""improved solution1 #TLE """
if len(arr) == 1:
return arr[0]
max_sum = float('-inf')
l = len(arr)
for i in range(l):
local_sum = arr[i]
local_min = arr[i]
max_sum = max(max_sum, local_sum)
for j in range(i + 1, l):
local_sum += arr[j]
local_min = min(local_min, arr[j])
max_sum = max([max_sum, local_sum, local_sum - local_min])
return max_sum | 835240bb4f70e5b6425a6ac0d2a4210e2c8a0ad0 | 8,332 |
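# Usage sketch (not part of the snippet above): dropping the -2 from [1, -2, 0, 3]
# leaves 1 + 0 + 3 = 4, which is what the brute-force scan above finds.
print(solution2([1, -2, 0, 3]))  # -> 4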
def hillas_parameters_4(pix_x, pix_y, image, recalculate_pixels=True):
"""Compute Hillas parameters for a given shower image.
As for hillas_parameters_3 (old Whipple Fortran code), but more Pythonized
MP: Parameters calculated as Whipple Reynolds et al 1993 paper:
http://adsabs.harvard.edu/abs/1993ApJ...404..206R
which should be the same as one of my ICRC 1991 papers and my thesis.
Parameters
----------
pix_x : array_like
Pixel x-coordinate
pix_y : array_like
Pixel y-coordinate
image : array_like
Pixel values corresponding
recalculate_pixels : Boolean (default True)
Recalculate the pixel higher multiples (e.g., if pixels move
(!) or pixel list changes between calls)
Returns
-------
hillas_parameters : `MomentParameters`
"""
if type(pix_x) == Quantity:
unit = pix_x.unit
assert pix_x.unit == pix_y.unit
else:
unit = 1.0
''' MP: Actually, I don't know why we need to strip the units...
shouldn't the calculations all work with them?'''
pix_x = Quantity(np.asanyarray(pix_x, dtype=np.float64)).value
pix_y = Quantity(np.asanyarray(pix_y, dtype=np.float64)).value
image = np.asanyarray(image, dtype=np.float64)
assert pix_x.shape == image.shape
assert pix_y.shape == image.shape
(sumsig, sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) = np.zeros(10)
# Call static_xy to initialize the "static variables"
# Actually, would be nice to just call this if we
# know the pixel positions have changed
static_xy(pix_x, pix_y, recalculate_pixels)
sumsig = image.sum()
sumxsig = (image * pix_x).sum()
sumysig = (image * pix_y).sum()
sumx2sig = (image * static_xy.pix_x2).sum()
sumy2sig = (image * static_xy.pix_y2).sum()
sumxysig = (image * static_xy.pix_xy).sum()
sumx3sig = (image * static_xy.pix_x3).sum()
sumx2ysig = (image * static_xy.pix_x2y).sum()
sumxy2sig = (image * static_xy.pix_xy2).sum()
sumy3sig = (image * static_xy.pix_y3).sum()
sumx4sig = (image * static_xy.pix_x4).sum()
sumx3ysig = (image * static_xy.pix_x3y).sum()
sumx2y2sig = (image * static_xy.pix_x2y2).sum()
sumxy3sig = (image * static_xy.pix_xy3).sum()
sumy4sig = (image * static_xy.pix_y4).sum()
if sumsig == 0.0:
raise (HillasParameterizationError(("Empty pixels!"
"Cannot calculate image parameters."
"Exiting...")))
xm = sumxsig / sumsig
ym = sumysig / sumsig
x2m = sumx2sig / sumsig
y2m = sumy2sig / sumsig
xym = sumxysig / sumsig
x3m = sumx3sig / sumsig
x2ym = sumx2ysig / sumsig
xy2m = sumxy2sig / sumsig
y3m = sumy3sig / sumsig
x4m = sumx4sig / sumsig
x3ym = sumx3ysig / sumsig
x2y2m = sumx2y2sig / sumsig
xy3m = sumxy3sig / sumsig
y4m = sumy4sig / sumsig
'''Doing this should be same as above, but its 4us slower !?
(xm, ym, x2m, y2m, xym, x3m, x2ym, xy2m, y3m) = \
(sumxsig, sumysig, sumx2sig, sumy2sig, sumxysig, sumx3sig,
sumx2ysig, sumxy2sig, sumy3sig) / sumsig'''
xm2 = xm * xm
ym2 = ym * ym
xmym = xm * ym
vx2 = x2m - xm2
vy2 = y2m - ym2
vxy = xym - xmym
vx3 = x3m - 3.0 * xm * x2m + 2.0 * xm2 * xm
vx2y = x2ym - x2m * ym - 2.0 * xym * xm + 2.0 * xm2 * ym
vxy2 = xy2m - y2m * xm - 2.0 * xym * ym + 2.0 * xm * ym2
vy3 = y3m - 3.0 * ym * y2m + 2.0 * ym2 * ym
d = vy2 - vx2
dist = np.sqrt(xm2 + ym2) # could use hypot(xm,ym), but already have squares
phi = np.arctan2(ym, xm)
# -- simpler formulae for length & width suggested CA 901019
z = np.hypot(d, 2.0 * vxy)
length = np.sqrt((vx2 + vy2 + z) / 2.0)
width = np.sqrt((vy2 + vx2 - z) / 2.0)
# -- simpler formula for miss introduced CA, 901101
# -- revised MP 910112
if z == 0.0:
miss = dist
else:
uu = 1 + d / z
vv = 2 - uu
miss = np.sqrt((uu * xm2 + vv * ym2) / 2.0 - xmym * (2.0 * vxy / z))
'''Change to faster caluclation of psi and avoid inaccuracy for hyp
psi = np.arctan2((d + z) * ym + 2.0 * vxy * xm, 2.0 *vxy * ym - (d - z) * xm)
hyp = np.sqrt(2 * z * (z + d)) #! should be simplification of sqrt((d+z)
**2+(2*vxy)**2 ... but not accurate!
hyp = np.hypot(d + z,2 * vxy)
psi = np.arctan2(d + z, 2 * vxy)
cpsi = np.cos(psi)
spsi = np.sin(psi)'''
tanpsi_numer = (d + z) * ym + 2.0 * vxy * xm
tanpsi_denom = 2.0 * vxy * ym - (d - z) * xm
psi = np.arctan2(tanpsi_numer, tanpsi_denom)
# Code to de-interface with historical code
size = sumsig
m_x = xm
m_y = ym
length = length
r = dist
# Note, "skewness" is the same as the Whipple/MP "asymmetry^3", which is fine.
# ... and also, Whipple/MP "asymmetry" * "length" = MAGIC "asymmetry"
# ... so, MAGIC "asymmetry" = MAGIC "skewness"^(1/3) * "length"
# I don't know what MAGIC's "asymmetry" is supposed to be.
# -- Asymmetry and other higher moments
if length != 0.0:
vx4 = x4m - 4.0 * xm * x3m + 6.0 * xm2 * x2m - 3.0 * xm2 * xm2
vx3y = x3ym - 3.0 * xm * x2ym + 3.0 * xm2 * xym - x3m * ym \
+ 3.0 * x2m * xmym - 3.0 * xm2 * xm * ym
vx2y2 = x2y2m - 2.0 * ym * x2ym + x2m * ym2 \
- 2.0 * xm * xy2m + 4.0 * xym * xmym + xm2 * y2m - 3.0 * xm2 * ym2
vxy3 = xy3m - 3.0 * ym * xy2m + 3.0 * ym2 * xym - y3m * xm \
+ 3.0 * y2m * xmym - 3.0 * ym2 * ym * xm
vy4 = y4m - 4.0 * ym * y3m + 6.0 * ym2 * y2m - 3.0 * ym2 * ym2
hyp = np.hypot(tanpsi_numer, tanpsi_denom)
if hyp != 0.:
cpsi = tanpsi_denom / hyp
spsi = tanpsi_numer / hyp
else:
cpsi = 1.
spsi = 0.
cpsi2 = cpsi * cpsi
spsi2 = spsi * spsi
cspsi = cpsi * spsi
sk3bylen3 = (vx3 * cpsi * cpsi2 +
3.0 * vx2y * cpsi2 * spsi +
3.0 * vxy2 * cpsi * spsi2 +
vy3 * spsi * spsi2)
asym = np.copysign(np.power(np.abs(sk3bylen3), 1. / 3.), sk3bylen3) / length
skewness = asym * asym * asym # for MP's asym... (not for MAGIC asym!)
# Kurtosis
kurt = (vx4 * cpsi2 * cpsi2 +
4.0 * vx3y * cpsi2 * cspsi +
6.0 * vx2y2 * cpsi2 * spsi2 +
4.0 * vxy3 * cspsi * spsi2 +
vy4 * spsi2 * spsi2)
kurtosis = kurt / (length * length * length * length)
else: # Skip Higher Moments
asym = 0.0
psi = 0.0
skewness = 0.0
kurtosis = 0.0
# Azwidth not used anymore
# # -- Akerlof azwidth now used, 910112
# d = y2m - x2m
# z = np.sqrt(d * d + 4 * xym * xym)
# azwidth = np.sqrt((x2m + y2m - z) / 2.0)
return MomentParameters(size=size, cen_x=m_x * unit, cen_y=m_y * unit,
length=length * unit, width=width * unit, r=r * unit,
phi=Angle(phi * u.rad),
psi=Angle(psi * u.rad),
miss=miss * unit,
skewness=skewness, kurtosis=kurtosis) | 87fa302b6e6b1b81b66d8e8fb7cc4e34da1583d9 | 8,333 |
from typing import List
from typing import Optional
def create_intrusion_set(
name: str,
aliases: List[str],
author: Identity,
primary_motivation: Optional[str],
secondary_motivations: List[str],
external_references: List[ExternalReference],
object_marking_refs: List[MarkingDefinition],
) -> IntrusionSet:
"""Create an intrusion set."""
return IntrusionSet(
created_by_ref=author,
name=name,
aliases=aliases,
primary_motivation=primary_motivation,
secondary_motivations=secondary_motivations,
labels=["intrusion-set"],
external_references=external_references,
object_marking_refs=object_marking_refs,
) | be8df574ac1be08c724620cf20495922cff5918e | 8,334 |
from ..core import Tensor
def broadcast_to(tensor, shape):
"""Broadcast an tensor to a new shape.
Parameters
----------
tensor : array_like
The tensor to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : Tensor
Raises
------
ValueError
If the tensor is not compatible with the new shape according to Mars's
broadcasting rules.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([1, 2, 3])
>>> mt.broadcast_to(x, (3, 3)).execute()
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
tensor = tensor if isinstance(tensor, Tensor) else astensor(tensor)
shape = tuple(shape) if isinstance(shape, (list, tuple)) else (shape,)
if any(np.isnan(s) for s in tensor.shape):
raise ValueError('input tensor has unknown shape, '
'need to call `.execute()` first')
if tensor.shape == shape:
return tensor
new_ndim = len(shape) - tensor.ndim
if new_ndim < 0:
raise ValueError('input operand has more dimensions than allowed by the axis remapping')
if any(o != n for o, n in zip(tensor.shape, shape[new_ndim:]) if o != 1):
raise ValueError('operands could not be broadcast together '
'with remapped shapes [original->remapped]: {0} '
'and requested shape {1}'.format(tensor.shape, shape))
op = TensorBroadcastTo(shape, dtype=tensor.dtype, sparse=tensor.issparse())
return op(tensor, shape) | 3c738227f98d4ca8a6b1c0cc98cea769b697a987 | 8,335 |
def admin_required(handler_method):
"""Require that a user be an admin.
To use it, decorate your method like this::
@admin_required
def get(self):
...
"""
@wraps(handler_method)
def check_admin(*args, **kwargs):
"""Perform the check."""
if current_user.is_anonymous:
return redirect(url_for('home.login'))
if current_user.is_admin:
return handler_method(*args, **kwargs)
abort(401)
return check_admin | 03cc9e9cd32ab0b239f45c70fffcd85108b0173c | 8,336 |
import requests
def get(path):
"""Get."""
verify()
resp = requests.get(f"{URL}{path}", headers=auth)
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as e:
error_msg(str(e))
return
return resp.json() | c1661ac0e07ff467f15a429fe6fbf09d53a34ef9 | 8,337 |
def combine_dataframes(dfs: [pd.DataFrame]) -> pd.DataFrame:
"""
Receives a list of DataFrames and concatenates them. They must all have the same header.
:param dfs: List of DataFrames
:return: Single concatenated DataFrame
"""
df = pd.concat(dfs, sort=False)
return df | b7c1cd94870638a3a975ea7c4f9c284cbd4ee0a9 | 8,338 |
import os
def df_to_hdf5(df, key, dir_path):
"""
Save the DataFrame object as an HDF5 file. The file is stored
in the directory specified and uses the key for the filename
and 'h5' as the extension.
:param df: DataFrame to save as a file
:param key: ID for storage and retrieval
:param dir_path: Directory to store the HDF5 data file
"""
file_path = os.path.join(dir_path, key + '.h5')
df.to_hdf(file_path, key, complevel=9, complib='zlib')
return file_path | aaf7e2d9f64da6f53b2fc908cc59d9331172c614 | 8,339 |
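# Usage sketch (not part of the snippet above; assumes pandas plus an HDF5 backend
# such as PyTables is installed, since DataFrame.to_hdf requires one):
import pandas as pd
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
path = df_to_hdf5(df, "example_key", ".")  # writes ./example_key.h5 and returns its path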
def _bound_accumulated_rotations(robot_name, command_dicts):
"""
Checks axes whose rotations have been accumulated to ensure they've not
exceeded the stated limits. If they have, this function attempts to slide
the commands by +/- 360 degrees. If the limits are still exceeded, this
function returns the commands that exceed the limits by the least amount
:param robot_name:
:param animation_settings:
:param command_dicts:
:return:
"""
# TODO: Do this check using userOptions instead...
# Get axes, if they exist
command_axes = []
for command_dict in command_dicts:
axes = command_dict[postproc.AXES] if postproc.AXES in command_dict else None
command_axes.append(list(axes))
reconcile_axes = mimic_utils.get_reconcile_axes(robot_name)
rotation_limits = mimic_utils.get_all_limits(robot_name)['Position']
# Make sure the user has selected use of axes
if not all(x is None for x in command_axes):
for i, reconcile_axis in enumerate(reconcile_axes):
if reconcile_axis:
valid_solutions = []
axis_number = i + 1 # Axis numbers are 1-indexed
axis_name = 'Axis {}'.format(axis_number)
# Get the axis limits
limit_min = rotation_limits[axis_name]['Min Limit']
limit_max = rotation_limits[axis_name]['Max Limit']
# Create a list of commands for the axis to be checked
axis_vals_init = [axis[i] for axis in command_axes]
axis_min = min(axis_vals_init)
axis_max = max(axis_vals_init)
'''
print "#######################################################"
print "Initial Axis {} vals: ".format(i+1), axis_vals_init
print "Axis Min Limit: ", limit_min
print "Axis Max Limit: ", limit_max
print "Axis Min: ", axis_min
print "Axis Max: ", axis_max
'''
# If both the max and min axis values exceed their respective
# limits, then there's nothing we can do about it, so we don't
# modify the commands
if axis_min < limit_min and axis_max > limit_max:
# print '## Both limits exceeded, but no shift'
continue
# If no limits are violated, add the axes to the list of valid solutions
if axis_min >= limit_min and axis_max <= limit_max:
valid_solutions.append(axis_vals_init)
# Get the shifted axes and append them to valid_solutions if they're valid (i.e. not 'None')
axis_vals_shift = _shift_accumulated_axes(axis_vals_init, limit_max, limit_min)
if axis_vals_shift:
valid_solutions.append(axis_vals_shift)
# If we have no valid solutions, continue with the initial solution
if len(valid_solutions) == 0:
# print 'No valid solutions, returning initial solutions'
sol = axis_vals_init
# If we only have one valid solution, we can return that solution
elif len(valid_solutions) == 1:
# print 'Only one valid solution'
sol = valid_solutions[0]
# If we have two valid solutions, prompt the user to pick which one they want
# if they have the option checked on the program UI, otherwise, return the
# first solution
else:
# print 'Two valid solutions -> user choice'
prompt_opt = pm.checkBox('cb_promptOnRedundantSolutions', value=True, query=True)
# If the user option for this feature is selected, prompt the user
if prompt_opt:
user_selection = _get_bounded_solution_user_input(valid_solutions, axis_number)
sol = valid_solutions[user_selection]
# Otherwise, continue with the initial solution
else:
sol = axis_vals_init
# Drop the final solution back into the command_dict
for command_index in range(len(command_dicts)):
command_axes[command_index][i] = sol[command_index]
reconciled_axes = postproc.Axes(*command_axes[command_index])
command_dicts[command_index][postproc.AXES] = reconciled_axes
return command_dicts | 68ca1357178563975af9a3baeb1ba89177993af4 | 8,340 |
def get_email_manager(config: CFG, session: Session):
"""
:return: EmailManager instance
"""
# TODO: Find a way to import properly without cyclic import
smtp_config = SmtpConfiguration(
config.EMAIL__NOTIFICATION__SMTP__SERVER,
config.EMAIL__NOTIFICATION__SMTP__PORT,
config.EMAIL__NOTIFICATION__SMTP__USER,
config.EMAIL__NOTIFICATION__SMTP__PASSWORD,
)
return EmailManager(config=config, smtp_config=smtp_config, session=session) | f5a7a3c934912e346b70ad12b475fd45d4d7069f | 8,341 |
import numpy
def _approx_sp(salt,pres):
"""Approximate TDl at SP.
Approximate the temperature and liquid water density of sea-ice with
the given salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:returns: Temperature and liquid water density (both in SI units).
"""
CDIF = _CLIQ-_CICE
R0 = _LILTP/_TTP / CDIF
r1 = (pres-_PTPE) * (_VITP-_VLTP)/_TTP / CDIF
r2 = _RSAL*salt / CDIF
w = -(1 - R0 + r1) * numpy.exp(-(1 - R0 - r2))
negz = 1 - (1 + _E*w)**_C_SP
temp = (1 - R0 + r1)*_TTP/negz
dliq = _dliq_default(temp,pres)
return temp, dliq | b45727ad6f08cead1eb32477c8691233c38e9387 | 8,342 |
def _get_connection_params(resource):
"""Extract connection and params from `resource`."""
args = resource.split(";")
if len(args) > 1:
return args[0], args[1:]
else:
return args[0], [] | 87cdb607027774d58d1c3bf97ac164c48c32395c | 8,343 |
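# Usage sketch (not part of the snippet above): the part before the first ';' is the
# connection string, the rest are parameters.
print(_get_connection_params("server=db;opt1;opt2"))  # -> ('server=db', ['opt1', 'opt2'])
print(_get_connection_params("server=db"))            # -> ('server=db', [])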
import subprocess
def download_archive(url, out_path):
"""Downloads a file from the specified URL to the specified path on disk."""
return subprocess.call(['curl', url, '-o', out_path]) | e3c59f542a8fa662169d74428ed98dbf79d3d705 | 8,344 |
from typing import Union
import requests
from typing import List
from typing import Dict
from typing import Any
from typing import Callable
from typing import cast
import operator
def listdata(
resp: Union[requests.Response, List[Dict[str, Any]]],
*keys: Union[str, Callable[[], bool]],
sort: Union[bool, str] = True,
full: bool = False, # returns dicts instead of tuples
) -> List[tuple]:
"""Return data from a given requests.Response object.
Only non reserved fields are returned.
By default data are converted to List[Tuple[Any]], but if `full` is True,
then List[Dict[str, Any] is returned.
Usage:
>>> data = [
... {'_id': 1, 'odd': 1, 'even': 2},
... {'_id': 2, 'odd': 3, 'even': 4},
... {'_id': 3, 'odd': 5, 'even': 6},
... ]
>>> listdata(data)
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, 'even')
[2, 4, 6]
>>> listdata(data, 'odd', 'even')
[
(1, 2),
(3, 4),
(5, 6),
]
>>> listdata(data, full=True)
[
{'odd': 1, 'even': 2},
{'odd': 3, 'even': 4},
{'odd': 5, 'even': 6},
]
"""
# Prepare data
if isinstance(resp, list):
data = resp
elif resp.headers['content-type'].startswith('text/html'):
data = resp.context
assert resp.status_code == 200, pformat(data)
assert 'data' in data, pformat(data)
assert 'header' in data, pformat(data)
header = data['header']
keys = keys or [k for k in header if not k.startswith('_')]
data = [
{k: v.value for k, v in zip(header, row)}
for row in cast(List[List[Cell]], data['data'])
]
else:
data = resp.json()
assert resp.status_code == 200, pformat(data)
assert '_data' in data, pformat(data)
data = data['_data']
keys = keys or sorted({
k
for d in flatten(data)
for k in d
if not k.startswith('_')
})
# Clean data
if full:
data = [take(keys, row) for row in data]
elif len(keys) == 1:
k = keys[0]
data = [take(k, row) for row in data]
else:
data = [tuple(take(k, row) for k in keys) for row in data]
# Sort
if sort is True:
data = sorted(data, key=str)
elif sort:
if full:
sort_key = operator.itemgetter(sort)
else:
sort_key = operator.itemgetter(keys.index(sort))
data = sorted(data, key=sort_key)
return data | 878df7c56f97a3fe2c92499955bb760888673bbc | 8,345 |
import os
import yaml
def create_chart_data(start_path, excluded_dashboards=excluded_dashboards):
"""Read chart names and SQL code from the repository.
Args:
start_path (str): "./dashboards"
excluded_dashboards (list): list of dashboards to exclude from testing (e.g. WIP, Untitled, etc)
Returns:
chart_results (array of dict): contains name, dashboard owner and SQL code for each chart
"""
chart_results = []
print("Processing the charts data...\n")
for path, _, files in os.walk(start_path):
for filename in files:
if ('sql' in filename) and ('text' not in filename) and all(dashboard not in path for dashboard in excluded_dashboards):
chart_dict = {}
path_sql = os.path.join(path, filename)
dashboard_with_id = path_sql.split('/')[2]
chart_with_id = path_sql.split('/')[3]
chart_dict["NAME"] = 'chart_' + dashboard_with_id.split('.')[0] + '_' + chart_with_id.replace(".", "_")
try:
with open(path_sql) as f:
chart_dict['SQL_CODE'] = f.read()
except Exception as e:
print(e)
path_chart_yaml = os.path.join(path, filename.replace(".sql", ".yaml"))
try:
with open(path_chart_yaml) as f:
parsed_yaml_file = yaml.load(f, Loader=yaml.FullLoader)
chart_name = parsed_yaml_file['display_name']
except Exception as e:
print(e)
path_dashboard_yaml = os.path.join(start_path, dashboard_with_id, dashboard_with_id.split('.')[0] + '.yaml')
try:
with open(path_dashboard_yaml) as f:
parsed_yaml_file = yaml.load(f, Loader=yaml.FullLoader)
dashboard_name = parsed_yaml_file['display_name']
chart_dict["OWNER"] = parsed_yaml_file['dashboard_preferences']['settings']['owner'] or "No Owner"
chart_dict["BI_NAME"] = (dashboard_name + ": " + chart_name) or "No Name"
except Exception as e:
chart_dict["OWNER"] = "No Owner"
chart_dict["BI_NAME"] = "No Name"
print(e)
chart_results.append(chart_dict)
return chart_results | 7c3de75b033f2467058fea2843bcc21a0488ab59 | 8,346 |
def get_current_pkg():
"""
Returns:
        Package name (str): always returned in uppercase.
"""
return eval_foreign_vm_copy("(send *package* :name)") | 16c768dace7a4e88f7d7eb21aab58ce917f2ce43 | 8,347 |
def _normalise_trigger(value: float) -> float:
"""
Helper function used to normalise the controller trigger values into a common range.
:param value: Value to be normalised
:raises: ValueError
:return: Normalised value
"""
return _normalise(value, _HARDWARE_TRIGGER_MIN, _HARDWARE_TRIGGER_MAX, _INTENDED_TRIGGER_MIN, _INTENDED_TRIGGER_MAX) | d5653da9f625896865a3fd9601d3f2707cba6e8c | 8,348 |
def full_process(s):
"""Process string by
-- removing all but letters and numbers
-- trim whitespace
-- force to lower case"""
if s is None:
return ""
    # Here we will force a return of "" if s is None, empty, or not valid
#Merged from validate_string
try:
s = unicode(s)
len(s) > 0
except TypeError:
return ""
# Keep only Letters and Numbers (see Unicode docs).
string_out = StringProcessor.replace_with_whitespace(s)
# Force into lowercase.
string_out = StringProcessor.to_lower_case(string_out)
# Remove leading and trailing whitespaces.
string_out = StringProcessor.strip(string_out)
return string_out | 071ba938708f170b914576895f7cab1aa8cb1cc3 | 8,349 |
import math
def ldexp(space, x, i):
"""ldexp(x, i) -> x * (2**i)
"""
return math2(space, math.ldexp, x, i) | ef083d77ff36acbc7d7dfede0772e9e8bf34b17a | 8,350 |
def menu():
"""
Print a menu with all the functionalities.
Returns:
The choice of the user.
"""
print "=" * 33 + "\nMENU\n" + "=" * 33
descriptions = ["Load host from external file",
"Add a new host",
"Print selected hosts",
"Check active hosts",
"Select only active hosts",
"Select bots",
"Execute command locally",
"Execute command on bots",
"Run external script",
"Open shell in a host",
"Exit"]
for num, func in enumerate(descriptions):
print "[" + str(num) + "] " + func
choice = raw_input(">>> ")
return choice | 29bdce7c50cea7d9bbc5a27b71c803db91fe4eef | 8,351 |
from typing import Optional
from typing import Container
from typing import List
async def objects_get(bucket: Optional[str] = None,
index: Index = Depends(Provide[Container.index]),
buckets: Buckets = Depends(Provide[Container.buckets])) -> List[Object]:
"""
searches for objects
"""
if not bucket:
return index.get_all()
buckets.validate_bucket(bucket)
return index.get_all(bucket) | f6949922ac5c355469fbaf450758180ac422f33a | 8,352 |
import json
def thumbnail_create(request, repo_id):
"""create thumbnail from repo file list
return thumbnail src
"""
content_type = 'application/json; charset=utf-8'
result = {}
repo = get_repo(repo_id)
if not repo:
err_msg = _(u"Library does not exist.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
path = request.GET.get('path', None)
if not path:
err_msg = _(u"Invalid arguments.")
return HttpResponse(json.dumps({"error": err_msg}), status=400,
content_type=content_type)
if repo.encrypted or not ENABLE_THUMBNAIL or \
check_folder_permission(request, repo_id, path) is None:
err_msg = _(u"Permission denied.")
return HttpResponse(json.dumps({"error": err_msg}), status=403,
content_type=content_type)
size = request.GET.get('size', THUMBNAIL_DEFAULT_SIZE)
success, status_code = generate_thumbnail(request, repo_id, size, path)
if success:
src = get_thumbnail_src(repo_id, size, path)
result['encoded_thumbnail_src'] = urlquote(src)
return HttpResponse(json.dumps(result), content_type=content_type)
else:
err_msg = _('Failed to create thumbnail.')
return HttpResponse(json.dumps({'err_msg': err_msg}),
status=status_code, content_type=content_type) | 876f9af7d61336f0f91f0b7277943265fb6e7a35 | 8,353 |
import asyncio
def initialize_event_loop():
    """Attempt to use uvloop, falling back to the default event loop policy."""
    try:
        import uvloop  # import inside the try block so a missing uvloop is caught below
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
return asyncio.get_event_loop() | cba229f5330bc89a60607f9523d3727b6db30094 | 8,354 |
def _setup_pgops(multi_actions=False,
normalise_entropy=False,
sequence_length=4,
batch_size=2,
num_mvn_actions=3,
num_discrete_actions=5):
"""Setup polices, actions, policy_vars and (optionally) entropy_scale_op."""
t = sequence_length
b = batch_size
a = num_mvn_actions
c = num_discrete_actions
# MVN actions
mu = tf.placeholder(tf.float32, shape=(t, b, a))
sigma = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_policies = tfp.distributions.MultivariateNormalDiag(
loc=mu, scale_diag=sigma)
mvn_actions = tf.placeholder(tf.float32, shape=(t, b, a))
mvn_params = [mu, sigma]
if multi_actions:
# Create a list of n_cat Categorical distributions
n_cat = 2
cat_logits = [tf.placeholder(tf.float32, shape=(t, b, c))
for _ in xrange(n_cat)]
cat_policies = [tfp.distributions.Categorical(logits=logits)
for logits in cat_logits]
cat_actions = [tf.placeholder(tf.int32, shape=(t, b))
for _ in xrange(n_cat)]
cat_params = [[logits] for logits in cat_logits]
# Create an exponential distribution
exp_rate = tf.placeholder(tf.float32, shape=(t, b))
exp_policies = tfp.distributions.Exponential(rate=exp_rate)
exp_actions = tf.placeholder(tf.float32, shape=(t, b))
exp_params = [exp_rate]
# Nest all policies and nest corresponding actions and parameters
policies = [mvn_policies, cat_policies, exp_policies]
actions = [mvn_actions, cat_actions, exp_actions]
policy_vars = [mvn_params, cat_params, exp_params]
else:
# No nested policy structure
policies = mvn_policies
actions = mvn_actions
policy_vars = mvn_params
entropy_scale_op = None
if normalise_entropy:
# Scale op that divides by total action dims
def scale_op(policies):
policies = nest.flatten(policies)
num_dims = [tf.to_float(tf.reduce_prod(policy.event_shape_tensor()))
for policy in policies]
return 1. / tf.reduce_sum(tf.stack(num_dims))
entropy_scale_op = scale_op
return policies, actions, policy_vars, entropy_scale_op | 5d6ddc58db39496fed3c99c214c1f835ee49f2ea | 8,355 |
async def definition_delete(hub, ctx, name, **kwargs):
"""
.. versionadded:: 1.0.0
Delete a policy definition.
:param name: The name of the policy definition to delete.
CLI Example:
.. code-block:: bash
azurerm.resource.policy.definition_delete testpolicy
"""
result = False
polconn = await hub.exec.azurerm.utils.get_client(ctx, "policy", **kwargs)
try:
# pylint: disable=unused-variable
policy = polconn.policy_definitions.delete(policy_definition_name=name)
result = True
except (CloudError, ErrorResponseException) as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
return result | 7f3e6b2b3b6bbcea590a042b923fd797a56840ee | 8,356 |
import optparse
def parse_options(argv):
"""Parses and checks the command-line options.
Returns:
A tuple containing the options structure and a list of categories to
be traced.
"""
usage = 'Usage: %prog [options] [category1 [category2 ...]]'
desc = 'Example: %prog -b 32768 -t 15 gfx input view sched freq'
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('-o', dest='output_file', help='write trace output to FILE',
default=None, metavar='FILE')
parser.add_option('-t', '--time', dest='trace_time', type='int',
help='trace for N seconds', metavar='N')
parser.add_option('-l', '--list-categories', dest='list_categories',
default=False, action='store_true',
help='list the available categories and exit')
parser.add_option('-j', '--json', dest='write_json',
default=False, action='store_true',
help='write a JSON file')
parser.add_option('--link-assets', dest='link_assets', default=False,
action='store_true',
help='(deprecated)')
parser.add_option('--from-file', dest='from_file', action='store',
help='read the trace from a file (compressed) rather than'
'running a live trace')
parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
type='string', help='(deprecated)')
parser.add_option('-e', '--serial', dest='device_serial_number',
default=_get_default_serial(),
type='string', help='adb device serial number')
parser.add_option('--target', dest='target', default='android', type='string',
                    help='choose tracing target (android or linux)')
parser.add_option('--timeout', dest='timeout', type='int',
help='timeout for start and stop tracing (seconds)')
parser.add_option('--collection-timeout', dest='collection_timeout',
type='int', help='timeout for data collection (seconds)')
atrace_ftrace_options = optparse.OptionGroup(parser,
'Atrace and Ftrace options')
atrace_ftrace_options.add_option('-b', '--buf-size', dest='trace_buf_size',
type='int', help='use a trace buffer size '
' of N KB', metavar='N')
atrace_ftrace_options.add_option('--no-fix-threads', dest='fix_threads',
default=True, action='store_false',
help='don\'t fix missing or truncated '
'thread names')
atrace_ftrace_options.add_option('--no-fix-tgids', dest='fix_tgids',
default=True, action='store_false',
help='Do not run extra commands to restore'
' missing thread to thread group id '
'mappings.')
atrace_ftrace_options.add_option('--no-fix-circular', dest='fix_circular',
default=True, action='store_false',
help='don\'t fix truncated circular traces')
parser.add_option_group(atrace_ftrace_options)
# Add the other agent parsing options to the parser. For Systrace on the
# command line, all agents are added. For Android, only the compatible agents
# will be added.
for module in ALL_MODULES:
option_group = module.add_options(parser)
if option_group:
parser.add_option_group(option_group)
options, categories = parser.parse_args(argv[1:])
if options.output_file is None:
options.output_file = 'trace.json' if options.write_json else 'trace.html'
if options.link_assets or options.asset_dir != 'trace-viewer':
parser.error('--link-assets and --asset-dir are deprecated.')
if options.trace_time and options.trace_time < 0:
parser.error('the trace time must be a non-negative number')
if (options.trace_buf_size is not None) and (options.trace_buf_size <= 0):
parser.error('the trace buffer size must be a positive number')
return (options, categories) | df6910fb6600f8c4573eced74dfcd8bc6ec1a5ad | 8,357 |
def get_data_for_file(folder, ports):
"""Parses the pcap files in the specified folder, and outputs data for the specified ports
"""
# Load private keys and port->provider mappings
keys, providers, nodes = read_keys(os.path.join(folder, 'keys'))
print 'Loading packets'
# Load packets
with open(os.path.join(folder, 'network.pcap'), 'rb') as f:
cap = dpkt.pcap.Reader(f)
packets = []
for ts, buf in cap:
eth = dpkt.sll.SLL(buf)
if eth.type != 3:
# tcpdump captures both type 3 and 4 packets, resulting in duplicates
continue
eth.time = ts
try:
eth.data.src = socket.inet_ntoa(eth.data.src)
eth.data.dst = socket.inet_ntoa(eth.data.dst)
except:
pass
packets.append(eth)
# Load config
config = json.load(open(os.path.join(folder, 'config.json')))
# Invert exponential parameters to get rate
loops, drop, payload = 1/config['EXP_PARAMS_LOOPS'], 1 / \
config['EXP_PARAMS_DROP'], 1/config['EXP_PARAMS_PAYLOAD']
lambda_total = loops + drop + payload
print "λ_loop = %f, λ_drop = %f, λ_payload = %f, λ = %f" % (
loops, drop, payload, lambda_total)
data = []
for port in ports:
print "Parsing port %d from %s" % (port, folder)
# Filter packets by source port
filtered = [x for x in packets if x.data.data.sport == port]
print "Analysing all packets"
all_mean = analyse_packets(filtered, packets[0].time, packets[-1].time)
print "-----------------"
decrypted_filtered = [(x, decrypt_packet(
x, keys[nodes[get_addr(x.data.dst, x.data.data.dport)]], keys)) for x in filtered]
real_filtered = [
x for x, decrypt in decrypted_filtered if decrypt[0] == 'REAL']
if len(real_filtered) == 0:
print "Warning, 0 real packets"
real_mean = None
else:
print "Analysing real packets"
real_mean = analyse_packets(
real_filtered, packets[0].time, packets[-1].time)
print "\n-----------------\n"
data.append((port, loops, drop, payload,
lambda_total, all_mean, real_mean))
return data | be81404b54231bde3444d3b3545cafa1c836c074 | 8,358 |
async def create_channel_in_db(
context: 'Context',
game_config: 'GameConfig',
channel_id: str,
finished: bool = False
) -> Channel:
"""Utility function to create a channel in the database
:param context: The Discord Context.
:param game_config: The GameConfig to use for extra info.
:param finished: Whether or not the channel is finished.
:return: The Channel that was created"""
owner = (await sync_to_async(User.objects.get_or_create)(id=context.author.id))[0]
return await sync_to_async(Channel.objects.create)(
id=channel_id,
owner=owner, guild_id=context.guild.id, game=game_config.game,
finished=finished
) | a2ea09b436c4eeabd37c0c8220d791d89db3912f | 8,359 |
def retry(times, func, *args, **kwargs):
"""Try to execute multiple times function mitigating exceptions.
:param times: Amount of attempts to execute function
:param func: Function that should be executed
:param args: *args that are passed to func
:param kwargs: **kwargs that are passed to func
:raises Exception: Raise any exception that can raise func
:returns: Result of func(*args, **kwargs)
"""
for i in range(times):
try:
return func(*args, **kwargs)
except Exception:
if i == times - 1:
raise | 7e9fd482a70409d62ea108ddaa83440fcd2b024f | 8,360 |
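A minimal usage sketch for retry(); `flaky` is a hypothetical callable that fails twice before succeeding.
attempts = {"count": 0}
def flaky():
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("transient failure")
    return "ok"
print(retry(5, flaky))  # retries past two failures, then prints "ok"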
import networkx as nx
def _apply_prediction(G, func, ebunch=None):
"""Applies the given function to each edge in the specified iterable
of edges.
`G` is an instance of :class:`networkx.Graph`.
`ebunch` is an iterable of pairs of nodes. If not specified, all
non-edges in the graph `G` will be used.
"""
if ebunch is None:
ebunch = nx.non_edges(G)
return sorted([(u, v, func(G, u, v)) for u, v in ebunch], key = lambda t:t[2], reverse = True) | 5e046bf7608337f6ed046a71b8a3983f53109d46 | 8,361 |
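An illustrative sketch of _apply_prediction() on a small graph; `common_neighbors_count` is a made-up scorer, not part of the original code.
import networkx as nx
def common_neighbors_count(G, u, v):
    # score a candidate edge by how many neighbours u and v share
    return len(list(nx.common_neighbors(G, u, v)))
G = nx.path_graph(4)  # edges: 0-1, 1-2, 2-3
print(_apply_prediction(G, common_neighbors_count))
# highest-scoring non-edges first, e.g. [(0, 2, 1), (1, 3, 1), (0, 3, 0)]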
def import_class(class_object):
"""
Import a class given a string with its name in the format module.module.classname
"""
d = class_object.rfind(".")
class_name = class_object[d + 1:len(class_object)]
m = __import__(class_object[0:d], globals(), locals(), [class_name])
return getattr(m, class_name) | 82df3ed7d646bd423ccefacc00493e917f13c430 | 8,362 |
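A quick usage sketch: importing a standard-library class by its dotted path.
DateClass = import_class("datetime.date")
print(DateClass(2024, 1, 2))  # 2024-01-02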
def anonymous_fun_0_(empty_closure_0_):
"""
empty_closure_0_: ()
"""
def anonymous_fun_1_(par_map_input_1_):
"""
par_map_input_1_: Double
"""
def anonymous_fun_2_(par_map_input_0_):
"""
par_map_input_0_: Double
"""
def anonymous_fun_3_(fused_input_0_):
"""
fused_input_0_: (Double,Double)
"""
def anonymous_fun_4_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_0_ = maybeRow_1_
else:
cond_result_0_ = 0.0
return cond_result_0_
def anonymous_fun_5_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_1_ = row_1_
else:
cond_result_1_ = None
return cond_result_1_
def anonymous_fun_6_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_2_ = maybeRow_0_
else:
cond_result_2_ = 0.0
return cond_result_2_
def anonymous_fun_7_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_3_ = row_0_
else:
cond_result_3_ = None
return cond_result_3_
return ((fun_comp(anonymous_fun_4_,anonymous_fun_5_))(fused_input_0_[0]),(fun_comp(anonymous_fun_6_,anonymous_fun_7_))(fused_input_0_[1]))
def anonymous_fun_8_(dbrow_0_):
"""
dbrow_0_: Double
"""
return (dbrow_0_,dbrow_0_)
def anonymous_fun_9_(fused_input_1_):
"""
fused_input_1_: (Double,Double)
"""
def anonymous_fun_10_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_4_ = maybeRow_1_
else:
cond_result_4_ = 0.0
return cond_result_4_
def anonymous_fun_11_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_5_ = row_1_
else:
cond_result_5_ = None
return cond_result_5_
def anonymous_fun_12_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_6_ = maybeRow_0_
else:
cond_result_6_ = 0.0
return cond_result_6_
def anonymous_fun_13_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_7_ = row_0_
else:
cond_result_7_ = None
return cond_result_7_
return ((fun_comp(anonymous_fun_10_,anonymous_fun_11_))(fused_input_1_[0]),(fun_comp(anonymous_fun_12_,anonymous_fun_13_))(fused_input_1_[1]))
def anonymous_fun_14_(dbrow_1_):
"""
dbrow_1_: Double
"""
return (dbrow_1_,dbrow_1_)
return ((fun_comp(anonymous_fun_3_,anonymous_fun_8_))(par_map_input_0_),(fun_comp(anonymous_fun_9_,anonymous_fun_14_))(par_map_input_0_))
def anonymous_fun_15_(fused_input_2_):
"""
fused_input_2_: (Double,Double)
"""
def anonymous_fun_16_(maybeRow_1_):
"""
maybeRow_1_: Maybe Double
"""
if maybeRow_1_ is not None:
cond_result_8_ = maybeRow_1_
else:
cond_result_8_ = 0.0
return cond_result_8_
def anonymous_fun_17_(row_1_):
"""
row_1_: Double
"""
if row_1_ < 5.0:
cond_result_9_ = row_1_
else:
cond_result_9_ = None
return cond_result_9_
def anonymous_fun_18_(maybeRow_0_):
"""
maybeRow_0_: Maybe Double
"""
if maybeRow_0_ is not None:
cond_result_10_ = maybeRow_0_
else:
cond_result_10_ = 0.0
return cond_result_10_
def anonymous_fun_19_(row_0_):
"""
row_0_: Double
"""
if row_0_ > 10.0:
cond_result_11_ = row_0_
else:
cond_result_11_ = None
return cond_result_11_
return ((fun_comp(anonymous_fun_16_,anonymous_fun_17_))(fused_input_2_[0]),(fun_comp(anonymous_fun_18_,anonymous_fun_19_))(fused_input_2_[1]))
def anonymous_fun_20_(dbrow_2_):
"""
dbrow_2_: Double
"""
return (dbrow_2_,dbrow_2_)
return (anonymous_fun_2_(par_map_input_1_),(fun_comp(anonymous_fun_15_,anonymous_fun_20_))(par_map_input_1_))
return anonymous_fun_1_ | 2ea827153fadc359d056f4b981dbb5d3bf3711ee | 8,363 |
def get_examples(mode='train'):
"""
dataset[0][0] examples
"""
examples = {
'train':
({'id': '0a25cb4bc1ab6f474c699884e04601e4', 'title': '', 'context': '第35集雪见缓缓张开眼睛,景天又惊又喜之际,长卿和紫萱的仙船驶至,见众人无恙,'
'也十分高兴。众人登船,用尽合力把自身的真气和水分输给她。雪见终于醒过来了,但却一脸木然,全无反应。众人向常胤求助,却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世,'
'清微语带双关说一切上了天界便有答案。长卿驾驶仙船,众人决定立马动身,往天界而去。众人来到一荒山,长卿指出,魔界和天界相连。由魔界进入通过神魔之井,便可登天。众人至魔界入口,'
'仿若一黑色的蝙蝠洞,但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦,模仿重楼的翅膀,制作数对翅膀状巨物。刚佩戴在身,便被吸入洞口。众人摔落在地,'
'抬头发现魔界守卫。景天和众魔套交情,自称和魔尊重楼相熟,众魔不理,打了起来。', 'question': '仙剑奇侠传3第几集上天界', 'answers': ['第35集'], 'answer_starts': [0]}),
}
return examples[mode] | 0b5fb45bcac847cd3f7e7b3e5b264e350c891211 | 8,364 |
from IPython.core.display import HTML
def display_tables(tables, max_rows=10, datetime_fmt='%Y-%m-%d %H:%M:%S', row=True):
"""Display mutiple tables side by side on a Jupyter Notebook.
Args:
tables (dict[str, DataFrame]):
``dict`` containing table names and pandas DataFrames.
max_rows (int):
Max rows to show per table. Defaults to 10.
datetime_fmt (str):
Format with which to display datetime columns.
"""
# Import here to avoid making IPython a hard dependency
names = []
data = []
for name, table in tables.items():
table = table.copy()
for column in table.columns:
column_data = table[column]
if column_data.dtype.kind == 'M':
table[column] = column_data.dt.strftime(datetime_fmt)
names.append('<td style="text-align:left"><b>{}</b></td>'.format(name))
data.append('<td>{}</td>'.format(table.head(max_rows).to_html(index=False)))
if row:
html = '<table><tr>{}</tr><tr>{}</tr></table>'.format(
''.join(names),
''.join(data),
)
else:
rows = [
'<tr>{}</tr><tr>{}</tr>'.format(name, table)
for name, table in zip(names, data)
]
html = '<table>{}</table>'.format(''.join(rows))
return HTML(html) | 904a900e97aab4809ea5057025a5a7a075429942 | 8,365 |
def get_visible_enemy_units(observation, as_list=False, as_dict=True):
"""
This function takes an observation and returns a list of the enemy units that are
on screen and are visible.
A unit is considered visible if is either visible (in the protos sense) or if it
is snapshotted and finished.
The definition of display_type can be found here:
https://github.com/Blizzard/s2client-proto/blob/master/s2clientprotocol/raw.proto#L55
"""
if as_list == as_dict:
raise ValueError("One and only one of as_list and as_dict should be True")
if as_list == True:
visible_enemy_units = []
for unit in observation.raw_data.units:
if unit.alliance == 4 and unit.is_on_screen:
if unit.display_type == 1 or (
unit.display_type == 2 and unit.build_progress == 1
):
visible_enemy_units.append(get_unit_doc(unit))
if as_dict == True:
# TO-DO: fix this one, this is the root of all bugs.
visible_enemy_units = {}
for unit in observation.raw_data.units:
if unit.alliance == 4 and unit.is_on_screen:
if unit.display_type == 1 or (
unit.display_type == 2 and unit.build_progress == 1
):
if str(unit.unit_type) not in visible_enemy_units:
visible_enemy_units[str(unit.unit_type)] = [get_unit_doc(unit)]
else:
visible_enemy_units[str(unit.unit_type)].append(
get_unit_doc(unit)
)
return visible_enemy_units | e134892664e70427c67093812b3a79cc45c8f29c | 8,366 |
def prepare_default_result_dict(key, done, nodes):
"""Prepares the default result `dict` using common values returned by any
operation on the DHT.
Returns:
dict: with keys `(k, d, n)` for the key, done and nodes; `n` is a list
of `dict` with keys `(i, a, x)` for id, address, and expiration.
"""
d = {
"k": key,
"d": done,
}
nb = []
for n in nodes:
_node = n.getNode()
nb.append({
"i": n.getId().toString(),
"a": _node.getAddr(),
"x": _node.isExpired()
})
d["n"] = nb
return d | 420beb66352fee7b4d38f6b4cf628cbaa86a03df | 8,367 |
def MatchScorer(match, mismatch):
"""Factory function that returns a score function set to match and mismatch.
match and mismatch should both be numbers. Typically, match should be
positive and mismatch should be negative.
Resulting function has signature f(x,y) -> number.
"""
def scorer(x, y):
if x == y:
return match
else:
return mismatch
return scorer | fe3829efc64cb4d9785e52b8af6949c147481902 | 8,368 |
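A short usage sketch: a +1/-1 scorer of the kind used in pairwise alignment scoring.
score = MatchScorer(1, -1)
print(score("A", "A"))  # 1 (match)
print(score("A", "G"))  # -1 (mismatch)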
import random
def random_choice(context: RuntimeContext, *choices):
"""Template helper for random choices.
Supports structures like this:
random_choice:
- a
- b
- <<c>>
Or like this:
random_choice:
- choice:
pick: A
probability: 50%
- choice:
pick: A
probability: 50%
Probabilities are really just weights and don't need to
add up to 100.
Pick-items can have arbitrary internal complexity.
Pick-items are lazily evaluated.
"""
if not choices:
raise ValueError("No choices supplied!")
if getattr(choices[0], "function_name", None) == "choice":
choices = [choice.render(context) for choice in choices]
rc = weighted_choice(choices)
else:
rc = random.choice(choices)
if hasattr(rc, "render"):
rc = rc.render(context)
return rc | cc74c4106e2263e4b46ef25ed5cb83839040bb5f | 8,369 |
def _compute_paddings(height_pad_amt, width_pad_amt, patch_axes):
"""Convert the total pad amounts to the format needed by tf.pad()."""
top_pad = height_pad_amt // 2
bottom_pad = height_pad_amt - top_pad
left_pad = width_pad_amt // 2
right_pad = width_pad_amt - left_pad
paddings = [[0, 0] for _ in range(4)]
paddings[patch_axes[0]] = [top_pad, bottom_pad]
paddings[patch_axes[1]] = [left_pad, right_pad]
return paddings | 3a5154ba0fa6808bc6dc8e20fcb4203324762ba9 | 8,370 |
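A small worked example, assuming an NHWC tensor whose patch axes are (height, width) = (1, 2).
print(_compute_paddings(5, 3, (1, 2)))
# [[0, 0], [2, 3], [1, 2], [0, 0]]  -- height pad split 2/3, width pad split 1/2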
def tab_size(computer, name, value):
"""Compute the ``tab-size`` property."""
if isinstance(value, int):
return value
else:
return length(computer, name, value) | f121cc308f4c88e021e240767ae03479a26a46f6 | 8,371 |
def match_complete(user_id=""):
"""Switch 'complete' to true in matches table for user, return tallies."""
print("match_complete", user_id)
user = sm.get_user(user_id)
# Note: 0/1 used for 'complete' b/c Booleans not allowed in SimpleObjects
this_match, i = current_match_i(user)
temp = this_match['complete']
temp[i] = 1
this_match['complete'] = temp
return _get_tallies(user) | 1af499c671f209ba8bc9333e372947c90b9a2b8c | 8,372 |
import tensorflow as tf
def compute_range_map(flow,
downsampling_factor=1,
reduce_downsampling_bias=True,
resize_output=True):
"""Count how often each coordinate is sampled.
Counts are assigned to the integer coordinates around the sampled coordinates
using weights from bilinear interpolation.
Args:
flow: A float tensor of shape (batch size x height x width x 2) that
represents a dense flow field.
downsampling_factor: An integer, by which factor to downsample the output
resolution relative to the input resolution. Downsampling increases the
bin size but decreases the resolution of the output. The output is
normalized such that zero flow input will produce a constant ones output.
reduce_downsampling_bias: A boolean, whether to reduce the downsampling bias
near the image boundaries by padding the flow field.
resize_output: A boolean, whether to resize the output at the input
resolution.
Returns:
A float tensor of shape [batch_size, height, width, 1] that denotes how
often each pixel is sampled.
"""
# Get input shape.
input_shape = flow.shape.as_list()
if len(input_shape) != 4:
raise NotImplementedError()
batch_size, input_height, input_width, _ = input_shape
flow_height = input_height
flow_width = input_width
# Apply downsampling (and move the coordinate frame appropriately).
output_height = input_height // downsampling_factor
output_width = input_width // downsampling_factor
if downsampling_factor > 1:
# Reduce the bias that comes from downsampling, where pixels at the edge
# will get lower counts that pixels in the middle of the image, by padding
# the flow field.
if reduce_downsampling_bias:
p = downsampling_factor // 2
flow_height += 2 * p
flow_width += 2 * p
      # Apply padding in multiple steps to pad with the values on the edge.
for _ in range(p):
flow = tf.pad(
tensor=flow,
paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
mode='SYMMETRIC')
coords = flow_to_warp(flow) - p
# Update the coordinate frame to the downsampled one.
coords = (coords + (1 - downsampling_factor) * 0.5) / downsampling_factor
elif downsampling_factor == 1:
    coords = flow_to_warp(flow)  # coordinates after warping the image
else:
raise ValueError('downsampling_factor must be an integer >= 1.')
# Split coordinates into an integer part and a float offset for interpolation.
  coords_floor = tf.floor(coords)  # element-wise floor: largest integers not greater than the inputs, same dtype
coords_offset = coords - coords_floor
coords_floor = tf.cast(coords_floor, 'int32')
# Define a batch offset for flattened indexes into all pixels.
batch_range = tf.reshape(tf.range(batch_size), [batch_size, 1, 1])
idx_batch_offset = tf.tile(
batch_range, [1, flow_height, flow_width]) * output_height * output_width
# Flatten everything.
  coords_floor_flattened = tf.reshape(coords_floor, [-1, 2])  # integer parts
  coords_offset_flattened = tf.reshape(coords_offset, [-1, 2])  # fractional offsets
  idx_batch_offset_flattened = tf.reshape(idx_batch_offset, [-1])  # flat batch index offsets
# Initialize results.
idxs_list = []
weights_list = []
# Loop over differences di and dj to the four neighboring pixels.
for di in range(2):
for dj in range(2):
# Compute the neighboring pixel coordinates.
idxs_i = coords_floor_flattened[:, 0] + di
idxs_j = coords_floor_flattened[:, 1] + dj
# Compute the flat index into all pixels.
idxs = idx_batch_offset_flattened + idxs_i * output_width + idxs_j
# Only count valid pixels.
mask = tf.reshape(
tf.compat.v1.where(
tf.logical_and(
tf.logical_and(idxs_i >= 0, idxs_i < output_height),
tf.logical_and(idxs_j >= 0, idxs_j < output_width))), [-1])
valid_idxs = tf.gather(idxs, mask)
valid_offsets = tf.gather(coords_offset_flattened, mask)
# Compute weights according to bilinear interpolation.
weights_i = (1. - di) - (-1)**di * valid_offsets[:, 0]
weights_j = (1. - dj) - (-1)**dj * valid_offsets[:, 1]
weights = weights_i * weights_j
# Append indices and weights to the corresponding list.
idxs_list.append(valid_idxs)
weights_list.append(weights)
# Concatenate everything.
idxs = tf.concat(idxs_list, axis=0)
weights = tf.concat(weights_list, axis=0)
# Sum up weights for each pixel and reshape the result.
counts = tf.math.unsorted_segment_sum(
weights, idxs, batch_size * output_height * output_width)
count_image = tf.reshape(counts, [batch_size, output_height, output_width, 1])
if downsampling_factor > 1:
# Normalize the count image so that downsampling does not affect the counts.
count_image /= downsampling_factor**2
if resize_output:
count_image = resize(
count_image, input_height, input_width, is_flow=False)
return count_image | fa73194435ae893dcd359f93f1488a6b654f8d31 | 8,373 |
from typing import List
def get_wer(refs: List[str], hyps: List[str]):
"""
args:
refs (list of str): reference texts
hyps (list of str): hypothesis/prediction texts
"""
n_words, n_errors = 0, 0
for ref, hyp in zip(refs, hyps):
ref, hyp = ref.split(), hyp.split()
n_words += len(ref)
n_errors += editdistance.eval(ref, hyp)
return safe_divide(n_errors, n_words) | fb142f4d048bffca1a1119e4a2e7c68e1effcbfc | 8,374 |
def get_first(somelist, function):
""" Returns the first item of somelist for which function(item) is True """
for item in somelist:
if function(item):
return item
return None | 81976910c46102d3b15803d215f3bf5a554f9beb | 8,375 |
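A usage sketch: return the first element matching a predicate, or None when nothing matches.
print(get_first([1, 4, 7, 10], lambda x: x % 2 == 0))  # 4
print(get_first([1, 3, 5], lambda x: x > 10))          # None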
import numpy as np
def np_cross(a, b):
"""
Simple numba compatible cross product of vectors
"""
return np.array([
a[1] * b[2] - a[2] * b[1],
a[2] * b[0] - a[0] * b[2],
a[0] * b[1] - a[1] * b[0],
]) | 0d19a1bfdf7bf5d6835203f61654edc8263b3bbd | 8,376 |
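A sanity-check sketch comparing the hand-written product with numpy's own np.cross.
import numpy as np
a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])
print(np_cross(a, b))  # [0. 0. 1.]
print(np.cross(a, b))  # same result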
import sys
import locale
def get_process_output(process, encoding=None):
"""Get the output from the process."""
output = process.communicate()
returncode = process.returncode
if not encoding:
try:
encoding = sys.stdout.encoding
except Exception:
encoding = locale.getpreferredencoding()
if returncode != 0:
raise RuntimeError("Runtime Error: %s" % (output[0].rstrip().decode(encoding, errors='replace')))
return output[0].decode(encoding, errors='replace') | 84622c05e84627d0651d21194391c672fb111b6f | 8,377 |
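A usage sketch (assumes a POSIX system with an `echo` binary): capture the stdout of a short-lived process.
import subprocess
proc = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
print(get_process_output(proc).strip())  # hello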
from tqdm import tqdm
from sys import stdout
import numpy as np
def dct_2d(pixel_blocks: np.ndarray, verbose: int = 0) -> np.ndarray:
"""
Does 8x8 2D DCT on an image represented by pixel blocks.
:param pixel_blocks:
A np.ndarray of shape AxBx8x8x3, where A = H/8, B = W/8.
:param verbose:
An int; if greater than 0 will print out a tqdm progress bar.
:return:
A np.ndarray of shape AxBx8x8x3, where A = H/8, B = W/8.
"""
to_return = list()
if verbose > 0:
pbar = tqdm(total=pixel_blocks.shape[0] * pixel_blocks.shape[1], file=stdout)
for row in pixel_blocks:
current_row = list()
for pixel_block in row:
current_row.append(dct_2d_on_8x8_block(pixel_block))
if verbose > 0:
pbar.update()
to_return.append(current_row)
if verbose > 0:
pbar.close()
return np.array(to_return) | 00e6421d186635032fd4afc1fd932ee977c71c70 | 8,378 |
import itertools
def remove_duplicates(llist):
"""
Removes any and all duplicate entries in the specified list.
This function is intended to be used during dataset merging and
therefore must be able to handle list-of-lists.
:param llist: The list to prune.
:return: A list of unique elements only.
"""
if not llist:
return []
llist.sort()
return [x for x, _ in itertools.groupby(llist)] | cbdf1a4db99a7a5fac37f25776cc1387ed8c54e0 | 8,379 |
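A usage sketch: deduplicating a list of lists, as produced by dataset merging. Note the input list is sorted in place.
rows = [[2, "b"], [1, "a"], [1, "a"]]
print(remove_duplicates(rows))  # [[1, 'a'], [2, 'b']]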
def images_in_bbox(bbox: dict, **filters) -> str:
"""
Gets a complete list of images with custom filter within a BBox
:param bbox: Bounding box coordinates
Format::
>>> {
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... }
:type bbox: dict
:param filters: Different filters that may be applied to the output
Example filters::
- max_captured_at
- min_captured_at
- image_type: pano, flat, or all
- compass_angle
- sequence_id
- organization_id
:type filters: dict
:return: Output is a GeoJSON string that represents all the within a bbox after passing given
filters
:rtype: str
Usage::
>>> import mapillary as mly
>>> mly.interface.set_access_token('MLY|XXX')
>>> mly.interface.images_in_bbox(
... bbox={
... 'west': 'BOUNDARY_FROM_WEST',
... 'south': 'BOUNDARY_FROM_SOUTH',
... 'east': 'BOUNDARY_FROM_EAST',
... 'north': 'BOUNDARY_FROM_NORTH'
... },
... max_captured_at='YYYY-MM-DD HH:MM:SS',
... min_captured_at='YYYY-MM-DD HH:MM:SS',
... image_type='pano',
... compass_angle=(0, 360),
... sequence_id='SEQUENCE_ID',
... organization_id='ORG_ID'
... )
"""
return image.get_images_in_bbox_controller(
bounding_box=bbox, layer="image", zoom=14, filters=filters
) | 317554d0f666753cdfc8a3657f7f0b92d5af141d | 8,380 |
def find_start_time_from_afl(project_base_dir):
"""
Finds the start time of a project from afl directories.
This time is taken from the fuzzer_stats entry of
the first config iteration's fuzzer.
"""
try:
first_main_dir = main_dirs_for_proj(project_base_dir)[0]
except:
#if fuzzware-project dir exists but contains no mainXXX dirs
return 0
first_fuzzer_dir = fuzzer_dirs_for_main_dir(first_main_dir)[0]
fuzzer_stats_path = first_fuzzer_dir.joinpath("fuzzer_stats")
with open(fuzzer_stats_path, "r") as f:
start_time = int(f.readline().split(": ")[1])
return start_time | f8f21b65e1901615e16953da48ac39008dcb240b | 8,381 |
def filter_none_values(d, recursive=True):
"""
Returns a filtered copy of a dict, with all keys associated with 'None' values removed.
adapted from: http://stackoverflow.com/q/20558699
adapted from: http://stackoverflow.com/a/20558778
:param d: a dict-like object.
:param recursive: If True, performs the operation recursively on inner elements of the object.
:return: a new dict (of the same type as the original) containing the original dict's values,
except as modified per this function's documented effects.
>>> filter_none_values(None) is None
True
>>> filter_none_values(1)
Traceback (most recent call last):
TypeError: d is not a dict-like object.
>>> filter_none_values({})
{}
>>> filter_none_values({'a': 1, 'b': None, 'c': '3'})
{'a': 1, 'c': '3'}
>>> filter_none_values({'a': 1, 'b': [1, None, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, 3]}
>>> filter_none_values({'a': 1, 'b': [1, {'ba': 1, 'bb': None, 'bc': '3'}, 3], 'c': '3'})
{'a': 1, 'c': '3', 'b': [1, {'ba': 1, 'bc': '3'}, 3]}
>>> from collections import OrderedDict as od; filter_none_values(od((('a', 1), ('b', None), ('c', '3'))))
OrderedDict([('a', 1), ('c', '3')])
>>> from collections import OrderedDict as od; filter_none_values({'r': od((('a', 1), ('b', None), ('c', '3')))})
{'r': OrderedDict([('a', 1), ('c', '3')])}
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": null, "c": 3}')))
"{u'a': 1, u'c': 3}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": [], "c": 3}')))
"{u'a': 1, u'c': 3, u'b': []}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": null}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": []}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': []}}"
>>> from json import loads; repr(filter_none_values(loads('{"a": 1, "b": {"ba": {"baa": null}}, "c": 3}')))
"{u'a': 1, u'c': 3, u'b': {u'ba': {}}}"
"""
# def my_remove_none(obj):
# """Note: adapted from remove_none."""
# if isinstance(obj, (collections.Sequence, list, tuple, set)):
# return type(obj)(remove_none(x) for x in obj if x is not None)
# elif isinstance(obj, (collections.Mapping, dict)):
# return type(obj)((remove_none(k), remove_none(v))
# for k, v in obj.items() if k is not None and v is not None)
# else:
# return obj
def remove_none(obj):
"""Note: This one seems to be functionally equivalent to purify (at least for the cases I tested)."""
if isinstance(obj, (list, tuple, set)):
return type(obj)(remove_none(x) for x in obj if x is not None)
elif isinstance(obj, dict):
return type(obj)((remove_none(k), remove_none(v))
for k, v in obj.items() if k is not None and v is not None)
else:
return obj
def purify(o):
"""Note: This one seems to be functionally equivalent to remove_none (at least for the cases I tested)."""
if hasattr(o, 'items'):
oo = type(o)()
for k in o:
if k is not None and o[k] is not None:
oo[k] = purify(o[k])
elif hasattr(o, '__iter__'):
oo = []
for it in o:
if it is not None:
oo.append(purify(it))
else:
return o
return type(o)(oo)
def strip_none(data):
"""Note: This one doesn't support OrderedDict, etc."""
if isinstance(data, dict):
return {k: strip_none(v) for k, v in data.items() if k is not None and v is not None}
elif isinstance(data, list):
return [strip_none(item) for item in data if item is not None]
elif isinstance(data, tuple):
return tuple(strip_none(item) for item in data if item is not None)
elif isinstance(data, set):
return {strip_none(item) for item in data if item is not None}
else:
return data
if d is None:
return None
elif not hasattr(d, 'items'):
raise TypeError('d is not a dict-like object.')
if recursive:
# return my_remove_none(d)
# return remove_none(d)
return purify(d)
# return strip_none(d)
else:
d = d.copy()
# remove all bad keys
bad_keys = [k for k, v in d.items() if v is None]
for k in bad_keys:
d.pop(k)
return d | 2a25ae331c99196c6f6eed7d5fe055f27583b1d2 | 8,382 |
def nonseq():
""" Return non sequence """
return 1 | 7c8f4a616a6761153226d961be02f6cf5b0cc54a | 8,383 |
from skimage import io  # assumed: imread comes from scikit-image's io module, not the stdlib io
import cv2
def load_image(path, color_space = None, target_size = None):
    """Loads an image as a numpy array
Arguments:
path: Path to image file
target_size: Either, None (default to original size)
or tuple of ints '(image height, image width)'
"""
img = io.imread(path)
if target_size:
img = cv2.resize(img, target_size, interpolation = cv2.INTER_CUBIC)
return img | 882210dff5dfa46596562483966b4a72c37aa7a8 | 8,384 |
def kubernetes_node_label_to_dict(node_label):
"""Load Kubernetes node label to Python dict."""
if node_label:
label_name, value = node_label.split("=")
return {label_name: value}
return {} | c856d4e6d1f2169f7028ce842edc881cbca4e783 | 8,385 |
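A usage sketch: parsing a single "key=value" Kubernetes node label.
print(kubernetes_node_label_to_dict("disktype=ssd"))  # {'disktype': 'ssd'}
print(kubernetes_node_label_to_dict(""))              # {}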
def get_category_user_problem(cat_name, username):
"""
获取直接在指定目录下的用户AC的题目、尚未AC的题目和尚未做过的题目的情况
:param cat_name:
:param username:
:return:
"""
cat = __Category.objects.filter(name=cat_name).first()
user = __User.objects.filter(username=username).first()
if user is None or cat is None:
return {'solved': [], 'not_solved': [], 'not_tried': []}
query_dict = {}
relation = __ProblemUserRelation.objects.filter(user=user).values('problem_id', 'solved').distinct()
for i in relation:
query_dict[i['problem_id']] = i['solved']
problems = cat.problem.filter(category_relation__direct=True).values('id', 'title')
solved = []
not_solved = []
not_tried = []
for i in problems:
if i['id'] in query_dict:
if query_dict[i['id']] is True:
solved.append(i)
else:
not_solved.append(i)
else:
not_tried.append(i)
return {'solved': solved, 'not_solved': not_solved, 'not_tried': not_tried} | 96e78d527f5bd0002345973eb085e04246e936ae | 8,386 |
import math
def floatToJson(x):
"""Custom rule for converting non-finite numbers to JSON as quoted strings: ``"inf"``, ``"-inf"``, and ``"nan"``. This avoids Python's bad habit of putting literal ``Infinity``, ``-Infinity``, and ``NaN`` in the JSON (without quotes)."""
if x in ("nan", "inf", "-inf"):
return x
elif math.isnan(x):
return "nan"
elif math.isinf(x) and x > 0.0:
return "inf"
elif math.isinf(x):
return "-inf"
else:
return x | 938700c100d9176f6d950aee9ddf8f90109bedcc | 8,387 |
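A usage sketch: non-finite floats become quoted strings while finite values pass through untouched.
print(floatToJson(float("inf")))  # 'inf'
print(floatToJson(float("nan")))  # 'nan'
print(floatToJson(3.14))          # 3.14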
import os
def get_datafiles(datadir, prefix = ""):
"""
Scan directory for all csv files
prefix: used in recursive call
"""
datafiles = []
for fname in os.listdir(datadir):
fpath = os.path.join(datadir, fname)
datafile = os.path.join(prefix, fname)
if os.path.isdir(fpath):
datafiles += get_datafiles(fpath, datafile)
elif fname.endswith(".csv"):
datafiles.append(datafile)
return datafiles | 9d975985a7b16af75436f8941881982f8a39d5d7 | 8,388 |
import os
def auto_delete_file_on_change(sender, instance, **kwargs):
"""
Deletes old file from filesystem
when corresponding `MediaFile` object is updated
with new file.
"""
if not instance.pk:
return False
try:
old_file = sender.objects.get(pk=instance.pk).url_banner
except sender.DoesNotExist:
return False
new_file = instance.url_banner
if not old_file == new_file:
if os.path.isfile(old_file.path):
os.remove(old_file.path) | fa65394c95f3312e694e006751a7833c8d989af5 | 8,389 |
import math
def systematic_uncertainties():
"""tabulates sources of uncertainty and sums them in quadrature"""
result_m = [
0.066, # [0.07-0.12] 0.066 ± 0.019
0.019, # [0.12-0.20] 0.019 ± 0.009
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.006, # [0.30-0.45] -0.006 ± 0.014
0.007, # [0.45-0.65] 0.007 ± 0.023
0.012 # [0.65-1.00] 0.012 ± 0.040
]
result_p = [
0.026, # [0.07-0.12] 0.026 ± 0.019
0.021, # [0.12-0.20] 0.021 ± 0.008
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.014, # [0.30-0.45] -0.014 ± 0.013
0.024, # [0.45-0.65] 0.024 ± 0.022
0.046 # [0.65-1.00] 0.046 ± 0.037
]
pid_contamination = 0.10
pid_asym_m = [
( 0.051 , 0.038), # [0.07-0.12] 0.051 ± 0.038
(-0.017 , 0.016), # [0.12-0.20] -0.017 ± 0.016
(-0.032 , 0.016), # [0.20-0.30] -0.032 ± 0.016
(-0.006 , 0.023), # [0.30-0.45] -0.006 ± 0.023
(-0.031 , 0.042), # [0.45-0.65] -0.031 ± 0.042
( 0.089 , 0.085) # [0.65-1.00] 0.089 ± 0.085
]
pid_asym_p = [
( 0.005 , 0.036), # [0.07-0.12] 0.005 ± 0.036
( 0.006 , 0.015), # [0.12-0.20] 0.006 ± 0.015
(-0.006 , 0.015), # [0.20-0.30] -0.006 ± 0.015
( 0.018 , 0.020), # [0.30-0.45] 0.018 ± 0.020
(-0.038 , 0.032), # [0.45-0.65] -0.038 ± 0.032
( 0.142 , 0.059) # [0.65-1.00] 0.142 ± 0.059
]
for i in range(len(pid_asym_m)):
val, err = pid_asym_m[i]
pid_asym_m[i] = max( val-result_m[i], err)
for i in range(len(pid_asym_p)):
val, err = pid_asym_p[i]
pid_asym_p[i] = max( val-result_p[i], err)
beam_vector = 0.0102
asigma_m = [
0.035, # [0.07-0.12] 0.005 ± 0.035
0.015, # [0.12-0.20] -0.012 ± 0.015
0.016, # [0.20-0.30] -0.014 ± 0.016
0.027, # [0.30-0.45] -0.027 ± 0.023
0.066, # [0.45-0.65] -0.066 ± 0.040
0.073 # [0.65-1.00] -0.072 ± 0.073
]
asigma_p = [
0.034, # [0.07-0.12] -0.001 ± 0.034
0.014, # [0.12-0.20] -0.007 ± 0.014
0.015, # [0.20-0.30] 0.007 ± 0.015
0.025, # [0.30-0.45] -0.025 ± 0.022
0.039, # [0.45-0.65] -0.039 ± 0.037
0.061 # [0.65-1.00] 0.033 ± 0.061
]
mcasym_m = [
0.0066, # [0.07-0.12] 0.0012 ± 0.0066
0.0057, # [0.12-0.20] 0.0057 ± 0.0025
0.0089, # [0.20-0.30] 0.0089 ± 0.0020
0.0077, # [0.30-0.45] 0.0077 ± 0.0026
0.0042, # [0.45-0.65] 0.0038 ± 0.0042
0.0070 # [0.65-1.00] 0.0053 ± 0.0070
]
mcasym_p = [
0.0047, # [0.07-0.12] -0.0014 ± 0.0047
0.0077, # [0.12-0.20] 0.0077 ± 0.0024
0.0147, # [0.20-0.30] 0.0147 ± 0.0023
0.0105, # [0.30-0.45] 0.0105 ± 0.0024
0.0057, # [0.45-0.65] 0.0057 ± 0.0044
0.0112 # [0.65-1.00] 0.0112 ± 0.0081
]
pt_shift_m = [ 0, 0,
0.003, # [0.20-0.30] 0.006 low, 0.001 high, 0.003 avg
0.005, # [0.30-0.45] 0.007 low, 0.003 high, 0.005 avg
0.016, # [0.45-0.65] 0.020 low, 0.012 high, 0.016 avg
0.010 # [0.65-1.00] 0.011 low, 0.008 high, 0.010 avg
]
pt_shift_p = [ 0, 0,
0.004, # [0.20-0.30] 0.005 low, 0.003 high, 0.004 avg
0.007, # [0.30-0.45] 0.008 low, 0.006 high, 0.007 avg
0.016, # [0.45-0.65] 0.023 low, 0.008 high, 0.016 avg
0.016 # [0.65-1.00] 0.012 low, 0.020 high, 0.016 avg
]
relative_luminosity = 9.4e-4
minus = [0.0 for bin in zbins[:-1]]
plus = [0.0 for bin in zbins[:-1]]
start = len(zbins) == 5 and 2 or 0
for i in range(start, start+len(zbins)-1):
minus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_m[i], 2) +
pow(beam_vector*asigma_m[i], 2) +
pow(mcasym_m[i], 2) +
pow(pt_shift_m[i], 2)
)
plus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_p[i], 2) +
pow(beam_vector*asigma_p[i], 2) +
pow(mcasym_p[i], 2) +
pow(pt_shift_p[i], 2)
)
return {'minus':minus, 'plus':plus} | 71941441b09a593ebc2a3e396d2b86684bc75cfe | 8,390 |
def extract_metamap(json_, key):
"""
    Task function to parse and extract concepts from a json-style dict, using
the MetaMap binary.
Input:
        - json_ : dict,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
        - json_ : dict,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
docfield = settings['out']['json']['itemfield']
# textfield to read text from
textfield = settings['out']['json']['json_text_field']
N = len(json_[docfield])
for i, doc in enumerate(json_[docfield]):
text = clean_text(doc[textfield])
if len(text) > 5000:
chunks = create_text_batches(text)
results = {'text': text, 'sents': []}
sent_id = 0
for chunk in chunks:
tmp = metamap_wrapper(chunk)
for sent in tmp['sents']:
sent['sent_id'] = sent_id
sent_id += 1
results['sents'].append(sent)
else:
results = metamap_wrapper(text)
json_[docfield][i].update(results)
proc = int(i/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d documents -- %0.2f %%' % (i, N, proc))
return json_ | 543b1470f36ee85dde2c2447ecc204544ef8fd52 | 8,391 |
def get_testinfo_by_reference(ref_name, ref_type):
""" get test content by reference name
@params:
ref_name: reference name, e.g. api_v1_Account_Login_POST($UserName, $Password)
ref_type: "api" or "suite"
"""
function_meta = parse_function(ref_name)
func_name = function_meta["func_name"]
call_args = function_meta["args"]
test_info = get_test_definition(func_name, ref_type)
def_args = test_info.get("function_meta").get("args", [])
if len(call_args) != len(def_args):
raise exception.ParamsError("call args mismatch defined args!")
args_mapping = {}
for index, item in enumerate(def_args):
if call_args[index] == item:
continue
args_mapping[item] = call_args[index]
if args_mapping:
test_info = substitute_variables_with_mapping(test_info, args_mapping)
return test_info | 967b32149ae094d2ef86cc88b2131047b98b1f09 | 8,392 |
import logging
def game_info(uuid: str) -> dict:
"""
return info about game by uuid
:param uuid:
:return: message
"""
logging.info(uuid)
logging.info(games.keys())
if UUID(uuid) in games.keys():
select_game: Game = games.get(UUID(uuid))
return {
"uuid": uuid,
"start_time": select_game.start_time,
"field": select_game.field,
}
else:
return {"Error": f"{uuid} game not found!"} | 15faaab2f256830a0f2537ada4c742f703a74783 | 8,393 |
def miller_rabin(n, a):
"""
Miller-Rabin Primality Test
Returns true if n is a (probable) prime
Returns false if n is a composite number
"""
s = 0
d = n - 1
while d % 2 == 0:
s = s + 1
d = d >> 1
x = square_and_multiply(a, d, n)
if x != 1 and x + 1 != n:
for r in range(1, s):
x = square_and_multiply(x, 2, n)
if x == 1:
return False
elif x == n - 1:
a = 0
break
if a:
return False
return True | 2de6b54c05d4052e5d2c8fd915a0a2814a7da28f | 8,394 |
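A usage sketch, assuming square_and_multiply(a, b, n) is modular exponentiation (i.e. equivalent to the built-in pow(a, b, n)); the values come from the classic n = 221 example.
square_and_multiply = pow
print(miller_rabin(221, 137))  # False: 137 witnesses that 221 = 13 * 17 is composite
print(miller_rabin(221, 174))  # True: 174 is a strong liar for 221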
def luhn_sum_v1(num):
"""
First version of luhn_sum; uses a list which it modifies in-place.
"""
nums = [int(i) for i in reversed(str(num))]
for i in xrange(1, len(nums), 2):
nums[i] *= 2
return sum(sum(divmod(i, 10)) for i in nums) | c2ff96069710a2321d9871608d0d9bbaddc18d30 | 8,395 |
def get_rate_discounted_rate(item_code, customer, company, so_number = None):
""" This function is use to get discounted rate and rate """
item_group = frappe.get_value("Item", item_code, 'item_group')
# parent_item_group = frappe.get_value("Item Group", item_group, 'parent_item_group')
count = frappe.db.sql(f"""
SELECT
COUNT(*)
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON so.`name` = soi.`parent`
WHERE
soi.`item_group` = '{item_group}' AND
soi.`docstatus` = 1 AND
so.customer = '{customer}' AND
so.`company` = '{company}'
LIMIT 1
""")
where_clause = ''
if count[0][0]:
where_clause = f"soi.item_group = '{item_group}' AND"
data = None
if so_number:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2 AND
so.`name` = '{so_number}'
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
if not data:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
return data[0] if data else {'rate': 0} | 8560bf5846a0500941840d59fb79cd721196a7ca | 8,396 |
from typing import Dict
from typing import Any
import os
import logging
import json
def lambda_handler(event: Dict[str, Any], context: Dict[str, Any]) -> str:
"""
Lambda function to parse notification events and forward to Slack
:param event: lambda expected event object
:param context: lambda expected context object
:returns: none
"""
if os.environ.get("LOG_EVENTS", "False") == "True":
logging.info(f"Event logging enabled: `{json.dumps(event)}`")
for record in event["Records"]:
sns = record["Sns"]
subject = sns["Subject"]
message = sns["Message"]
region = sns["TopicArn"].split(":")[3]
payload = get_slack_message_payload(
message=message, region=region, subject=subject
)
response = send_slack_notification(payload=payload)
if json.loads(response)["code"] != 200:
response_info = json.loads(response)["info"]
logging.error(
f"Error: received status `{response_info}` using event `{event}` and context `{context}`"
)
return response | 6b6251b42063a91605b98eb31810af410dbf2617 | 8,397 |
import numpy as np
def sum_obs_np(A):
"""summation over axis 0 (obs) equivalent to np.sum(A, 0)"""
return np.einsum("ij -> j", A) if A.ndim > 1 else np.sum(A) | c14abc00b2ea6fa64c32fe7ac10f0007cb4705e8 | 8,398 |
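A quick usage sketch: column sums for a 2-D array, plain sum for a 1-D array.
import numpy as np
A = np.array([[1.0, 2.0], [3.0, 4.0]])
print(sum_obs_np(A))                     # [4. 6.]
print(sum_obs_np(np.array([1.0, 2.0])))  # 3.0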
def _macos_command_line_infoplist_impl(ctx):
"""Implementation of the internal `macos_command_line_infoplist` rule.
This rule is an internal implementation detail of
`macos_command_line_application` and should not be used directly by clients.
It merges Info.plists as would occur for a bundle but then propagates an
`objc` provider with the necessary linkopts to embed the plist in a binary.
Args:
ctx: The rule context.
Returns:
A `struct` containing the `objc` provider that should be propagated to a
binary that should have this plist embedded.
"""
bundle_id = ctx.attr.bundle_id
infoplists = ctx.files.infoplists
if ctx.attr.version and AppleBundleVersionInfo in ctx.attr.version:
version = ctx.attr.version[AppleBundleVersionInfo]
else:
version = None
if not bundle_id and not infoplists and not version:
fail("Internal error: at least one of bundle_id, infoplists, or version " +
"should have been provided")
plist_results = plist_actions.merge_infoplists(
ctx,
None,
infoplists,
bundle_id = bundle_id,
exclude_executable_name = True,
extract_from_ctxt = True,
include_xcode_env = True,
)
merged_infoplist = plist_results.output_plist
return [
linker_support.sectcreate_objc_provider(
"__TEXT",
"__info_plist",
merged_infoplist,
),
] | d70a47def85cff62fc21e907a9f0a246a9b1c192 | 8,399 |