content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def html_url(url: str, name: str = None, theme: str = "") -> str:
"""Create a HTML string for the URL and return it.
:param url: URL to set
:param name: Name of the URL, if None, use same as URL.
:param theme: "dark" or other theme.
:return: String with the correct formatting for URL
"""
if theme == "dark":
color = "#988fd4"
else:
color = "#1501a3"
if name is None:
name = url
retval = f'<a href="{url}" style="color:{color}">{name}</a>'
return retval | 74a0d3eabce4f0a53e699567e25c9d09924e3150 | 18,137 |
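A quick usage sketch for html_url as defined above; the expected output is shown in the comments:

```python
print(html_url("https://example.com"))
# <a href="https://example.com" style="color:#1501a3">https://example.com</a>
print(html_url("https://example.com", name="Example", theme="dark"))
# <a href="https://example.com" style="color:#988fd4">Example</a>
```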
def get_logger_messages(loggers=None, after=0):
    """ Returns messages for the specified loggers.
    If given, limits the messages to those that occurred after the given timestamp"""
    if loggers is None:
        loggers = []
    elif not isinstance(loggers, list):
        loggers = [loggers]
return logger.get_logs(loggers, after) | d2c8ef6dc8f1ec0f4a5f7a1263b829f20e0dfa8b | 18,138 |
def __dir__():
"""IPython tab completion seems to respect this."""
return __all__ | d1b0fe35370412a6c0ca5d323417e4e3d1b3b603 | 18,139 |
def run_iterations(histogram_for_random_words,
histogram_for_text,
iterations):
"""Helper function for test_stochastic_sample (below).
Store the results of running the stochastic_sample function for 10,000
iterations in a histogram.
Param: histogram_for_random_words(dict): all values sum to a total of 0
histogram_for_text(dict): all values represent frequency in text
iterations(int): number of trials to run for stochastic_sample
Return: histogram_for_random_words(dict): sum of all values = 10,000
"""
unique_words = words_in_text(histogram_for_random_words)
for i in range(iterations):
word = stochastic_sample(histogram_for_text)
        if word in unique_words:
            histogram_for_random_words[word] += 1
return histogram_for_random_words | 59bd4cefd03403eee241479df19f011915419f14 | 18,140 |
def GetFootSensors():
"""Get the foot sensor values"""
# Get The Left Foot Force Sensor Values
LFsrFL = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/FrontLeft/Sensor/Value")
LFsrFR = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/FrontRight/Sensor/Value")
LFsrBL = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/RearLeft/Sensor/Value")
LFsrBR = memoryProxy.getData("Device/SubDeviceList/LFoot/FSR/RearRight/Sensor/Value")
    LF = [LFsrFL, LFsrFR, LFsrBL, LFsrBR]
# Get The Right Foot Force Sensor Values
RFsrFL = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/FrontLeft/Sensor/Value")
RFsrFR = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/FrontRight/Sensor/Value")
RFsrBL = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/RearLeft/Sensor/Value")
RFsrBR = memoryProxy.getData("Device/SubDeviceList/RFoot/FSR/RearRight/Sensor/Value")
    RF = [RFsrFL, RFsrFR, RFsrBL, RFsrBR]
return LF, RF | 555c5cb1f6e68571848410096144a3184d22e28a | 18,141 |
def norm(x, y):
"""
Calculate the Euclidean Distance
:param x:
:param y:
:return:
"""
return tf.sqrt(tf.reduce_sum((x - y) ** 2)) | 67766f9e3c3a510a87eff6bdea7ddf9ec2504af3 | 18,142 |
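A small check of norm, assuming eager-mode TensorFlow 2.x:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([4.0, 6.0, 3.0])
print(norm(x, y).numpy())  # 5.0, i.e. sqrt(3**2 + 4**2 + 0**2)
```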
def expand_key(keylist, value):
"""
Recursive method for converting into a nested dict
Splits keys containing '.', and converts into a nested dict
"""
if len(keylist) == 0:
return expand_value(value)
elif len(keylist) == 1:
key = '.'.join(keylist)
base = dict()
base[key] = expand_value(value)
return base
else:
key = keylist[0]
value = expand_key(keylist[1:], value)
base = dict()
base[key] = expand_value(value)
return base | ac8b4bac9b686396d5d117149fb45b8bde2ac238 | 18,143 |
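A sketch of how expand_key unrolls a dotted key path. The real expand_value helper is not shown in this snippet, so a hypothetical pass-through stand-in is used here:

```python
def expand_value(value):
    # hypothetical stand-in: the real helper may recurse into nested values
    return value

print(expand_key("a.b.c".split("."), 1))  # {'a': {'b': {'c': 1}}}
```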
def _linear_sum_assignment(a, b):
"""
Given 1D arrays a and b, return the indices which specify the permutation of
b for which the element-wise distance between the two arrays is minimized.
Args:
a (array_like): 1D array.
b (array_like): 1D array.
Returns:
array_like: Indices which specify the desired permutation of b.
"""
# This is the first guess for a solution but sometimes we get duplicate
# indices so for those values we need to choose the 2nd or 3rd best
# solution. This approach can fail if there are too many elements in b which
    # map to the same element of a, but it's good enough for our purposes. For a
# general solution see the Hungarian algorithm/optimal transport algorithms.
idcs_initial = jnp.argsort(jnp.abs(b - a[:, None]), axis=1)
idcs_final = jnp.repeat(999, len(a))
def f(carry, idcs_initial_row):
i, idcs_final = carry
cond1 = jnp.isin(idcs_initial_row[0], jnp.array(idcs_final))
cond2 = jnp.isin(idcs_initial_row[1], jnp.array(idcs_final))
idx_closest = jnp.where(
cond1, jnp.where(cond2, idcs_initial_row[2], idcs_initial_row[1]), idcs_initial_row[0]
)
idcs_final = idcs_final.at[i].set(idx_closest)
return (i + 1, idcs_final), idx_closest
_, res = lax.scan(f, (0, idcs_final), idcs_initial)
return res | eeecff894e8bf29de66fa2560b8fdadbf3970d6d | 18,144 |
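For reference, the exact solution the comment alludes to (Hungarian algorithm) is available in SciPy; a sketch of the equivalent computation:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

a = np.array([0.1, 0.5, 0.9])
b = np.array([0.48, 0.95, 0.12])
cost = np.abs(b - a[:, None])           # pairwise element-wise distances
row, col = linear_sum_assignment(cost)  # optimal permutation of b
print(col)                              # [2 0 1]: a[0]->b[2], a[1]->b[0], a[2]->b[1]
```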
def get_ingredients_for_slice_at_pos(pos, frame, pizza, constraints):
"""
Get the slice of pizza with its ingredients
:param pos:
:param frame:
:param pizza:
:param constraints:
:return:
"""
def _get_ingredients_for_slice_at_pos(_pos, _frame, _pizza, _max_rows, _max_cols):
if not is_valid_pos_for_frame(_pos, _frame, constraints):
return False
cur_slice = list()
for r in range(_frame['r']):
cur_slice.append(_pizza[_pos['r'] + r][_pos['c']:_pos['c'] + _frame['c']])
return cur_slice
return _get_ingredients_for_slice_at_pos(pos, frame, pizza, constraints["R"], constraints["C"]) | db1083695d6f9503b3005e57db47c15ac761a31d | 18,145 |
def merge_data_includes(tweets_data, tweets_include):
"""
Merges tweet object with other objects, i.e. media, places, users etc
"""
df_tweets_tmp = pd.DataFrame(tweets_data)
# Add key-values of a nested dictionary in df_tweets_tmp as new columns
df_tweets = flat_dict(df_tweets_tmp)
for incl in tweets_include:
df_incl = pd.DataFrame(tweets_include[incl])
if incl == 'media':
# Split each row to multiple rows for each item in media_keys list
df_tweets = df_tweets.explode('media_keys')
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['media_keys'], right_on=['media_key'],
suffixes=[None,'_media'])
if incl == 'places':
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['place_id'], right_on=['id'],
suffixes=[None,'_places'])
if incl == 'users':
df_tweets = pd.merge(df_tweets, df_incl, how='left', left_on=['author_id'], right_on=['id'],
suffixes=[None,'_users'])
return df_tweets | db8e8560bdb80bd4a57d4f0d69031e944511633f | 18,146 |
def stringify(context, mapping, thing):
"""Turn values into bytes by converting into text and concatenating them"""
if isinstance(thing, bytes):
return thing # retain localstr to be round-tripped
return b''.join(flatten(context, mapping, thing)) | c4c4503160cab3ff6a78e2fb724fd283011ce0e7 | 18,147 |
import logging
import json
def extract_from_json(json_str, verbose=False):
"""A helper function to extract data from KPTimes dataset in json format
:param: json_str: the json string
:param: verbose: bool, if logging the process of data processing
:returns: the articles and keywords for each article
:rtype: src (list of string), tgt (list of keyword list)
"""
src = []
tgt = []
for idx in range(len(json_str)):
if idx % 1000 == 0:
if verbose:
                logging.info('processing idx: %d', idx)
data = json.loads(json_str[idx])
article = data['abstract']
keyword = data['keyword']
keyword = keyword.split(';')
src.append(article)
tgt.append(keyword)
return src, tgt | b05120eee45a887cee5eac68febffe96fcf8d305 | 18,148 |
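A small usage sketch with a two-record list of JSON strings in the layout the function assumes:

```python
rows = [
    '{"abstract": "Deep learning for text.", "keyword": "ml;nlp"}',
    '{"abstract": "Graphs at scale.", "keyword": "graphs"}',
]
src, tgt = extract_from_json(rows)
print(src)  # ['Deep learning for text.', 'Graphs at scale.']
print(tgt)  # [['ml', 'nlp'], ['graphs']]
```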
def split_data(data, split_ratio, data_type=DATA_TYPE_1):
"""
split data by type
"""
data_type_1 = data[data['LABEL'] == data_type]
data_type_2 = data[data['LABEL'] != data_type]
train_set = data.sample(frac=split_ratio, replace=False)
test_set = data[~data.index.isin(train_set.index)]
train_set_type_1 = data_type_1.sample(frac=split_ratio, replace=False)
test_set_type_1 = data_type_1[~data_type_1.index.isin(train_set_type_1.index)]
train_set_type_2 = data_type_2.sample(frac=split_ratio, replace=False)
test_set_type_2 = data_type_2[~data_type_2.index.isin(train_set_type_2.index)]
return train_set, test_set, train_set_type_1, test_set_type_1, train_set_type_2, test_set_type_2 | 2653ea65bbc6fa2c7c0db9ab29890f57d5254d3f | 18,149 |
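A usage sketch, assuming integer labels and passing data_type explicitly (DATA_TYPE_1 is a constant defined elsewhere):

```python
import pandas as pd

data = pd.DataFrame({"LABEL": [1, 1, 1, 0, 0, 0], "x": range(6)})
train, test, train_1, test_1, train_2, test_2 = split_data(data, 0.5, data_type=1)
print(len(train), len(test))  # 3 3; each label group is also split roughly in half
```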
def sparse_amplitude_prox(a_model, indices_target, counts_target, frame_dimensions, eps=0.5, lam=6e-1):
"""
Smooth truncated amplitude loss from Chang et al., Overlapping Domain Decomposition Methods for Ptychographic Imaging, (2020)
:param a_model: K x M1 x M2
:param indices_target: K x num_max_counts
:param counts_target: K x num_max_counts
:param frame_dimensions: 2
:return: loss (K,), grad (K x M1 x M2)
"""
threadsperblock = (256,)
    blockspergrid = tuple(np.ceil(np.array(np.prod(a_model.shape)) / threadsperblock).astype(int))
loss = th.zeros((a_model.shape[0],), device=a_model.device, dtype=th.float32)
grad = th.ones_like(a_model)
no_count_indicator = th.iinfo(indices_target.dtype).max
sparse_amplitude_prox_kernel[blockspergrid, threadsperblock](a_model.detach(), indices_target.detach(),
counts_target.detach(), loss.detach(), grad.detach(),
no_count_indicator, eps, lam)
return loss, grad | 9a2b7c0deb2eba58cebd6f7b2198c659c1915711 | 18,151 |
from typing import Dict
from typing import Any
from typing import List
def schema_as_fieldlist(content_schema: Dict[str, Any], path: str = "") -> List[Any]:
"""Return a list of OpenAPI schema property descriptions."""
fields = []
if "properties" in content_schema:
required_fields = content_schema.get("required", ())
for prop, options in content_schema["properties"].items():
new_path = path + "." + prop if path else prop
required = (
options["required"]
if "required" in options
else prop in required_fields
)
if "type" not in options:
fields.append(FieldDescription.load(options, new_path, required))
elif options["type"] == "object":
fields.append(FieldDescription.load(options, new_path, required))
fields.extend(schema_as_fieldlist(options, path=new_path))
elif options["type"] == "array":
fields.append(FieldDescription.load(options, new_path, required))
fields.extend(
schema_as_fieldlist(options["items"], path=new_path + ".[]")
)
else:
fields.append(FieldDescription.load(options, new_path, required))
if "items" in content_schema:
new_path = path + "." + "[]" if path else "[]"
content_schema["type"] = "array of {}s".format(
deduce_type(content_schema["items"])
)
fields.append(FieldDescription.load(content_schema, new_path))
fields.extend(schema_as_fieldlist(content_schema["items"], path=new_path))
return fields | b691e74ac36a0f3904bd317acee9b9344a440cdb | 18,152 |
def shrink(filename):
"""
:param filename: str, the location of the picture
:return: img, the shrink picture
"""
img = SimpleImage(filename)
new_img = SimpleImage.blank((img.width+1) // 2, (img.height+1) // 2)
for x in range(0, img.width, 2):
for y in range(0, img.height, 2):
pixel = img.get_pixel(x, y)
new_pixel = new_img.get_pixel(x//2, y//2)
"""
For every pixel(x, y) in img, assigns the average RGB of pixel(x, y), pixel(x+1, y),
pixel(x, y+1) and pixel(x+1, y+1) to new_pixel(x//2, y//2) in new_img.
"""
if ((img.width+1) % 2 == 0 and x == img.width - 1) or ((img.height + 1) % 2 == 0 and y == img.height - 1):
# It's the end of img.width or img.height.
new_pixel.red = pixel.red
new_pixel.green = pixel.green
new_pixel.blue = pixel.blue
else:
                pixel1 = img.get_pixel(x+1, y)
                pixel2 = img.get_pixel(x, y+1)
                pixel3 = img.get_pixel(x+1, y+1)
new_pixel.red = (pixel.red + pixel1.red + pixel2.red + pixel3.red) // 4
new_pixel.green = (pixel.green + pixel1.green + pixel2.green + pixel3.green) // 4
new_pixel.blue = (pixel.blue + pixel1.blue + pixel2.blue + pixel3.blue) // 4
return new_img | fad3778089b0d5f179f62fb2a40ec80fd3fe37d1 | 18,153 |
def eh_menor_que_essa_quantidade_de_caracters(palavra: str, quantidade: int) -> bool:
"""
Função para verificar se a string é menor que a quantidade de caracters informados
@param palavra: A palavra a ser verificada
@param quantidade: A quantidade de caracters que deseja verificar
@return: Retorna True em caso da palavra seja menor que a quantidade de caracters e False em caso negativo
"""
    return len(palavra) < quantidade
import rasterstats as rs
def buffer_sampler(ds,geom,buffer,val='median',ret_gdf=False):
"""
sample values from raster at the given ICESat-2 points
using a buffer distance, and return median/mean or a full gdf ( if return gdf=True)
Inputs = rasterio dataset, Geodataframe containing points, buffer distance, output value = median/mean (default median)
and output format list of x_atc,output_value arrays (default) or full gdf
"""
ndv = get_ndv(ds)
array = ds.read(1)
gt = ds.transform
stat = val
geom = geom.to_crs(ds.crs)
x_min,y_min,x_max,y_max = ds.bounds
geom = geom.cx[x_min:x_max, y_min:y_max]
geom['geometry'] = geom.geometry.buffer(buffer)
json_stats = rs.zonal_stats(geom,array,affine=gt,geojson_out=True,stats=stat,nodata=ndv)
gdf = gpd.GeoDataFrame.from_features(json_stats)
if val =='median':
gdf = gdf.rename(columns={'median':'med'})
call = 'med'
else:
gdf = gdf.rename(columns={'mean':'avg'})
call = 'avg'
if ret_gdf:
out_file = gdf
else:
out_file = [gdf.x_atc.values,gdf[call].values]
return out_file | 8efde64c0ee49b11e484fd204cf70ae5ae322bf9 | 18,155 |
import re
def extract_int(s, start, end):
    """ Returns the integer between start and end. """
    val = extract_string(s, start, end)
    if val is not None and re.match('^[0-9]{1,}$', val):
        return int(val)
    return None
import re
def get_pg_ann(diff, vol_num):
"""Extract pedurma page and put page annotation.
Args:
diff (str): diff text
vol_num (int): volume number
Returns:
str: page annotation
"""
pg_no_pattern = fr"{vol_num}\S*?(\d+)"
pg_pat = re.search(pg_no_pattern, diff)
try:
pg_num = pg_pat.group(1)
except Exception:
pg_num = 0
return f"<p{vol_num}-{pg_num}>" | d9ca1a760f411352d8bcbe094ac622f7dbd33d07 | 18,157 |
def check_diamond(structure):
"""
Utility function to check if the structure is fcc, bcc, hcp or diamond
Args:
structure (pyiron_atomistics.structure.atoms.Atoms): Atomistic Structure object to check
Returns:
bool: true if diamond else false
"""
cna_dict = structure.analyse.pyscal_cna_adaptive(
mode="total", ovito_compatibility=True
)
dia_dict = structure.analyse.pyscal_diamond_structure(
mode="total", ovito_compatibility=True
)
return (
cna_dict["CommonNeighborAnalysis.counts.OTHER"]
> dia_dict["IdentifyDiamond.counts.OTHER"]
) | ae082d6921757163cce3ddccbca15bf70621a092 | 18,158 |
from typing import Optional
from typing import Union
from typing import Dict
from typing import Any
from typing import List
from typing import Tuple
def compute_correlation(
df: DataFrame,
x: Optional[str] = None,
y: Optional[str] = None,
*,
cfg: Union[Config, Dict[str, Any], None] = None,
display: Optional[List[str]] = None,
value_range: Optional[Tuple[float, float]] = None,
k: Optional[int] = None,
) -> Intermediate:
# pylint: disable=too-many-arguments
"""
    Parameters
    ----------
    df
        The pandas dataframe for which plots are calculated for each column.
    x
        A valid column name of the dataframe
    y
        A valid column name of the dataframe
    value_range
        If the correlation value is out of the range, don't show it.
    cfg: Union[Config, Dict[str, Any], None], default None
        When a user calls plot_correlation(), the created Config object will be
        passed to compute_correlation().
        When a user calls compute_correlation() directly and wants to customize
        the output, cfg is a dictionary for configuring. If not, cfg is None and
        default values will be used for parameters.
    display: Optional[List[str]], default None
        A list containing the names of the visualizations to display. Only exists
        when a user calls compute_correlation() directly and wants to customize
        the output
    k
        Choose top-k element
    """
if isinstance(cfg, dict):
cfg = Config.from_dict(display, cfg)
elif not cfg:
cfg = Config()
df = preprocess_dataframe(df)
if x is None and y is None: # pylint: disable=no-else-return
with catch_warnings():
filterwarnings(
"ignore",
"overflow encountered in long_scalars",
category=RuntimeWarning,
)
return _calc_overview(df, cfg, value_range=value_range, k=k)
elif x is not None and y is None:
with catch_warnings():
filterwarnings(
"ignore",
"overflow encountered in long_scalars",
category=RuntimeWarning,
)
return _calc_univariate(df, x, cfg, value_range=value_range, k=k)
elif x is None and y is not None:
raise ValueError("Please give the column name to x instead of y")
elif x is not None and y is not None:
return _calc_bivariate(df, cfg, x, y, k=k)
raise ValueError("Not Possible") | a8fb7f4e6cf34d584aba8e8fa9a7a7703fad8bad | 18,159 |
def radix_sort(arr):
"""Sort list of numberes with radix sort."""
if len(arr) > 1:
buckets = [[] for x in range(10)]
lst = arr
output = []
t = 0
m = len(str(max(arr)))
while m > t:
for num in lst:
if len(str(num)) >= t + 1:
for b_num in range(10):
idx = num // 10**t % 10
if idx == b_num:
buckets[b_num].append(num)
break
else:
output.append(num)
lst = []
for bucket in buckets:
lst += bucket
buckets = [[] for x in range(10)]
t += 1
output += lst
return output
else:
return arr | 517ab99483ac1c6cd18df11dc1dccb4c502cac39 | 18,160 |
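A quick check of the sort:

```python
print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]
print(radix_sort([7]))  # single-element lists are returned as-is: [7]
```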
def resampling(w, rs):
"""
Stratified resampling with "nograd_primitive" to ensure autograd
takes no derivatives through it.
"""
N = w.shape[0]
bins = np.cumsum(w)
ind = np.arange(N)
u = (ind + rs.rand(N))/N
return np.digitize(u, bins) | 2f3d6ae173d5e0ebdfe36cd1ab6595af7452c191 | 18,162 |
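A usage sketch with plain NumPy (the snippet's np is presumably autograd.numpy, which exposes the same calls):

```python
import numpy as np

rs = np.random.RandomState(0)
w = np.array([0.1, 0.2, 0.3, 0.4])  # normalized particle weights
print(resampling(w, rs))            # e.g. [1 2 3 3]: heavier particles are drawn more often
```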
import torch
def integrated_bn(fms, bn):
"""iBN (integrated Batch Normalization) layer of SEPC."""
sizes = [p.shape[2:] for p in fms]
n, c = fms[0].shape[0], fms[0].shape[1]
fm = torch.cat([p.view(n, c, 1, -1) for p in fms], dim=-1)
fm = bn(fm)
fm = torch.split(fm, [s[0] * s[1] for s in sizes], dim=-1)
return [p.view(n, c, s[0], s[1]) for p, s in zip(fm, sizes)] | bee6d8782b372c0fb3990eefa42d51c6acacc29b | 18,163 |
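A sketch with an FPN-style pyramid of three feature maps sharing one BatchNorm2d; spatial shapes are preserved:

```python
import torch
import torch.nn as nn

n, c = 2, 8
fms = [torch.randn(n, c, s, s) for s in (32, 16, 8)]
bn = nn.BatchNorm2d(c)
out = integrated_bn(fms, bn)
print([tuple(o.shape) for o in out])  # [(2, 8, 32, 32), (2, 8, 16, 16), (2, 8, 8, 8)]
```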
def get_RF_calculations(model, criteria, calculation=None, clus="whole", too_large=None,
sgonly=False, regionalonly=False):
"""
BREAK DOWN DATA FROM CALCULATION!
or really just go pickle
"""
print(f'{utils.time_now()} - Criteria: {criteria}, calculation: {calculation}, clus: {clus}, sgonly: {sgonly}, regionalonly: {regionalonly}')
# pickling the entire dataset which is what z-score will be calculated against
if sgonly: location_tag = '_sgonly'
elif regionalonly: location_tag = '_regionalonly'
else: location_tag = ''
found = utils.find(f"{criteria}_serialized_{clus}{location_tag}.pkl", model.cluster_dir)
if found: found = found[0]
else:
# note: why each model is pickled even as a whole or even in its cluster
# is that it relieves holding in-memory these arrays
# later, these pickles are simply opened lazily when needed
print(f'"{criteria}_serialized_{clus}{location_tag}.pkl" not found.')
found = acquire_rf_subset(model, criteria, clus, location_tag)
utils.to_pickle(f"{criteria}_serialized_{clus}{location_tag}", found, model.cluster_dir)
    if isinstance(found, str):
pkl = utils.open_pickle(found)
else: pkl = found # for when cluster-wise, this is not a path but the actual numpy array
if calculation == "mean" and len(pkl.shape) >2:
daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
return daskarr.mean(axis=0).compute() *100
elif calculation == "std" and len(pkl.shape) >2:
daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
return daskarr.std(axis=0).compute() *100
elif calculation == "90perc" and len(pkl.shape) >2:
print('got back')
if too_large:
pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
return pkl.quantile(0.9, dim='time').persist().values
else:
return np.percentile(pkl.values, 90, axis=0)
elif calculation == "10perc" and len(pkl.shape) >2:
print('got back')
if too_large:
pkl = pkl.chunk({'time':-1, 'lon':2, 'lat':2})
return pkl.quantile(0.1, dim='time').persist().values
else:
return np.percentile(pkl.values, 10, axis=0)
    # da.map_blocks(np.percentile, pkl, axis=0, q=q)
    # daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
    # print('yer')
    # percentile_rank_lst = []
    # for p in range(pkl.shape[1]):
    #     for k in range(pkl.shape[2]):
    #         pkl_ = pkl[:, p, k]
    #         percentile_rank_lst.append(np.percentile(pkl_, 90))
    # daskarr = da.from_array(pkl, chunks=(500, pkl.shape[1], pkl.shape[2]))
    # return da.percentile(pkl, 90).compute()
    # return np.array(percentile_rank_lst).reshape(pkl.shape[1], pkl.shape[2])
    else:  # e.g. rf_ds_lon has None as <calculation>
return pkl | 34b44b3a525bd7cee562a63d689fc21d5a5c2a4a | 18,164 |
from plugin.helpers import log_plugin_error
import importlib
import pkgutil
def get_modules(pkg, recursive: bool = False):
"""get all modules in a package"""
if not recursive:
return [importlib.import_module(name) for finder, name, ispkg in iter_namespace(pkg)]
context = {}
for loader, name, ispkg in pkgutil.walk_packages(pkg.__path__):
try:
module = loader.find_module(name).load_module(name)
pkg_names = getattr(module, '__all__', None)
for k, v in vars(module).items():
if not k.startswith('_') and (pkg_names is None or k in pkg_names):
context[k] = v
context[name] = module
except AppRegistryNotReady:
pass
except Exception as error:
# this 'protects' against malformed plugin modules by more or less silently failing
# log to stack
log_plugin_error({name: str(error)}, 'discovery')
return [v for k, v in context.items()] | 96c48ae86a01defe054e5a4fc948c2f9cfb05660 | 18,166 |
def TransformOperationHttpStatus(r, undefined=''):
"""Returns the HTTP response code of an operation.
Args:
r: JSON-serializable object.
undefined: Returns this value if there is no response code.
Returns:
The HTTP response code of the operation in r.
"""
if resource_transform.GetKeyValue(r, 'status', None) == 'DONE':
return (resource_transform.GetKeyValue(r, 'httpErrorStatusCode', None) or
200) # httplib.OK
return undefined | e840575ccbe468e6b3bc9d5dfb725751bd1a1464 | 18,167 |
import warnings
def split_record_fields(items, content_field, itemwise=False):
"""
This functionality has been moved to :func:`split_records()`, and this is just
a temporary alias for that other function. You should use it instead of this.
"""
warnings.warn(
"`split_record_fields()` has been renamed `split_records()`, "
"and this function is just a temporary alias for it.",
DeprecationWarning,
)
    return split_records(items, content_field, itemwise=itemwise)
import scipy.linalg
import numpy
def prony(signal):
"""Estimates amplitudes and phases of a sparse signal using Prony's method.
Single-ancilla quantum phase estimation returns a signal
g(k)=sum (aj*exp(i*k*phij)), where aj and phij are the amplitudes
and corresponding eigenvalues of the unitary whose phases we wish
to estimate. When more than one amplitude is involved, Prony's method
provides a simple estimation tool, which achieves near-Heisenberg-limited
scaling (error scaling as N^{-1/2}K^{-3/2}).
Args:
signal(1d complex array): the signal to fit
Returns:
amplitudes(list of complex values): the amplitudes a_i,
in descending order by their complex magnitude
phases(list of complex values): the complex frequencies gamma_i,
correlated with amplitudes.
"""
num_freqs = len(signal) // 2
hankel0 = scipy.linalg.hankel(c=signal[:num_freqs],
r=signal[num_freqs - 1:-1])
hankel1 = scipy.linalg.hankel(c=signal[1:num_freqs + 1],
r=signal[num_freqs:])
shift_matrix = scipy.linalg.lstsq(hankel0.T, hankel1.T)[0]
phases = numpy.linalg.eigvals(shift_matrix.T)
generation_matrix = numpy.array(
[[phase**k for phase in phases] for k in range(len(signal))])
amplitudes = scipy.linalg.lstsq(generation_matrix, signal)[0]
amplitudes, phases = zip(*sorted(
zip(amplitudes, phases), key=lambda x: numpy.abs(x[0]), reverse=True))
return numpy.array(amplitudes), numpy.array(phases) | 50bbcd05b1e541144207762052de9de783089bad | 18,170 |
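A round-trip sketch: synthesize a noiseless two-component signal and check that the dominant recovered components match the ground truth (the remaining components carry near-zero amplitude):

```python
import numpy

true_gammas = numpy.exp(1j * numpy.array([0.3, 1.2]))
true_amps = numpy.array([0.7, 0.3])
signal = numpy.array([numpy.sum(true_amps * true_gammas**k) for k in range(20)])
amps, gammas = prony(signal)
print(numpy.abs(amps[:2]))      # approximately [0.7, 0.3]
print(numpy.angle(gammas[:2]))  # approximately [0.3, 1.2]
```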
def _check_alignment(beh_events, alignment, candidates, candidates_set,
resync_i, check_i=None):
"""Check the alignment, account for misalignment accumulation."""
check_i = resync_i if check_i is None else check_i
beh_events = beh_events.copy() # don't modify original
events = np.zeros((beh_events.size))
start = np.argmin([abs(beh_e - candidates).min()
for beh_e in beh_events + alignment])
for i, beh_e in enumerate(beh_events[start:]):
error, events[start + i] = \
_event_dist(beh_e + alignment, candidates_set, candidates[-1],
check_i)
if abs(error) <= resync_i and start + i + 1 < beh_events.size:
beh_events[start + i + 1:] -= error
for i, beh_e in enumerate(beh_events[:start][::-1]):
error, events[start - i - 1] = \
_event_dist(beh_e + alignment, candidates_set, candidates[-1],
check_i)
if abs(error) <= resync_i and start - i - 2 > 0:
beh_events[:start - i - 2] -= error
return beh_events, events | e4508e90f11bb5b10619d19066a5fb51c36365b3 | 18,171 |
def user_info():
"""
个人中心基本资料展示
1、尝试获取用户信息
user = g.user
2、如果用户未登录,重定向到项目首页
3、如果用户登录,获取用户信息
4、把用户信息传给模板
:return:
"""
user = g.user
if not user:
return redirect('/')
data = {
'user': user.to_dict()
}
return render_template('blogs/user.html', data=data) | cb8d9c2081c8a26a82a451ce0f4de22fc1a43845 | 18,172 |
def build_config_tests_list():
"""Build config tests list"""
names,_,_,_ = zip(*config_tests)
return names | df190ec4926af461f15145bc25314a397d0be52b | 18,173 |
def annotate_filter(**decargs):
"""Add input and output watermarks to filtered events."""
def decorator(func):
"""Annotate events with entry and/or exit timestamps."""
def wrapper(event, *args, **kwargs):
"""Add enter and exit annotations to the processed event."""
funcname = ":".join([func.__module__, func.__name__])
enter_key = funcname + "|enter"
annotate_event(event, enter_key, **decargs)
out = func(event, *args, **kwargs)
exit_key = funcname + "|exit"
annotate_event(event, exit_key, **decargs)
return out
return wrapper
return decorator | e1ce16e46f17948bdb1eae3ac8e5884fe6553283 | 18,175 |
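A usage sketch with a hypothetical annotate_event that just records wall-clock timestamps on the event dict (the real annotate_event is defined elsewhere in this module):

```python
import time

def annotate_event(event, key, **kwargs):
    # hypothetical stand-in: store a timestamp under the given key
    event.setdefault("annotations", {})[key] = time.time()

@annotate_filter()
def double(event):
    event["value"] *= 2
    return event

evt = {"value": 21}
double(evt)
print(evt["value"])                # 42
print(sorted(evt["annotations"]))  # ['__main__:double|enter', '__main__:double|exit']
```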
def cplot(*args,**kwargs):
"""
    cplot - Plot on the current graph
This is an "alias" to gcf().gca().plot()
"""
return(gcf().gca().plot(*args,**kwargs)) | b7725569d19520c0e85f3a48d30800c3822cdac2 | 18,176 |
import datetime
def need_to_flush_metrics(time_now):
"""Check if metrics need flushing, and update the timestamp of last flush.
Even though the caller of this function may not successfully flush the
metrics, we still update the last_flushed timestamp to prevent too much work
being done in user requests.
Also, this check-and-update has to happen atomically, to ensure only one
thread can flush metrics at a time.
"""
if not interface.state.flush_enabled_fn():
return False
datetime_now = datetime.datetime.utcfromtimestamp(time_now)
minute_ago = datetime_now - datetime.timedelta(seconds=60)
with _flush_metrics_lock:
if interface.state.last_flushed > minute_ago:
return False
interface.state.last_flushed = datetime_now
return True | a2f50927a61eecee9448661f87f08a99caa4a22c | 18,177 |
def create_instances_from_lists(x, y=None, name="data"):
"""
Allows the generation of an Instances object from a list of lists for X and a list for Y (optional).
All data must be numerical. Attributes can be converted to nominal with the
weka.filters.unsupervised.attribute.NumericToNominal filter.
:param x: the input variables (row wise)
:type x: list of list
:param y: the output variable (optional)
:type y: list
:param name: the name of the dataset
:type name: str
:return: the generated dataset
:rtype: Instances
"""
if y is not None:
if len(x) != len(y):
raise Exception("Dimensions of x and y differ: " + str(len(x)) + " != " + str(len(y)))
# create header
atts = []
    for i in range(len(x[0])):
atts.append(Attribute.create_numeric("x" + str(i+1)))
if y is not None:
atts.append(Attribute.create_numeric("y"))
result = Instances.create_instances(name, atts, len(x))
# add data
    for i in range(len(x)):
values = x[i][:]
if y is not None:
values.append(y[i])
result.add_instance(Instance.create_instance(values))
return result | 310d72cb9fe5f65d85b19f9408e670426ebf7fdd | 18,179 |
import numpy as np
from scipy.ndimage import median_filter
def median_filter_(img, mask):
    """
    Applies a median filter to all channels
    """
ims = []
for d in range(3):
img_conv_d = median_filter(img[:,:,d], size=(mask,mask))
ims.append(img_conv_d)
return np.stack(ims, axis=2).astype("uint8") | 2d7909b974572711901f84806009f237ecafaadf | 18,181 |
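A quick check (scipy.ndimage provides the median_filter imported above):

```python
import numpy as np

img = (np.random.rand(64, 64, 3) * 255).astype("uint8")
smoothed = median_filter_(img, mask=3)  # 3x3 median window per channel
print(smoothed.shape, smoothed.dtype)   # (64, 64, 3) uint8
```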
def replace_lines(inst, clean_lines, norm_lines):
"""
Given an instance and a list of clean lines and normal lines,
add a cleaned tier and normalized if they do not already exist,
otherwise, replace them.
:param inst:
:type inst: xigt.Igt
:param clean_lines:
:type clean_lines: list[dict]
:param norm_lines:
:type norm_lines: list[dict]
"""
# -------------------------------------------
# Remove the old clean/norm lines.
# -------------------------------------------
old_clean_tier = cleaned_tier(inst)
if old_clean_tier is not None:
inst.remove(old_clean_tier)
old_norm_tier = normalized_tier(inst)
if old_norm_tier is not None:
inst.remove(old_norm_tier)
# -------------------------------------------
# Now, add the clean/norm lines, if provided.
# -------------------------------------------
if clean_lines:
new_clean_tier = create_text_tier_from_lines(inst, clean_lines, CLEAN_ID, CLEAN_STATE)
inst.append(new_clean_tier)
if norm_lines:
new_norm_tier = create_text_tier_from_lines(inst, norm_lines, NORM_ID, NORM_STATE)
inst.append(new_norm_tier)
return inst | 39f3fdcd40eafd32e071b54c9ab032104fba8c7c | 18,182 |
from pathlib import Path
def get_html(link: Link, path: Path) -> str:
"""
Try to find wget, singlefile and then dom files.
If none is found, download the url again.
"""
canonical = link.canonical_outputs()
abs_path = path.absolute()
sources = [canonical["singlefile_path"], canonical["wget_path"], canonical["dom_path"]]
document = None
for source in sources:
try:
with open(abs_path / source, "r", encoding="utf-8") as f:
document = f.read()
break
except (FileNotFoundError, TypeError):
continue
if document is None:
return download_url(link.url)
else:
return document | 3624e3df219cc7d6480747407ad7de3ec702813e | 18,183 |
def normalization(X,degree):
""" A scaling technique in which values
are shifted and rescaled so that they
end up ranging between 0 and 1.
It is also known as Min-Max scaling
----------------------------------------
degree: polynomial regression degree, or attribute/feature number
"""
X[:, :(degree)] = (X[:, :(degree)] - np.amin(X[:, :(degree)], axis = 0))/ \
(np.amax(X[:, :(degree)], axis = 0) - np.amin(X[:, :(degree)], axis = 0))
return X | 9cdef8b4b7e7a31523311ce6f4a668c6039ad2a1 | 18,184 |
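A small check: both feature columns end up in the [0, 1] range:

```python
import numpy as np

X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])
print(normalization(X, degree=2))
# [[0.  0. ]
#  [0.5 0.5]
#  [1.  1. ]]
```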
def get_tags_from_match(child_span_0, child_span_1, tags):
"""
Given two entities spans,
check if both are within one of the tags span,
and return the first match or O
"""
match_tags = []
for k, v in tags.items():
parent_span = (v["start"], v["end"])
if parent_relation(child_span_0, child_span_1, parent_span):
match_tags.append(v["tag"])
return match_tags[0] if match_tags else "O" | c7ad037d2c40b6316006b4c7dda2fd9d02640f6e | 18,185 |
def _rfc822_escape(header):
"""Return a version of the string escaped for inclusion in an
RFC-822 header, by ensuring there are 8 spaces space after each newline.
"""
lines = header.split('\n')
header = ('\n' + 8 * ' ').join(lines)
return header | 1a3cd02b057742db00ed741c40947cf4e19d1a86 | 18,186 |
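A quick check of the continuation-line indentation:

```python
print(_rfc822_escape("Line one\nLine two"))
# Line one
#         Line two
```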
import socket
def getCgiBaseHref():
"""Return value for <cgiBaseHref/> configuration parameter."""
val = sciflo.utils.ScifloConfigParser().getParameter('cgiBaseHref')
if val is None:
val = "http://%s/sciflo/cgi-bin/" % socket.getfqdn()
return val | 62b5bc3d528c6db64ff8899c2847d2b0ecb4021d | 18,187 |
def dijkstra(gph: GraphState,
algo: AlgoState,
txt: VisText,
start: Square,
end: Square,
ignore_node: Square = None,
draw_best_path: bool = True,
visualize: bool = True) \
-> [dict, bool]:
"""Code for the dijkstra algorithm"""
    # Used to determine the order of squares to check. Order of args helps decide the priority.
queue_pos: int = 0
open_set = PriorityQueue()
open_set.put((0, queue_pos, start))
open_set_hash: set = {start}
# Determine what is the best square to check
g_score: dict = {square: float('inf') for row in gph.graph for square in row}
g_score[start] = 0
# Keeps track of next node for every node in graph. A linked list basically.
came_from: dict = {}
# Continues until every node has been checked or best path found
i = 0
while not open_set.empty():
# If uses closes window the program terminates
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
# Gets the square currently being checked
curr_square: Square = open_set.get()[2]
open_set_hash.remove(curr_square)
# Terminates if found the best path
if curr_square == end:
if draw_best_path:
best_path(gph, algo, txt, came_from, end, visualize=visualize)
return True
return came_from
# Decides the order of neighbours to check
for nei in curr_square.neighbours:
temp_g_score: int = g_score[curr_square] + 1
if temp_g_score < g_score[nei]:
came_from[nei] = curr_square
g_score[nei] = temp_g_score
if nei not in open_set_hash:
queue_pos += 1
open_set.put((g_score[nei], queue_pos, nei))
open_set_hash.add(nei)
if nei != end and nei.color != CLOSED_COLOR and nei != ignore_node:
nei.set_open()
        # Only visualize if requested. Skip squares that are already closed so they are not redrawn when a mid node is included.
i += 1
if visualize and not curr_square.is_closed():
if i % gph.speed_multiplier == 0:
i = 0
draw(gph, txt, display_update=False)
draw_vis_text(txt, is_dijkstra=True)
# Sets square to closed after finished checking
if curr_square != start and curr_square != ignore_node:
curr_square.set_closed()
return False | cbc69734278e7ab4b0c609a1bfab5a9280bedee4 | 18,189 |
def nowIso8601():
"""
Returns time now in ISO 8601 format
use now(timezone.utc)
YYYY-MM-DDTHH:MM:SS.ffffff+HH:MM[:SS[.ffffff]]
.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
'2020-08-22T17:50:09.988921+00:00'
Assumes TZ aware
For nanosecond use instead attotime or datatime64 in pandas or numpy
"""
return (nowUTC().isoformat(timespec='microseconds')) | c5290e5a60f708f19d1cecf74c9cd927b4750ca3 | 18,191 |
def get_trip_data(tripdata_path, output_path, start=None, stop=None):
"""
Read raw tripdata csv and filter unnecessary info.
1 - Check if output path exists
2 - If output path does not exist
2.1 - Select columns ("pickup_datetime",
"passenger_count",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude")
2.2 - If start and stop are not None, get excerpt
    3 - Save clean tripdata in a csv
    4 - Return dataframe
Arguments:
tripdata_path {string} -- Raw trip data csv path
output_path {string} -- Cleaned trip data csv path
start {string} -- Datetime where tripdata should start (e.g., 2011-02-01 12:23:00)
stop {string} -- Datetime where tripdata should end (e.g., 2011-02-01 14:00:00)
Returns:
Dataframe -- Cleaned tripdata dataframe
"""
print("files:", output_path, tripdata_path)
# Trip data dataframe (Valentine's day)
tripdata_dt_excerpt = None
try:
# Load tripdata
tripdata_dt_excerpt = pd.read_csv(
output_path, parse_dates=True, index_col="pickup_datetime")
print("Loading file '{}'.".format(output_path))
    except Exception:
# Columns used
filtered_columns = ["pickup_datetime",
"passenger_count",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude"]
# Reading file
tripdata_dt = pd.read_csv(tripdata_path,
parse_dates=True,
index_col="pickup_datetime",
usecols=filtered_columns,
na_values='0')
tripdata_dt_excerpt = None
# Get excerpt
if start and stop:
tripdata_dt_excerpt = pd.DataFrame(
tripdata_dt.loc[(tripdata_dt.index >= start) & (tripdata_dt.index <= stop)])
else:
tripdata_dt_excerpt = pd.DataFrame(tripdata_dt)
# Remove None values
tripdata_dt_excerpt.dropna(inplace=True)
# Sort
tripdata_dt_excerpt.sort_index(inplace=True)
# Save day data
tripdata_dt_excerpt.to_csv(output_path)
return tripdata_dt_excerpt | 3aca0b89d1e747ae1ea3e5ea9f3fa0d63a5b9447 | 18,192 |
import urllib.parse
def _qparams2url(qparams):
"""
parse qparams to make url segment
:param qparams:
:return: parsed url segment
"""
try:
if qparams == []:
return ""
assert len(qparams) == 4
num = len(qparams[0][1])
path=""
for i in range(num):
for j in range(4):
path += str(qparams[j][0]) + '=' + str(qparams[j][1][i]) + "&"
path = path[:-1]
return path
    except Exception:
return urllib.parse.urlencode(qparams, doseq=True) | ac416dd0dac87210fef5aa1bea97a60c84df60cf | 18,193 |
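A sketch with four query parameters, each carrying two values; the pairs are interleaved per index:

```python
qp = [("lat", [1, 2]), ("lon", [3, 4]), ("t", [5, 6]), ("z", [7, 8])]
print(_qparams2url(qp))  # lat=1&lon=3&t=5&z=7&lat=2&lon=4&t=6&z=8
print(_qparams2url([]))  # '' (an empty list yields an empty segment)
```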
import itertools
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def confusion_matrix(y_pred: IntTensor,
y_true: IntTensor,
normalize: bool = True,
labels: IntTensor = None,
title: str = 'Confusion matrix',
cmap: str = 'Blues',
show: bool = True):
"""Plot confusion matrix
Args:
y_pred: Model prediction returned by `model.match()`
y_true: Expected class_id.
normalize: Normalizes matrix values between 0 and 1.
Defaults to True.
labels: List of class string label to display instead of the class
numerical ids. Defaults to None.
title: Title of the confusion matrix. Defaults to 'Confusion matrix'.
cmap: Color schema as CMAP. Defaults to 'Blues'.
show: If the plot is going to be shown or not. Defaults to True.
"""
with tf.device("/cpu:0"):
# Ensure we are working with integer tensors.
y_pred = tf.cast(tf.convert_to_tensor(y_pred), dtype='int32')
y_true = tf.cast(tf.convert_to_tensor(y_true), dtype='int32')
cm = tf.math.confusion_matrix(y_true, y_pred)
cm = tf.cast(cm, dtype='float')
accuracy = tf.linalg.trace(cm) / tf.math.reduce_sum(cm)
misclass = 1 - accuracy
if normalize:
cm = tf.math.divide_no_nan(
cm,
tf.math.reduce_sum(cm, axis=1)[:, np.newaxis]
)
plt.figure(figsize=(8, 6))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if labels is not None:
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
cm_max = tf.math.reduce_max(cm)
thresh = cm_max / 1.5 if normalize else cm_max / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
val = cm[i, j]
color = "white" if val > thresh else "black"
txt = "%.2f" % val if val > 0.0 else "0"
plt.text(j, i, txt, horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(
accuracy, misclass))
if show:
plt.show()
else:
return plt | 744642cde03696f6ecbccc6f702e3f9a3cb67451 | 18,194 |
def from_smiles(smiles: str) -> Molecule:
"""Load a molecule from SMILES."""
return cdk.fromSMILES(smiles) | a5315eeb9ffadff16b90db32ca07714fe1573cda | 18,195 |
from typing import Dict
from typing import Any
def parse_template_config(template_config_data: Dict[str, Any]) -> EmailTemplateConfig:
"""
>>> from tests import doctest_utils
>>> convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
>>> parse_template_config({}) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> parse_template_config({
... 'subject': 'blah',
... 'body': 'blah',
... }) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ImproperlyConfigured
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt',
... 'rest_registration/register/body.html',
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'html_body': 'rest_registration/register/body.html',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'text_body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.txt',
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.txt', None,
... identity))
OK
>>> doctest_utils.equals(
... parse_template_config({
... 'subject': 'rest_registration/register/subject.txt',
... 'body': 'rest_registration/register/body.html',
... 'is_html': True,
... }),
... EmailTemplateConfig(
... 'rest_registration/register/subject.txt',
... 'rest_registration/register/body.html',
... 'rest_registration/register/body.html',
... convert_html_to_text))
OK
"""
try:
subject_template_name = template_config_data['subject']
except KeyError:
raise ImproperlyConfigured(_("No 'subject' key found")) from None
body_template_name = template_config_data.get('body')
text_body_template_name = template_config_data.get('text_body')
html_body_template_name = template_config_data.get('html_body')
is_html_body = template_config_data.get('is_html')
convert_html_to_text = registration_settings.VERIFICATION_EMAIL_HTML_TO_TEXT_CONVERTER # noqa: E501
if html_body_template_name and text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=identity,
)
elif html_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=html_body_template_name,
html_body_template_name=html_body_template_name,
text_body_processor=convert_html_to_text,
)
elif text_body_template_name:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=text_body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
elif body_template_name:
if is_html_body:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=body_template_name,
text_body_processor=convert_html_to_text,
)
else:
config = EmailTemplateConfig(
subject_template_name=subject_template_name,
text_body_template_name=body_template_name,
html_body_template_name=None,
text_body_processor=identity,
)
else:
raise ImproperlyConfigured(
'Could not parse template config data: {template_config_data}'.format( # noqa: E501
template_config_data=template_config_data))
_validate_template_name_existence(config.subject_template_name)
_validate_template_name_existence(config.text_body_template_name)
if config.html_body_template_name:
_validate_template_name_existence(config.html_body_template_name)
assert callable(config.text_body_processor)
return config | adea58fd8e8a16ec4fd48ef68aa2ff1c6356bd0d | 18,196 |
import json
def stringify_message(message):
"""Return a JSON message that is alphabetically sorted by the key name
Args:
message
"""
return json.dumps(message, sort_keys=True, separators=(',', ':')) | ccd51481627449345ba70fbf45d8069deca0f064 | 18,197 |
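A quick check of the compact, key-sorted output:

```python
print(stringify_message({"b": 1, "a": [2, 3]}))  # {"a":[2,3],"b":1}
```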
import numpy as np
def compute_similarity_transform(X, Y, compute_optimal_scale=False):
"""
A port of MATLAB's `procrustes` function to Numpy.
Adapted from http://stackoverflow.com/a/18927641/1884420
Args
X: array NxM of targets, with N number of points and M point dimensionality
Y: array NxM of inputs
compute_optimal_scale: whether we compute optimal scale or force it to be 1
Returns:
d: squared error after transformation
Z: transformed Y
T: computed rotation
b: scaling
c: translation
"""
muX = X.mean(0)
muY = Y.mean(0)
X0 = X - muX
Y0 = Y - muY
ssX = (X0**2.).sum()
ssY = (Y0**2.).sum()
# centred Frobenius norm
normX = np.sqrt(ssX)
normY = np.sqrt(ssY)
# scale to equal (unit) norm
X0 = X0 / normX
Y0 = Y0 / normY
# optimum rotation matrix of Y
A = np.dot(X0.T, Y0)
U,s,Vt = np.linalg.svd(A,full_matrices=False)
V = Vt.T
T = np.dot(V, U.T)
# Make sure we have a rotation
detT = np.linalg.det(T)
V[:,-1] *= np.sign( detT )
s[-1] *= np.sign( detT )
T = np.dot(V, U.T)
traceTA = s.sum()
if compute_optimal_scale: # Compute optimum scaling of Y.
b = traceTA * normX / normY
d = 1 - traceTA**2
Z = normX*traceTA*np.dot(Y0, T) + muX
else: # If no scaling allowed
b = 1
d = 1 + ssY/ssX - 2 * traceTA * normY / normX
Z = normY*np.dot(Y0, T) + muX
c = muX - b*np.dot(muY, T)
return d, Z, T, b, c | 10da3df241ec140de86b2307f9fc097b4f926407 | 18,198 |
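A round-trip sketch: a rotated and translated copy of X should be mapped back onto X with near-zero squared error:

```python
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 3))
theta = 0.5
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
Y = X @ R + np.array([1.0, -2.0, 0.5])
d, Z, T, b, c = compute_similarity_transform(X, Y)
print(np.allclose(Z, X), abs(d) < 1e-10)  # True True
```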
def simplefenestration(idf, fsd, deletebsd=True, setto000=False):
"""convert a bsd (fenestrationsurface:detailed) into a simple
fenestrations"""
funcs = (window,
door,
glazeddoor,)
for func in funcs:
fenestration = func(idf, fsd, deletebsd=deletebsd, setto000=setto000)
if fenestration:
return fenestration
return None | b72e73a22756e80981d308b54037510354a5d327 | 18,199 |
from typing import TextIO
from typing import Set
def observe_birds(observations_file: TextIO) -> Set[str]:
"""Return a set of the bird species listed in observations_file, which has one bird species per line.
>>> file = StringIO("bird 1\\nbird 2\\nbird 1\\n")
>>> birds = observe_birds(file)
>>> 'bird 1' in birds
True
>>> 'bird 2' in birds
True
>>> len(birds) == 2
True
"""
birds_observed=set()
for line in observations_file:
bird = line.strip()
birds_observed.add(bird)
return birds_observed | e3ea90e8da4488121ec1ae75c4aa116646db08f5 | 18,200 |
def convert_sheet(sheet, result_dict, is_enum_mode=False):
"""
转换单个sheet的数据
Args:
sheet: openpyxl.worksheet.worksheet.Worksheet
result_dict: [dict]结果都存在这里, key为data_name,value为sheet_result
is_enum_mode: [bool]是否为enum导表模式
Returns:
bool, 是否成功
"""
if is_enum_mode:
data_name = convert.excel_handler.get_enum_class_name(sheet)
else:
data_name = convert.excel_handler.get_data_name(sheet)
sheet_name = convert.excel_handler.get_sheet_name(sheet)
if not data_name:
        ec_converter.logger.info('sheet \'%s\' has an empty or invalid data name; skipping export', sheet_name)
return True
if data_name in result_dict:
        ec_converter.logger.error('duplicate data name \'%s\', sheet name = \'%s\'', data_name, sheet_name)
return False
name_schema_dict = {}
col_schema_dict = {}
if not _get_sheet_schema_meta_info(sheet, name_schema_dict, col_schema_dict):
        ec_converter.logger.error('failed to get field schema info for sheet \'%s\'', sheet_name)
return False
sheet_result = result_dict.setdefault(data_name, convert.sheet_result.SheetResult(data_name))
sheet_result.name_schema_dict = name_schema_dict
sheet_result.col_schema_dict = col_schema_dict
for row_data in convert.excel_handler.get_row_generator(sheet, settings.ROW_OFFSET):
if not _convert_row(row_data, sheet_name, sheet_result):
return False
return True | 284f470844b6722941d0e4725e4c23b1473b08df | 18,201 |
def bytes_to_int(b: bytes, order: str = 'big') -> int:
"""Convert bytes 'b' to an int."""
return int.from_bytes(b, order) | c959683787e03cc956b5abffc814f98cf4722397 | 18,203 |
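Two quick checks of the byte order:

```python
print(bytes_to_int(b"\x01\x00"))            # 256
print(bytes_to_int(b"\x01\x00", "little"))  # 1
```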
def fit_model(params,param_names,lam_gal,galaxy,noise,gal_temp,
feii_tab,feii_options,
temp_list,temp_fft,npad,line_profile,fwhm_gal,velscale,npix,vsyst,run_dir,
fit_type,output_model):
"""
Constructs galaxy model by convolving templates with a LOSVD given by
a specified set of velocity parameters.
Parameters:
        params: parameters of the Markov chain
lam_gal: wavelength vector used for continuum model
temp_fft: the Fourier-transformed templates
npad:
velscale: the velocity scale in km/s/pixel
npix: number of output pixels; must be same as galaxy
vsyst: dv; the systematic velocity fr
"""
# Construct dictionary of parameter names and their respective parameter values
# param_names = [param_dict[key]['name'] for key in param_dict ]
# params = [param_dict[key]['init'] for key in param_dict ]
keys = param_names
values = params
p = dict(zip(keys, values))
c = 299792.458 # speed of light
host_model = np.copy(galaxy)
comp_dict = {}
# Perform linear interpolation on the fwhm_gal array as a function of wavelength
# We will use this to determine the fwhm resolution as a fucntion of wavelenth for each
# emission line so we can correct for the resolution at every iteration.
fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(0,0))
# Re-directed line_profile function
def line_model(line_profile,*args):
"""
This function maps the user-chosen line profile
to the correct line_model
"""
if (line_profile=='Gaussian'):
line = gaussian(*args)
return line
elif (line_profile=='Lorentzian'):
line = lorentzian(*args)
return line
############################# Power-law Component ######################################################
# if all(comp in param_names for comp in ['power_amp','power_slope','power_break'])==True:
if all(comp in param_names for comp in ['power_amp','power_slope'])==True:
# Create a template model for the power-law continuum
# power = simple_power_law(lam_gal,p['power_amp'],p['power_slope'],p['power_break']) #
power = simple_power_law(lam_gal,p['power_amp'],p['power_slope']) #
host_model = (host_model) - (power) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['power'] = {'comp':power,'pcolor':'xkcd:orange red','linewidth':1.0}
########################################################################################################
############################# Fe II Component ##########################################################
if (feii_tab is not None):
if (feii_options['template']['type']=='VC04'):
# Unpack feii_tab
na_feii_tab = (feii_tab[0],feii_tab[1])
br_feii_tab = (feii_tab[2],feii_tab[3])
# Parse FeII options
if (feii_options['amp_const']['bool']==False): # if amp not constant
na_feii_amp = p['na_feii_amp']
br_feii_amp = p['br_feii_amp']
elif (feii_options['amp_const']['bool']==True): # if amp constant
na_feii_amp = feii_options['amp_const']['na_feii_val']
br_feii_amp = feii_options['amp_const']['br_feii_val']
if (feii_options['fwhm_const']['bool']==False): # if amp not constant
na_feii_fwhm = p['na_feii_fwhm']
br_feii_fwhm = p['br_feii_fwhm']
elif (feii_options['fwhm_const']['bool']==True): # if amp constant
na_feii_fwhm = feii_options['fwhm_const']['na_feii_val']
br_feii_fwhm = feii_options['fwhm_const']['br_feii_val']
if (feii_options['voff_const']['bool']==False): # if amp not constant
na_feii_voff = p['na_feii_voff']
br_feii_voff = p['br_feii_voff']
elif (feii_options['voff_const']['bool']==True): # if amp constant
na_feii_voff = feii_options['voff_const']['na_feii_val']
br_feii_voff = feii_options['voff_const']['br_feii_val']
na_feii_template = VC04_feii_template(lam_gal,fwhm_gal,na_feii_tab,na_feii_amp,na_feii_fwhm,na_feii_voff,velscale,run_dir)
br_feii_template = VC04_feii_template(lam_gal,fwhm_gal,br_feii_tab,br_feii_amp,br_feii_fwhm,br_feii_voff,velscale,run_dir)
host_model = (host_model) - (na_feii_template) - (br_feii_template)
comp_dict['na_feii_template'] = {'comp':na_feii_template,'pcolor':'xkcd:yellow','linewidth':1.0}
comp_dict['br_feii_template'] = {'comp':br_feii_template,'pcolor':'xkcd:orange','linewidth':1.0}
elif (feii_options['template']['type']=='K10'):
# Unpack tables for each template
f_trans_tab = (feii_tab[0],feii_tab[1],feii_tab[2])
s_trans_tab = (feii_tab[3],feii_tab[4],feii_tab[5])
g_trans_tab = (feii_tab[6],feii_tab[7],feii_tab[8])
z_trans_tab = (feii_tab[9],feii_tab[10])
# Parse FeII options
if (feii_options['amp_const']['bool']==False): # if amp not constant
f_feii_amp = p['feii_f_amp']
s_feii_amp = p['feii_s_amp']
g_feii_amp = p['feii_g_amp']
z_feii_amp = p['feii_z_amp']
elif (feii_options['amp_const']['bool']==True): # if amp constant
f_feii_amp = feii_options['amp_const']['f_feii_val']
s_feii_amp = feii_options['amp_const']['s_feii_val']
g_feii_amp = feii_options['amp_const']['g_feii_val']
z_feii_amp = feii_options['amp_const']['z_feii_val']
#
if (feii_options['fwhm_const']['bool']==False): # if fwhm not constant
feii_fwhm = p['feii_fwhm']
elif (feii_options['fwhm_const']['bool']==True): # if fwhm constant
feii_fwhm = feii_options['fwhm_const']['val']
#
if (feii_options['voff_const']['bool']==False): # if voff not constant
feii_voff = p['feii_voff']
elif (feii_options['voff_const']['bool']==True): # if voff constant
feii_voff = feii_options['voff_const']['val']
#
if (feii_options['temp_const']['bool']==False): # if temp not constant
feii_temp = p['feii_temp']
elif (feii_options['temp_const']['bool']==True): # if temp constant
feii_temp = feii_options['temp_const']['val']
f_trans_feii_template = K10_feii_template(lam_gal,'F',fwhm_gal,f_trans_tab,f_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
s_trans_feii_template = K10_feii_template(lam_gal,'S',fwhm_gal,s_trans_tab,s_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
g_trans_feii_template = K10_feii_template(lam_gal,'G',fwhm_gal,g_trans_tab,g_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
z_trans_feii_template = K10_feii_template(lam_gal,'IZw1',fwhm_gal,z_trans_tab,z_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
host_model = (host_model) - (f_trans_feii_template) - (s_trans_feii_template) - (g_trans_feii_template) - (z_trans_feii_template)
comp_dict['F_feii_template'] = {'comp':f_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['S_feii_template'] = {'comp':s_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['G_feii_template'] = {'comp':g_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['Z_feii_template'] = {'comp':z_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
########################################################################################################
############################# Emission Line Components #################################################
# Narrow lines
#### [OII]3727,3729 #################################################################################
if all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_fwhm','na_oii3727_core_voff','na_oii3729_core_amp'])==True:
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
na_oii3729_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_oiii5007_core_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True):
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
na_oii3729_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_Hg_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
na_oii3729_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### [NeIII]3870 #################################################################################
if all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_fwhm','na_neiii_core_voff'])==True:
		# Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_neiii_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_oiii5007_core_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','na_oiii4363_core_fwhm'])==True):
		# Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_Hg_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
		# Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-delta #####################################################################################
if all(comp in param_names for comp in ['na_Hd_amp','na_Hd_fwhm','na_Hd_voff'])==True:
		# Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_Hd_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_oiii5007_core_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_Hd_fwhm','na_Hg_fwhm','na_oiii4363_core_fwhm'])==True):
		# Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_Hg_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_Hd_fwhm','na_oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
		# Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-gamma/[OIII]4363 ##########################################################################
if all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_fwhm','na_oiii4363_core_voff'])==True:
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_oiii4363_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff','na_oiii5007_core_fwhm'])==True) & \
	(all(comp not in param_names for comp in ['na_Hg_fwhm','na_oiii4363_core_fwhm'])==True):
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff'])==True) & \
	(all(comp not in param_names for comp in ['na_oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-beta/[OIII] #########################################################################################
if all(comp in param_names for comp in ['na_oiii5007_core_amp','na_oiii5007_core_fwhm','na_oiii5007_core_voff'])==True:
# Narrow [OIII]5007 Core
na_oiii5007_core_center = 5008.240 # Angstroms
na_oiii5007_core_amp = p['na_oiii5007_core_amp'] # flux units
na_oiii5007_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii5007_core_center,p['na_oiii5007_core_voff'])
na_oiii5007_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii5007_core_fwhm_res)**2) # km/s
na_oiii5007_core_voff = p['na_oiii5007_core_voff'] # km/s
na_oiii5007_core = gaussian(lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale)
# na_oiii5007_core = line_model(line_profile,lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale)
host_model = host_model - na_oiii5007_core
comp_dict['na_oiii5007_core'] = {'comp':na_oiii5007_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4959 Core
na_oiii4959_core_center = 4960.295 # Angstroms
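		# [OIII]4959 amplitude is fixed to 1/3 of [OIII]5007 (theoretical [OIII] doublet ratio)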
na_oiii4959_core_amp = (1.0/3.0)*na_oiii5007_core_amp # flux units
na_oiii4959_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_core_center,na_oiii5007_core_voff)
na_oiii4959_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4959_fwhm_res)**2) # km/s
na_oiii4959_core_voff = na_oiii5007_core_voff # km/s
na_oiii4959_core = gaussian(lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale)
# na_oiii4959_core = line_model(line_profile,lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale)
host_model = host_model - na_oiii4959_core
comp_dict['na_oiii4959_core'] = {'comp':na_oiii4959_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
if all(comp in param_names for comp in ['na_Hb_core_amp','na_Hb_core_voff'])==True:
# Narrow H-beta
na_hb_core_center = 4862.680 # Angstroms
na_hb_core_amp = p['na_Hb_core_amp'] # flux units
na_hb_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,p['na_Hb_core_voff'])
na_hb_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hb_core_fwhm_res)**2) # km/s
na_hb_core_voff = p['na_Hb_core_voff'] # km/s
na_Hb_core = gaussian(lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale)
# na_Hb_core = line_model(line_profile,lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale)
host_model = host_model - na_Hb_core
comp_dict['na_Hb_core'] = {'comp':na_Hb_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-alpha/[NII]/[SII] ####################################################################################
if all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_fwhm','na_Ha_core_voff',
'na_nii6585_core_amp',
'na_sii6732_core_amp','na_sii6718_core_amp'])==True:
# Narrow H-alpha
na_ha_core_center = 6564.610 # Angstroms
na_ha_core_amp = p['na_Ha_core_amp'] # flux units
na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff'])
na_ha_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s
na_ha_core_voff = p['na_Ha_core_voff'] # km/s
na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale)
host_model = host_model - na_Ha_core
comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6585 Core
na_nii6585_core_center = 6585.270 # Angstroms
na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units
na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff)
na_nii6585_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s
na_nii6585_core_voff = na_ha_core_voff
na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale)
host_model = host_model - na_nii6585_core
comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6549 Core
na_nii6549_core_center = 6549.860 # Angstroms
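		# [NII]6549 amplitude is fixed by the theoretical [NII] doublet ratio (1/2.93 of [NII]6585)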
na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units
na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff)
		na_nii6549_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s
na_nii6549_core_voff = na_ha_core_voff
na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale)
host_model = host_model - na_nii6549_core
comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6718
na_sii6718_core_center = 6718.290 # Angstroms
na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units
na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff)
		na_sii6718_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s
na_sii6718_core_voff = na_ha_core_voff
na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale)
host_model = host_model - na_sii6718_core
comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6732
na_sii6732_core_center = 6732.670 # Angstroms
na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units
na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff)
na_sii6732_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s
na_sii6732_core_voff = na_ha_core_voff
na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale)
host_model = host_model - na_sii6732_core
comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
elif (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff',
'na_nii6585_core_amp',
'na_sii6732_core_amp','na_sii6718_core_amp',
'na_oiii5007_core_fwhm'])==True) & ('na_Ha_core_fwhm' not in param_names):
# If all narrow line widths are tied to [OIII]5007 FWHM...
# Narrow H-alpha
na_ha_core_center = 6564.610 # Angstroms
na_ha_core_amp = p['na_Ha_core_amp'] # flux units
na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff'])
na_ha_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s
na_ha_core_voff = p['na_Ha_core_voff'] # km/s
na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale)
host_model = host_model - na_Ha_core
comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6585 Core
na_nii6585_core_center = 6585.270 # Angstroms
na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units
na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff)
na_nii6585_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s
na_nii6585_core_voff = na_ha_core_voff
na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale)
host_model = host_model - na_nii6585_core
comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6549 Core
na_nii6549_core_center = 6549.860 # Angstroms
na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units
na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff)
na_nii6549_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s
na_nii6549_core_voff = na_ha_core_voff
na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale)
host_model = host_model - na_nii6549_core
comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6732
na_sii6732_core_center = 6732.670 # Angstroms
na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units
na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff)
na_sii6732_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s
na_sii6732_core_voff = na_ha_core_voff
na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale)
host_model = host_model - na_sii6732_core
comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6718
na_sii6718_core_center = 6718.290 # Angstroms
na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units
na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff)
na_sii6718_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s
na_sii6718_core_voff = na_ha_core_voff
na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale)
host_model = host_model - na_sii6718_core
comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
########################################################################################################
# Outflow Components
#### Hb/[OIII] outflows ################################################################################
if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True):
# Broad [OIII]5007 Outflow;
na_oiii5007_outflow_center = 5008.240 # Angstroms
na_oiii5007_outflow_amp = p['na_oiii5007_outflow_amp'] # flux units
na_oiii5007_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii5007_outflow_center,p['na_oiii5007_outflow_voff'])
na_oiii5007_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii5007_outflow_fwhm_res)**2) # km/s
na_oiii5007_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s
na_oiii5007_outflow = gaussian(lam_gal,na_oiii5007_outflow_center,na_oiii5007_outflow_amp,na_oiii5007_outflow_fwhm,na_oiii5007_outflow_voff,velscale)
host_model = host_model - na_oiii5007_outflow
comp_dict['na_oiii5007_outflow'] = {'comp':na_oiii5007_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [OIII]4959 Outflow;
na_oiii4959_outflow_center = 4960.295 # Angstroms
na_oiii4959_outflow_amp = na_oiii4959_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp # flux units
na_oiii4959_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_outflow_center,na_oiii5007_outflow_voff)
na_oiii4959_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii4959_outflow_fwhm_res)**2) # km/s
na_oiii4959_outflow_voff = na_oiii5007_outflow_voff # km/s
		if not np.isfinite(na_oiii4959_outflow_amp): na_oiii4959_outflow_amp = 0.0 # zero out NaN/inf from the tied amplitude ratio
na_oiii4959_outflow = gaussian(lam_gal,na_oiii4959_outflow_center,na_oiii4959_outflow_amp,na_oiii4959_outflow_fwhm,na_oiii4959_outflow_voff,velscale)
host_model = host_model - na_oiii4959_outflow
comp_dict['na_oiii4959_outflow'] = {'comp':na_oiii4959_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff','na_Hb_core_amp','na_Hb_core_voff'])==True):
# Broad H-beta Outflow; only a model, no free parameters, tied to [OIII]5007
na_hb_core_center = 4862.680 # Angstroms
na_hb_outflow_amp = na_hb_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp
na_hb_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,na_hb_core_voff+na_oiii5007_outflow_voff)
na_hb_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_hb_outflow_fwhm_res)**2) # km/s
na_hb_outflow_voff = na_hb_core_voff+na_oiii5007_outflow_voff # km/s
		if not np.isfinite(na_hb_outflow_amp): na_hb_outflow_amp = 0.0 # zero out NaN/inf
na_Hb_outflow = gaussian(lam_gal,na_hb_core_center,na_hb_outflow_amp,na_hb_outflow_fwhm,na_hb_outflow_voff,velscale)
host_model = host_model - na_Hb_outflow
comp_dict['na_Hb_outflow'] = {'comp':na_Hb_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
#### Ha/[NII]/[SII] outflows ###########################################################################
# Outflows in H-alpha/[NII] are poorly constrained due to the presence of a broad line and/or blending of narrow lines
# First, we check if the fit includes Hb/[OIII] outflows, if it does, we use the outflow in [OIII] to constrain the outflows
	# in the Ha/[NII]/[SII] region. If the fit does NOT include Hb/[OIII] outflows (*not recommended*), we then allow the outflows
# in the Ha/[NII]/[SII] region to be fit as free parameters.
if (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff','na_nii6585_core_amp','na_sii6732_core_amp','na_sii6718_core_amp',
'na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True) and \
(all(comp not in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True):
# H-alpha Outflow;
na_ha_outflow_center = 6564.610 # Angstroms
na_ha_outflow_amp = p['na_Ha_core_amp']*p['na_oiii5007_outflow_amp']/p['na_oiii5007_core_amp'] # flux units
na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_oiii5007_outflow_voff'])
na_ha_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s
		na_ha_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s
		if not np.isfinite(na_ha_outflow_amp): na_ha_outflow_amp = 0.0 # zero out NaN/inf from the tied amplitude ratio
na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale)
host_model = host_model - na_Ha_outflow
comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6585 Outflow;
na_nii6585_outflow_center = 6585.270 # Angstroms
na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff)
na_nii6585_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2)
na_nii6585_outflow_voff = na_ha_outflow_voff
		if not np.isfinite(na_nii6585_outflow_amp): na_nii6585_outflow_amp = 0.0 # zero out NaN/inf
na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale)
host_model = host_model - na_nii6585_outflow
comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6549 Outflow;
na_nii6549_outflow_center = 6549.860 # Angstroms
na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff)
na_nii6549_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s
na_nii6549_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_nii6549_outflow_amp): na_nii6549_outflow_amp = 0.0 # zero out NaN/inf
na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale)
host_model = host_model - na_nii6549_outflow
comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [SII]6718 Outflow;
na_sii6718_outflow_center = 6718.290 # Angstroms
na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff)
na_sii6718_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s
na_sii6718_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_sii6718_outflow_amp): na_sii6718_outflow_amp = 0.0 # zero out NaN/inf
na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale)
host_model = host_model - na_sii6718_outflow
comp_dict['na_sii6718_outflow'] = {'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [SII]6732 Outflow;
na_sii6732_outflow_center = 6732.670 # Angstroms
na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff)
na_sii6732_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s
na_sii6732_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_sii6732_outflow_amp): na_sii6732_outflow_amp = 0.0 # zero out NaN/inf
na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale)
host_model = host_model - na_sii6732_outflow
comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
elif (all(comp in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True) and \
(all(comp not in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True):
# H-alpha Outflow;
na_ha_outflow_center = 6564.610 # Angstroms
na_ha_outflow_amp = p['na_Ha_outflow_amp'] # flux units
na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_Ha_outflow_voff'])
na_ha_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s
		na_ha_outflow_voff = p['na_Ha_outflow_voff'] # km/s
		if not np.isfinite(na_ha_outflow_amp): na_ha_outflow_amp = 0.0 # zero out NaN/inf
na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale)
host_model = host_model - na_Ha_outflow
comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6585 Outflow;
na_nii6585_outflow_center = 6585.270 # Angstroms
na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff)
na_nii6585_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2)
na_nii6585_outflow_voff = na_ha_outflow_voff
		if not np.isfinite(na_nii6585_outflow_amp): na_nii6585_outflow_amp = 0.0 # zero out NaN/inf
na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale)
host_model = host_model - na_nii6585_outflow
comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6549 Outflow;
na_nii6549_outflow_center = 6549.860 # Angstroms
na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff)
na_nii6549_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s
na_nii6549_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_nii6549_outflow_amp): na_nii6549_outflow_amp = 0.0 # zero out NaN/inf
na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale)
host_model = host_model - na_nii6549_outflow
comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [SII]6718 Outflow;
na_sii6718_outflow_center = 6718.290 # Angstroms
na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff)
na_sii6718_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s
na_sii6718_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_sii6718_outflow_amp): na_sii6718_outflow_amp = 0.0 # zero out NaN/inf
na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale)
host_model = host_model - na_sii6718_outflow
comp_dict['na_sii6718_outflow'] = {'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [SII]6732 Outflow;
na_sii6732_outflow_center = 6732.670 # Angstroms
na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff)
na_sii6732_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s
na_sii6732_outflow_voff = na_ha_outflow_voff # km/s
		if not np.isfinite(na_sii6732_outflow_amp): na_sii6732_outflow_amp = 0.0 # zero out NaN/inf
na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale)
host_model = host_model - na_sii6732_outflow
comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
########################################################################################################
# Broad Lines
#### Br. H-gamma #######################################################################################
if all(comp in param_names for comp in ['br_Hg_amp','br_Hg_fwhm','br_Hg_voff'])==True:
br_hg_center = 4341.680 # Angstroms
br_hg_amp = p['br_Hg_amp'] # flux units
br_hg_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hg_center,p['br_Hg_voff'])
br_hg_fwhm = np.sqrt(p['br_Hg_fwhm']**2+(br_hg_fwhm_res)**2) # km/s
br_hg_voff = p['br_Hg_voff'] # km/s
# br_Hg = gaussian(lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale)
br_Hg = line_model(line_profile,lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale)
host_model = host_model - br_Hg
comp_dict['br_Hg'] = {'comp':br_Hg,'pcolor':'xkcd:turquoise','linewidth':1.0}
#### Br. H-beta ########################################################################################
if all(comp in param_names for comp in ['br_Hb_amp','br_Hb_fwhm','br_Hb_voff'])==True:
br_hb_center = 4862.68 # Angstroms
br_hb_amp = p['br_Hb_amp'] # flux units
br_hb_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hb_center,p['br_Hb_voff'])
br_hb_fwhm = np.sqrt(p['br_Hb_fwhm']**2+(br_hb_fwhm_res)**2) # km/s
br_hb_voff = p['br_Hb_voff'] # km/s
# br_Hb = gaussian(lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale)
br_Hb = line_model(line_profile,lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale)
host_model = host_model - br_Hb
comp_dict['br_Hb'] = {'comp':br_Hb,'pcolor':'xkcd:turquoise','linewidth':1.0}
#### Br. H-alpha #######################################################################################
if all(comp in param_names for comp in ['br_Ha_amp','br_Ha_fwhm','br_Ha_voff'])==True:
br_ha_center = 6564.610 # Angstroms
br_ha_amp = p['br_Ha_amp'] # flux units
br_ha_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_ha_center,p['br_Ha_voff'])
br_ha_fwhm = np.sqrt(p['br_Ha_fwhm']**2+(br_ha_fwhm_res)**2) # km/s
br_ha_voff = p['br_Ha_voff'] # km/s
# br_Ha = gaussian(lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale)
br_Ha = line_model(line_profile,lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale)
host_model = host_model - br_Ha
comp_dict['br_Ha'] = {'comp':br_Ha,'pcolor':'xkcd:turquoise','linewidth':1.0}
########################################################################################################
########################################################################################################
############################# Host-galaxy Component ######################################################
if all(comp in param_names for comp in ['gal_temp_amp'])==True:
gal_temp = p['gal_temp_amp']*(gal_temp)
host_model = (host_model) - (gal_temp) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['host_galaxy'] = {'comp':gal_temp,'pcolor':'xkcd:lime green','linewidth':1.0}
########################################################################################################
############################# LOSVD Component ####################################################
if all(comp in param_names for comp in ['stel_vel','stel_disp'])==True:
# Convolve the templates with a LOSVD
losvd_params = [p['stel_vel'],p['stel_disp']] # ind 0 = velocity*, ind 1 = sigma*
conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\
losvd_params,npix,velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
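		# conv_temp is assumed to be (npix, ntemplates): each column is a stellar template convolved with the LOSVD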
# Fitted weights of all templates using Non-negative Least Squares (NNLS)
		host_model[host_model/host_model!=1] = 0 # zero out pixels that are NaN, inf, or exactly zero before fitting
		weights = nnls(conv_temp,host_model)[0] # scipy.optimize.nnls returns (solution, residual norm); keep the solution
host_galaxy = (np.sum(weights*conv_temp,axis=1))
comp_dict['host_galaxy'] = {'comp':host_galaxy,'pcolor':'xkcd:lime green','linewidth':1.0}
########################################################################################################
# The final model
	gmodel = np.sum([d['comp'] for d in comp_dict.values() if d], axis=0) # sum of all model components
########################## Measure Emission Line Fluxes #################################################
# Fluxes of components are stored in a dictionary and returned to emcee as metadata blob.
# This is a vast improvement over the previous method, which was storing fluxes in an
# output file at each iteration, which is computationally expensive for opening, writing to, and closing
# a file nwalkers x niter times.
if (fit_type=='final') and (output_model==False):
fluxes = {}
for key in comp_dict:
# compute the integrated flux
flux = simps(comp_dict[key]['comp'],lam_gal)
# add key/value pair to dictionary
fluxes[key+'_flux'] = flux
##################################################################################
# Add last components to comp_dict for plotting purposes
# Add galaxy, sigma, model, and residuals to comp_dict
comp_dict['data'] = {'comp':galaxy ,'pcolor':'xkcd:white', 'linewidth':0.5}
comp_dict['wave'] = {'comp':lam_gal ,'pcolor':'xkcd:black', 'linewidth':0.5}
comp_dict['noise'] = {'comp':noise ,'pcolor':'xkcd:cyan' , 'linewidth':0.5}
comp_dict['model'] = {'comp':gmodel ,'pcolor':'xkcd:red' , 'linewidth':1.0}
comp_dict['resid'] = {'comp':galaxy-gmodel ,'pcolor':'xkcd:white', 'linewidth':0.5}
##################################################################################
##################################################################################
if (fit_type=='init') and (output_model==False): # For max. likelihood fitting
return gmodel
if (fit_type=='init') and (output_model==True): # For max. likelihood fitting
return comp_dict
elif (fit_type=='outflow_test'):
return comp_dict
elif (fit_type=='final') and (output_model==False): # For emcee
return gmodel, fluxes
elif (fit_type=='final') and (output_model==True): # output all models for best-fit model
return comp_dict | 44cd0bc61a4472c6a5c3c7b190ee5be96f4bdb1a | 18,204 |
import random
def generate_numbers():
"""
Function to generate 3 random digits to be guessed.
Generate 3 random in a list in order to be compare to the user's digits.
Return:
str_digits (Array): List with 3 random digits converted to String
"""
# List comprehension to generate numbers from 0 to 9 and cast it as String
str_digits = [str(num) for num in range(10)]
    # Shuffle the list randomly
random.shuffle(str_digits)
return str_digits[:3] | 8efd0f579a3a0b3dc5021cd762f9ad2f5774f6be | 18,205 |
from flask import jsonify, request

# Upload, APIError, error_on_unauthorized, and upload_to_dict are assumed to come from the host application
def get_media():
"""Retrieves metadata for all of this server's uploaded media. Can use
the following query parameters:
* max: The maximum number of records to return
* page: The page of records
"""
error_on_unauthorized()
media = Upload.query.order_by(Upload.id)
total_num = media.count()
if total_num == 0:
return jsonify(total=0, uploads=[])
try:
count = int(request.args.get('max', total_num))
page = int(request.args.get('page', 1))
if count <= 0 or page <= 0:
raise APIError(422, "Query parameters out of range")
begin = (page - 1) * count
end = min(begin + count, total_num)
return jsonify(total=total_num, uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200
except ValueError:
raise APIError(422, "Invalid query parameter") | 754417b47f5b9c28427b04ace88bf9ca5c9a5a47 | 18,206 |
def summate2(phasevec):
"""Calculate values b'(j^vec) for combining 2 phase vectors.
Parameter:
phasevec: tuple of two phasevectors
Example:
On input (([b_1(0),b_1(1),...,b_1(L-1)], L), ([b_2(0),b_2(1),...,b_2(L'-1)], L'))
give output [b_1(0)+b_2(0), b_1(0)+b_2(1),..., b_1(1)+b_2(0),...,b_1(L-1)+b_2(L'-1)]
"""
b = [] # array for values of summed phasevector
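    # Cartesian combination: add every entry of the first phase vector to every entry of the second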
for i in range(phasevec[0][1]):
for j in range(phasevec[1][1]):
b.append(phasevec[0][0][i] + phasevec[1][0][j])
return b | 5150c2ee29a31438bf16104eaadeb85a01f54502 | 18,207 |
def makeTracker( path, args = (), kwargs = {} ):
"""retrieve an instantiated tracker and its associated code.
returns a tuple (code, tracker).
"""
obj, module, pathname, cls = makeObject( path, args, kwargs )
code = getCode( cls, pathname )
return code, obj | bc23e21bb53357bcf74e6194656cfbea4b24c218 | 18,209 |
from typing import Tuple

# AnchorGenerator is provided by torchvision (models.detection.anchor_utils in torchvision >= 0.8)
from torchvision.models.detection.anchor_utils import AnchorGenerator
def get_anchor_generator(anchor_size: Tuple[tuple] = None, aspect_ratios: Tuple[tuple] = None):
"""Returns the anchor generator."""
if anchor_size is None:
anchor_size = ((16,), (32,), (64,), (128,))
if aspect_ratios is None:
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_size)
anchor_generator = AnchorGenerator(sizes=anchor_size,
aspect_ratios=aspect_ratios)
return anchor_generator | e9eef959c009062d5866558d00674c1afa033260 | 18,210 |
import numpy as np
import torch

def tensor_to_longs(tensor: torch.Tensor) -> np.ndarray:
    """converts a tensor of longs to a numpy array of longs"""
assert tensor.dtype == torch.long
return tensor.detach().cpu().numpy() | ba1788be8e353936cfc3d604d940b78a96990fd4 | 18,211 |
import numpy as np

def test_fixed(SNRs):
"""
Fixed (infinite T1) qubit.
"""
fidelities = []
numShots = 10000
dt = 1e-3
for SNR in SNRs:
fakeData = create_fake_data(SNR, dt, 1, numShots, T1=1e9)
signal = dt*np.sum(fakeData, axis=1)
fidelities.append(fidelity_est(signal))
return fidelities | 70ca68f475beed73a47722c719811544ae1bfccb | 18,212 |
def setup(app):
"""
Add the ``fica`` directive to the Sphinx app.
"""
app.add_directive("fica", FicaDirective)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
} | 996e568ab58634e64a845b34bf38082658b58889 | 18,213 |
from typing import Tuple
import torch
from torch import Tensor
def get_binary_statistics(
outputs: Tensor, targets: Tensor, label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Computes the number of true negative, false positive,
false negative, true negative and support
for a binary classification problem for a given label.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., 1]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
label: integer, that specifies label of interest for statistics compute
Returns:
Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
Example:
>>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
>>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
>>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
"""
tn = ((outputs != label) * (targets != label)).to(torch.long).sum()
fp = ((outputs == label) * (targets != label)).to(torch.long).sum()
fn = ((outputs != label) * (targets == label)).to(torch.long).sum()
tp = ((outputs == label) * (targets == label)).to(torch.long).sum()
support = (targets == label).to(torch.long).sum()
return tn, fp, fn, tp, support | e0c81b404f6da77f40c1e4f3810d699fdef1e6a4 | 18,214 |
import numpy as np

def threshold_and_mask(min_normed_weight, W, Mc, coords): # =np.arange(Wc.shape[0])*stride + start):
"""Normalize the weights W, threshold to min_normed_weight and remove diagonal,
reduce DX and DY to the columns and rows still containing weights.
Returns
-------
coords : array_like
the indices of these columns in terms of original image indices
W_n_m : array_like
the thresholded weights
D_X_m : array_like
The reduced DX
D_Y_m : array_like
The reduced DY
row_mask : array_like
The indices of these columns in terms of calculated arrays.
"""
#coords = np.arange(Wc.shape[0])*stride + start
wcdiag = np.atleast_2d(np.diag(W))
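    # normalize to correlation form: W_n[i, j] = W[i, j] / sqrt(W[i, i] * W[j, j])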
W_n = W / np.sqrt(wcdiag.T*wcdiag)
mask = W_n - np.diag(np.diag(W_n)) > min_normed_weight
row_mask = np.any(mask, axis=0)
W_n = np.where(mask, W_n, 0)
DX, DY = Mc[0], Mc[1]
W_n_m = W_n[:, row_mask][row_mask, :]
coords = coords[row_mask]
#mask_red = mask[row_mask, :][:, row_mask]
DX_m, DY_m = DX[row_mask, :][:, row_mask], DY[row_mask, :][:, row_mask]
return coords, W_n_m, DX_m, DY_m, row_mask | 78d361cf2125cd0d3ac1a3985933e39b09538b18 | 18,215 |
import csv
def readCGcsv(filename, levels):
""" Read a .csv file of a callgraph into a dictionary keyed by callgraph level. """
cgdict = {}
with open(filename, "r") as cgcsv:
cgreader = csv.DictReader(cgcsv)
for row in cgreader:
lvl = int(row['Level'])
if (lvl < levels) or (levels <= 0):
cost = row[r'Samp%']
fname = row[r'Calltree/PE=HIDE']
node = CGNode(fname, cost)
if lvl not in cgdict.keys():
cgdict[lvl] = []
cgdict[lvl].append(node)
if lvl > 0:
cgdict[lvl - 1][-1].addCallee(node)
return cgdict | ec5dbc3d064a0cf784bfd764b996eb36677642a9 | 18,216 |
import matplotlib.pyplot as plt
from cycler import cycler

def use_colors(tones, i=None):
"""
Use specific color tones for plotting. If i is specified, this function returns a specific color from the corresponding color cycle
For custom color palettes generation check: http://colorbrewer2.org/#type=sequential&scheme=YlGnBu&n=8
Args:
tones : 'hot' or 'cold' for hot and cold colors
Returns:
color i of the color cycle
"""
hot = ['#fed976', '#feb24c', '#fd8d3c', '#fc4e2a', '#e31a1c', '#b10026']
cold = ['#a6bddb', '#67a9cf', '#3690c0', '#02818a', '#016c59', '#014636']
# cold = ['#44AE7E', '#388A8D', '#397187', '#3E568E', '#463883', '#461167']
if i is None:
        if tones == 'hot':
colors = hot
        elif tones == 'cold':
colors = cold
else:
colors = tones
plt.rc('axes', prop_cycle=(cycler('color', colors)))
return colors
else:
        if tones == 'hot':
colors = hot
        elif tones == 'cold':
colors = cold
else:
colors = tones
return colors[i % len(colors)] | e36cce208c89178af8199662edb336c2455bdc37 | 18,217 |
def fill_form(forms, form):
"""Fills a given form given a set or known forms.
:param forms: A set of known forms.
:param form: The form to fill.
:return: A mapping from form element IDs to suggested values for the form.
"""
forms = list(forms)
new_form = {}
def rec_fill_form(form, labels):
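        # copy values from the nearest known form, then recurse on the labels that are still unfilled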
if not labels:
return new_form
unfilled_labels = []
neighbor = get_neighbor(forms, labels)
if not neighbor:
LOGGER.info('No neighbors found', labels)
for label in labels:
new_form[form['form'][label]['id']] = None
return new_form
LOGGER.info('Neighbor', neighbor)
for label in labels:
if label in neighbor['form']:
new_form[form['form'][label]['id']] = neighbor['form'][label]['value']
else:
unfilled_labels.append(label)
# LOGGER.info('unfilled', unfilled_labels)
if len(labels) == len(unfilled_labels):
for label in unfilled_labels:
new_form[form['form'][label]['id']] = None
return new_form
return rec_fill_form(form, unfilled_labels)
return rec_fill_form(form, list(form['features'])) | 3e6c1f623facb67602fa5e057080a08d0de9926d | 18,218 |
def integer_to_vector(x, options_per_element, n_elements, index_to_element):
"""Return a vector representing an action/state from a given integer.
Args:
x (int): the integer to convert.
        options_per_element (int): number of options for each element in the vector.
n_elements (int): the number of elements in the vector to return.
index_to_element(int=>any): function which converts an integer represents a single option in one of the
vector elements and return anything that vector contains. For example, a function which returns 'UP' for 0,
1 for 'RIGHT',etc. Or a function which returns (2,2) given 10 for a 4x4 grid ((2,2) is the 10-th cell of that grid).
"""
return integer_to_vector_multiple_numbers(x, options_per_element, n_elements, index_to_element) | 2649359d6a62b047f70bfe72f8403e8343a231ab | 18,220 |
import numpy as np

def samples_for_each_class(dataset_labels, task):
"""
Numbers of samples for each class in the task
Args:
dataset_labels Labels to count samples from
task Labels with in a task
Returns
"""
num_samples = np.zeros([len(task)], dtype=np.float32)
i = 0
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset_labels))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
num_samples[i] = len(class_indices)
i += 1
return num_samples | 96bc2c794fd955110864f59ddb96c5df1c33b8ed | 18,221 |
def requiredOneInGroup(col_name, group, dm, df, *args):
"""
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
"""
if col_name in df.columns:
# if the column name is present, return nothing
return None
else:
# if the column name is missing, return column name
return col_name | de46a4ef2f3e45381644db41d617d8c4c0845877 | 18,222 |
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id | a308931f418616417d10d3115b0f370352778533 | 18,223 |
from unittest.mock import patch
def test_bittrex_query_asset_movement_int_transaction_id(bittrex):
"""Test that if an integer is returned for bittrex transaction id we handle it properly
Bittrex deposit withdrawals SHOULD NOT return an integer for transaction id
according to their docs https://bittrex.github.io/api/v3#definition-Order
but as we saw in practise they sometimes can.
Regression test for https://github.com/rotki/rotki/issues/2175
"""
problematic_deposit = """
[
{
"id": 1,
"status": "COMPLETED",
"quantity": 2.12345678,
"currencySymbol": "RISE",
"confirmations": 2,
"completedAt": "2014-02-13T07:38:53.883Z",
"txId": 9875231951530679373,
"cryptoAddress": "15VyEAT4uf7ycrNWZVb1eGMzrs21BH95Va",
"source": "foo"
}
]
"""
def mock_get_deposit_withdrawal(
url,
method,
json,
**kwargs,
): # pylint: disable=unused-argument
if 'deposit' in url:
response_str = problematic_deposit
else:
response_str = '[]'
return MockResponse(200, response_str)
with patch.object(bittrex.session, 'request', side_effect=mock_get_deposit_withdrawal):
movements = bittrex.query_deposits_withdrawals(
start_ts=0,
end_ts=TEST_END_TS,
only_cache=False,
)
errors = bittrex.msg_aggregator.consume_errors()
warnings = bittrex.msg_aggregator.consume_warnings()
assert len(errors) == 0
assert len(warnings) == 0
assert len(movements) == 1
assert movements[0].location == Location.BITTREX
assert movements[0].category == AssetMovementCategory.DEPOSIT
assert movements[0].timestamp == 1392277134
assert isinstance(movements[0].asset, Asset)
assert movements[0].asset == Asset('RISE')
assert movements[0].amount == FVal('2.12345678')
assert movements[0].fee == ZERO
assert movements[0].transaction_id == '9875231951530679373'
# also make sure they are written in the db
db_movements = bittrex.db.get_asset_movements(
filter_query=AssetMovementsFilterQuery.make(),
has_premium=True,
)
assert len(db_movements) == 1
assert db_movements[0] == movements[0] | 83e3ce3d8f82b159191c6b9068b54321d06bfa9a | 18,224 |
from re import sub
def masker(mask, val):
"""Enforce the defined bits in the <mask> on <val>."""
ones = sub(r"[^1]", "0", mask)
val |= int(ones,2)
zeros = sub(r"[^0]", "1", mask)
val &= int(zeros,2)
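    # e.g. masker("1X0X", 0b0101) -> 0b1101 (bit 3 forced to 1, bit 1 forced to 0)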
return val | 68b3edd542b295ca7aade0eb9829e310e4c0ed2d | 18,226 |
def ct_lt_u32(val_a, val_b):
"""
Returns 1 if val_a < val_b, 0 otherwise. Constant time.
:type val_a: int
:type val_b: int
:param val_a: an unsigned integer representable as a 32 bit value
:param val_b: an unsigned integer representable as a 32 bit value
:rtype: int
"""
val_a &= 0xffffffff
val_b &= 0xffffffff
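    # branch-free trick: bit 31 of the expression is the borrow bit of (val_a - val_b),
    # so the shift yields 1 exactly when val_a < val_b, with no data-dependent branches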
return (val_a^((val_a^val_b)|(((val_a-val_b)&0xffffffff)^val_b)))>>31 | 6816fd1e9633c0c3035d68ac657f3cb917f24527 | 18,227 |
import typing

import discord
async def is_banned(ctx: Context, user: typing.Union[discord.Member, discord.User]) -> bool:
"""Returns true if user is in guild's ban list."""
bans = await ctx.guild.bans()
for entry in bans:
if entry.user.id == user.id:
return True
return False | 2807e2d9a296afb360efe9abf9618e0ebe19e796 | 18,228 |
from typing import List

import h5py
import numpy as np
# QVector3D (Qt) and the nexus wrapper module "nx" are assumed to be provided by the host application
def _create_transformation_vectors_for_pixel_offsets(
detector_group: h5py.Group, wrapper: nx.NexusWrapper
) -> List[QVector3D]:
"""
Construct a transformation (as a QVector3D) for each pixel offset
"""
x_offsets = wrapper.get_field_value(detector_group, "x_pixel_offset")
y_offsets = wrapper.get_field_value(detector_group, "y_pixel_offset")
z_offsets = wrapper.get_field_value(detector_group, "z_pixel_offset")
if x_offsets is None or y_offsets is None:
raise Exception(
"In pixel_shape_component expected to find x_pixel_offset and y_pixel_offset datasets"
)
if z_offsets is None:
z_offsets = np.zeros_like(x_offsets)
# offsets datasets can be 2D to match dimensionality of detector, so flatten to 1D
return [
QVector3D(x, y, z)
for x, y, z in zip(
x_offsets.flatten(), y_offsets.flatten(), z_offsets.flatten()
)
] | 1504193d1a7731740a607f77c94a810561142c57 | 18,229 |
import random

import numpy as np
from threading import local  # assumed: thread-local storage set elsewhere with the current project state
def buildIterator(spec_name, param_spec, global_state, random_selection=False):
"""
:param param_spec: argument specification
:param random_selection: produce a continuous stream of random selections
:return: a iterator function to construct an iterator over possible values
"""
if param_spec['type'] == 'list':
if not random_selection:
return ListPermuteGroupElement(spec_name, param_spec['values'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(param_spec['values'])))
elif 'int' in param_spec['type'] :
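        # parse range specs of the form "int[beg:end]" or "int[beg:end:step]"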
v = param_spec['type']
vals = [int(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len (vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
            return IteratorPermuteGroupElement(spec_name, lambda: range(beg, end+1, increment).__iter__())
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.randint(beg, end)))
elif 'float' in param_spec['type'] :
v = param_spec['type']
vals = [float(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len(vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1.0
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
return IteratorPermuteGroupElement(spec_name,lambda: np.arange(beg, end,increment).__iter__())
else:
            return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: beg + random.random()*(end - beg)))
elif param_spec['type'] == 'yesno':
if not random_selection:
return ListPermuteGroupElement(spec_name,['yes','no'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(['yes', 'no'])))
elif param_spec['type'].startswith('donor'):
mydata = local()
local_state = mydata.current_local_state
choices = [node for node in local_state.getGraph().nodes() \
if len(local_state.getGraph().predecessors(node)) == 0]
if not random_selection:
# do not think we can save this state since it is tied to the local project
return PermuteGroupElement(spec_name,choices.__iter__)
else:
return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(choices)))
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: None)) | d86d2af9499117614a11796c17eeccba16149092 | 18,230 |
def outlier_removal_mean(dataframe, colname, low_cut, high_cut):
"""Replace outliers with the mean on dataframe[colname]"""
col = dataframe[colname]
col_numerics = col.loc[
col.apply(
lambda x: isinstance(x, (int, float))
and (x >= low_cut and x <= high_cut)
)
]
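    # replace out-of-range numeric entries with the mean of the in-range numeric values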
dataframe.loc[
col.apply(
lambda x: isinstance(x, (int, float))
and (x < low_cut or x > high_cut)
),
colname,
] = col_numerics.mean()
return dataframe | 03d40bb8098d4313e468d5b4a929756354a7732c | 18,232 |
def non_repeating(value, counts, q):
"""Finds the first non-repeating string in a stream.
Args:
value (str): Latest string received in the string
counts (dict): Dictionary of strings containing the counts to determine if string is repeated
q (Queue): Container for all strings in stream that have yet determined as being repeated
Return:
str: First non-repeating string. None if all strings are repeated.
"""
q.put(value)
if value in counts:
counts[value] += 1
else:
counts[value] = 1
while not q.empty():
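        # discard repeated strings from the front of the queue; the new head is the first non-repeating one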
if counts[q.queue[0]] > 1:
q.get()
else:
return q.queue[0]
if q.empty():
return None | fc5ec025cffa0d7230d814d3677ae640cd652349 | 18,233 |
def auth_optional(request):
"""
view method for path '/sso/auth_optional'
Return
        200 response: authenticated and authorized
        204 response: not authenticated
        403 response: authenticated, but not authorized
"""
res = _auth(request)
if res:
        # authenticated, but may or may not be authorized
        return res
    else:
        # not authenticated
return AUTH_NOT_REQUIRED_RESPONSE | 06416fdce6a652ca0cdc169c48219e685c13cdad | 18,234 |
def is_pip_main_available():
"""Return if the main pip function is available. Call get_pip_main before calling this function."""
return PIP_MAIN_FUNC is not None | 3d4243bb4336fbc9eb9e93b2a1cf9ec4cc129c03 | 18,235 |
import torch
def energy_target(flattened_bbox_targets, pos_bbox_targets,
pos_indices, r, max_energy):
"""Calculate energy targets based on deep watershed paper.
Args:
flattened_bbox_targets (torch.Tensor): The flattened bbox targets.
pos_bbox_targets (torch.Tensor): Bounding box lrtb values only for
positions within the bounding box. We use this as an argument
to prevent recalculating it since it is used for other things as
well.
        pos_indices (torch.Tensor): The indices of values in
            flattened_bbox_targets which are within a bounding box.
        r (float): Normalization radius hyperparameter (see Notes).
        max_energy (int): Max energy level possible.
Notes:
The energy targets are calculated as:
E_max \cdot argmax_{c \in C}[1 - \sqrt{((l-r)/2)^2 + ((t-b) / 2)^2}
/ r]
- r is a hyperparameter we would like to minimize.
- (l-r)/2 is the horizontal distance to the center and will be
assigned the variable name "horizontal"
- (t-b)/2 is the vertical distance to the center and will be
assigned the variable name "vertical"
- E_max is self.max_energy
- We don't need the argmax in this code implementation since we
already select the bounding boxes and their respective pixels in
a previous step.
Returns:
tuple: A 2 tuple with values ("pos_energies_targets",
"energies_targets"). Both are flattened but pos_energies_targets
only contains values within bounding boxes.
"""
horizontal = pos_bbox_targets[:, 0] - pos_bbox_targets[:, 2]
vertical = pos_bbox_targets[:, 1] - pos_bbox_targets[:, 3]
# print("Horizontals: {}".format(horizontal))
# print("Verticals: {}".format(vertical))
horizontal = torch.div(horizontal, 2)
vertical = torch.div(vertical, 2)
c2 = (horizontal * horizontal) + (vertical * vertical)
# print("c2: \n{}".format(c2))
# We use x * x instead of x.pow(2) since it's faster by about 30%
square_root = torch.sqrt(c2)
# print("Sqrt: \n{}".format(square_root))
type_dict = {'dtype': square_root.dtype,
'device': square_root.device}
pos_energies = (torch.tensor([1], **type_dict)
- torch.div(square_root, r))
pos_energies *= max_energy
pos_energies = torch.max(pos_energies,
torch.tensor([0], **type_dict))
pos_energies = pos_energies.floor()
energies_targets = torch.zeros(flattened_bbox_targets.shape[0],
**type_dict)
energies_targets[pos_indices] = pos_energies
# torch.set_printoptions(profile='full')
# print("Energy targets: \n {}".format(pos_energies))
# torch.set_printoptions(profile='default')
# input()
return pos_energies, energies_targets | 84bed4cc1a8bf11be778b7e79524707a49482b39 | 18,236 |
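# Usage sketch (tiny illustrative tensors; lrtb layout as used above):
flattened = torch.zeros(6, 4)
pos_bbox = torch.tensor([[2.0, 2.0, 2.0, 2.0]])  # l, t, r, b
pos_idx = torch.tensor([3])
pos_e, all_e = energy_target(flattened, pos_bbox, pos_idx, r=5.0, max_energy=16)
# center pixel: horizontal = vertical = 0, so energy = floor(16 * (1 - 0/5)) = 16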
def dashtable(df):
"""
Convert df to appropriate format for dash datatable
PARAMETERS
----------
df: pd.DataFrame,
OUTPUT
----------
    dash_cols: list containing column specs for the dashtable
    df: dataframe for the dashtable
    drop_dict: dict containing dropdown options for the dashtable
    Note: relies on the module-level `dropdown_cols` and `drop_options`.
    """
dash_cols = [] # create dashtable column names
    for x in df.columns:
temp_dict = {'name':x,'id':x}
if x in dropdown_cols:
temp_dict.update({'presentation': 'dropdown'})
# append to list
dash_cols.append(temp_dict)
# get dropdown contents for each column
drop_dict = {}
for i in range(len(dropdown_cols)): # loop through dropdown columns
drop_list = []
for x in drop_options[i]: # loop through column elements
drop_list.append({'label': x, 'value': x})
drop_dict.update({dropdown_cols[i]:{'options': drop_list, 'clearable':False}}) # append to dict
return dash_cols, df, drop_dict | 39897244f81a5c6ac0595aac7cb219f59d6c5739 | 18,237 |
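# Usage sketch (assumes the module-level globals the function reads are
# defined alongside it):
import pandas as pd

dropdown_cols = ["status"]
drop_options = [["open", "closed"]]
cols, table_df, dropdowns = dashtable(pd.DataFrame({"status": ["open"], "id": [1]}))
# cols marks "status" with presentation='dropdown'; dropdowns lists its options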
def other_identifiers_to_metax(identifiers_list):
"""Convert other identifiers to comply with Metax schema.
Arguments:
identifiers_list (list): List of other identifiers from frontend.
Returns:
list: List of other identifiers that comply to Metax schema.
"""
other_identifiers = []
for identifier in identifiers_list:
id_dict = {}
id_dict["notation"] = identifier
other_identifiers.append(id_dict)
return other_identifiers | 986c98d5a557fb4fb75ed940d3f39a9a0ec93527 | 18,238 |
def enforce_excel_cell_string_limit(long_string, limit):
"""
Trims a long string. This function aims to address a limitation of CSV
files, where very long strings which exceed the char cell limit of Excel
cause weird artifacts to happen when saving to CSV.
"""
    if limit <= 3:
        limit = 4
    if len(long_string) > limit:
        return long_string[:limit - 3] + '...'
else:
return long_string | 9b8bcf4590dc73425c304c8d778ae51d3e3f0bf3 | 18,239 |
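# Usage sketch: Excel's per-cell limit is 32,767 characters, so a typical call
# would pass limit=32767; a small limit makes the behaviour easy to see.
assert enforce_excel_cell_string_limit("abcdefghij", 8) == "abcde..."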
import cv2
import numpy as np
def gaussian_blur(image: np.ndarray, sigma_min: float, sigma_max: float) -> np.ndarray:
"""
    Blurs an image with a Gaussian kernel whose standard deviation is drawn uniformly from [sigma_min, sigma_max].
Args:
image: Input image array.
sigma_min: Lower bound of Gaussian kernel standard deviation range.
sigma_max: Upper bound of Gaussian kernel standard deviation range.
Returns:
Blurred image array.
"""
sigma_value = np.random.uniform(sigma_min, sigma_max)
return cv2.GaussianBlur(image, (0, 0), sigma_value) | 2fd31d016e4961c6980770e8dd113ae7ad45a6ed | 18,240 |
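# Usage sketch (hypothetical image; ksize=(0, 0) lets OpenCV derive the
# kernel size from sigma):
img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
blurred = gaussian_blur(img, sigma_min=0.5, sigma_max=1.5)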
def get_number_of_pcs_in_pool(pool):
"""
    Return the number of PCs in a pool
"""
pc_count = Computer.objects.filter(pool=pool).count()
return pc_count | 812de24ad2cbc738a10258f8252ca531ef72e904 | 18,241 |
from typing import List
def get_used_http_ports() -> List[int]:
"""Returns list of ports, used by http servers in existing configs."""
return [rc.http_port for rc in get_run_configs().values()] | 12982ff4d5b2327c06fef1cf874b871e2eee08c1 | 18,243 |
import io

import cv2
import numpy as np
def get_img_from_fig(fig, dpi=180, color_cvt_flag=cv2.COLOR_BGR2RGB) -> np.ndarray:
"""Make numpy array from mpl fig
Parameters
----------
fig : plt.Figure
Matplotlib figure, usually the result of plt.imshow()
dpi : int, optional
        Dots per inch of the image to save. Note that the default matplotlib
        figsize is given in inches, so the pixel size is px, py = w * dpi,
        h * dpi (e.g. 6.4 inches * 100 dpi = 640 pixels). By default 180.
color_cvt_flag : int, optional
OpenCV cvtColor flag. to get grayscale image,
use `cv2.COLOR_BGR2GRAY`, by default `cv2.COLOR_BGR2RGB`.
Returns
-------
np.ndarray[np.uint8]
Image array
"""
with io.BytesIO() as buffer:
fig.savefig(buffer, format="png", dpi=dpi)
buffer.seek(0)
img_arr = np.frombuffer(buffer.getvalue(), dtype=np.uint8)
return cv2.cvtColor(cv2.imdecode(img_arr, 1), color_cvt_flag) | dde9f35b78df436b30d4f9452b9964c93f924252 | 18,244 |
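# Usage sketch (assumes matplotlib is available):
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
arr = get_img_from_fig(fig, dpi=100)  # RGB uint8 array, roughly 480x640x3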
import numpy as np
def split_data_by_target(data, target, num_data_per_target):
"""
Args:
data: np.array [num_data, *data_dims]
target: np.array [num_data, num_targets]
            target[i] is a one-hot vector
num_data_per_target: int
Returns:
result_data: np.array [num_data_per_target * num_targets, *data_dims]
result_target: np.array
[num_data_per_target * num_targets, num_targets]
"""
num_unique_targets = len(np.unique(target, axis=0))
target_numeric = np.dot(target, np.arange(num_unique_targets))
result_data = []
result_target = []
for target_id in range(num_unique_targets):
result_data.append(data[target_numeric == target_id][:num_data_per_target])
result_target.append(target[target_numeric == target_id][:num_data_per_target])
return np.concatenate(result_data), np.concatenate(result_target) | d4425753b4d9892d2c593ec8e58e75bae0005c3d | 18,245 |
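# Usage sketch: keep one example per class from a two-class one-hot dataset.
data = np.arange(8).reshape(4, 2)
target = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
sub_data, sub_target = split_data_by_target(data, target, num_data_per_target=1)
# sub_data == [[0, 1], [2, 3]] (first example of class 0, then of class 1)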
import numpy as np
import pandas as pd
def top_mutations(mutated_scores, initial_score, top_results=10):
"""Generate list of n mutations that improve localization probability
Takes in the pd.DataFrame of predictions for mutated sequences and the
    probability of the initial sequence. After subtracting the initial value
from the values of the mutations, it generates a list of the mutations
that increase the probability that the protein is localized at the target
localization. The number of mutations returned is determined with the
top_results variable, which defaults to 10. Note that if there are not so
many beneficial mutations as indicated in top_results, the returned list is
shorter to avoid returning mutations that would decrease the probability of
being localized at the target localization. This means that if all
mutations are detrimental, the function returns an empty pd.DataFrame.
The returned mutations are sorted from larger increase to smaller increase
and include information about the amino acid position, the original
residue at that position, the mutation, the improvement with respect to
initial_score and the final probability of the sequence with that mutation.
Args:
mutated_scores: a pd.DataFrame with the probability predicted by the
model for each mutation (rows) at each position (columns).
initial_score: a float representing the probability predicted by the
model for the initial sequence.
top_results: an integer indicating the number of mutations to return.
Returns:
top_res: a pd.DataFrame with the mutations that improve the
probability that a protein is localized at the target localization,
showing position, mutation and improvement with respect to the
original score.
"""
    # check if top_results is an integer
    if not isinstance(top_results, int):
        raise TypeError("top results should be an integer")
# get the increase or decrease in probability of mutations compared to the
# initial_score of the original sequence
prob_change = mutated_scores - initial_score
# prepare data frame for results
top_res = pd.DataFrame(
columns=["Position", "Mutation", "Prob_increase", "Target_probability"]
)
i = 0
# initialize at max value so that it enters the loop
pred_increase = 1
# get best mutations until reaching top_results or mutations that do
# not improve the probability
while i < top_results and pred_increase > 0:
# get column with maximum value
position_mut = prob_change.max().idxmax()
# get row with maximum value
mutation = prob_change.idxmax()[position_mut]
# get increase and localization probability of the sequence with the
# mutation of interest
pred_increase = prob_change.loc[mutation, position_mut]
prob_value = mutated_scores.loc[mutation, position_mut]
# change it for nan so that we can look for next worse mutation at the
# next iteration
prob_change.loc[mutation, position_mut] = np.nan
# append to results
mut_series = pd.Series(
{
"Position": position_mut,
"Mutation": mutation,
"Prob_increase": pred_increase,
"Target_probability": prob_value,
}
)
        top_res = pd.concat([top_res, mut_series.to_frame().T], ignore_index=True)
i += 1
return top_res | f574bf7f7569e3024a42866873c5bb589ff02095 | 18,246 |
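# Usage sketch (hypothetical scores: 2 mutations x 2 positions):
mutated = pd.DataFrame({"P1": [0.6, 0.4], "P2": [0.7, 0.2]}, index=["A", "G"])
best = top_mutations(mutated, initial_score=0.5, top_results=2)
# row 0: Position P2, Mutation A, Prob_increase 0.2, Target_probability 0.7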
from panda3d.core import Mat4
def npmat4_to_pdmat4(npmat4):
    """
    # updated from cvtMat4
    convert a 4x4 numpy.2darray to the LMatrix4 defined in Panda3d
    :param npmat4: a 4x4 numpy ndarray (homogeneous transform)
    :return: a LMatrix4f object, see panda3d
    author: weiwei
    date: 20170322
    """
return Mat4(npmat4[0, 0], npmat4[1, 0], npmat4[2, 0], 0, \
npmat4[0, 1], npmat4[1, 1], npmat4[2, 1], 0, \
npmat4[0, 2], npmat4[1, 2], npmat4[2, 2], 0, \
npmat4[0, 3], npmat4[1, 3], npmat4[2, 3], 1) | 7b58014d5d354aefac84786212b6ca190a983e48 | 18,247 |
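# Usage sketch: the identity transform round-trips cleanly.
import numpy as np

m = npmat4_to_pdmat4(np.eye(4))  # LMatrix4f identity; Panda3D stores the transpose (row-vector convention)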
import requests
def is_at_NWRC(url):
"""
    Checks whether we are on the NWRC network by requesting the given url
"""
try:
r = requests.get(url)
code = r.status_code
    except Exception:
        code = 404
    return code == 200 | b909a9087940eb70b569ea6c686ff394e84a6ed9 | 18,248
import torch
def lmo(x, radius):
    """Linear minimization oracle over the L1 ball: returns v with
    ||v||_1 <= radius (per column for 2-D and higher inputs) minimizing <v, x>."""
shape = x.shape
    if len(shape) == 4:
        v = torch.zeros_like(x)
        for first_dim in range(shape[0]):
            for second_dim in range(shape[1]):
                inner_x = x[first_dim][second_dim]
                cols = inner_x.shape[1]
                maxIdx = torch.argmax(torch.abs(inner_x), 0)
                for col in range(cols):
                    v[first_dim][second_dim][maxIdx[col], col] = -radius * torch.sign(inner_x[maxIdx[col], col])
    elif len(shape) == 3:
        v = torch.zeros_like(x)
        for first_dim in range(shape[0]):
            inner_x = x[first_dim]
            cols = inner_x.shape[1]
            maxIdx = torch.argmax(torch.abs(inner_x), 0)
            for col in range(cols):
                v[first_dim][maxIdx[col], col] = -radius * torch.sign(inner_x[maxIdx[col], col])
    elif len(shape) == 2:
        cols = x.shape[1]
        v = torch.zeros_like(x)
        maxIdx = torch.argmax(torch.abs(x), 0)
        for col in range(cols):
            v[maxIdx[col], col] = -radius * torch.sign(x[maxIdx[col], col])
    else:
v = torch.zeros_like(x)
maxIdx = torch.argmax(torch.abs(x))
v.view(-1)[maxIdx] = -radius * torch.sign(x.view(-1)[maxIdx])
return v | 24bda333cdd64df9a0b4fa603211036bbdad7200 | 18,249 |
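# Usage sketch: L1-ball LMO applied column-wise.
x = torch.tensor([[1.0, -2.0], [3.0, 0.5]])
v = lmo(x, radius=1.0)
# column 0: |3.0| is largest -> v[1, 0] = -1.0; column 1: |-2.0| -> v[0, 1] = +1.0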
from pandas import Index, MultiIndex
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name) | c642dd9330032ed784224b7ede6ee299b6d3ed67 | 18,250 |
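# Usage sketch:
idx = Index(["a", "b"], name="col")
upper = _transform_index(idx, str.upper)  # Index(['A', 'B'], name='col')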
def extractQualiTeaTranslations(item):
"""
# 'QualiTeaTranslations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Harry Potter and the Rise of the Ordinary Person' in item['tags']:
return None
if 'Romance of Dragons and Snakes' in item['tags']:
return buildReleaseMessageWithType(item, 'Romance of Dragons and Snakes', vol, chp, frag=frag, postfix=postfix)
return False | 446b7f7598e118222c033bbfce074fa02340fd8e | 18,251 |