content (string, 35-762k chars) | sha1 (string, 40 hex chars) | id (int64, 0-3.66M) |
---|---|---|
import urllib.parse
def read_file(file_path):
"""Read file according to its file schema"""
s3_schema = 's3'
path_comps = urllib.parse.urlparse(file_path)
scheme = path_comps.scheme
return_result = None
if not scheme or scheme != s3_schema:
file_stream = open(file_path)
return_result = file_stream.read()
file_stream.close()
elif scheme == s3_schema:
return_result = read_s3_file(file_path)
return return_result | 438c6286f5f29792fd7c99412bead96a11adc757 | 20,666 |
from typing import List
def build_command(codemodders_list: List) -> BaseCodemodCommand:
"""Build a custom command with the list of visitors."""
class CustomCommand(BaseCodemodCommand):
transformers = codemodders_list
return CustomCommand(CodemodContext()) | 5aed3c94c954a8e62c7cfb23f2b338e3a017d988 | 20,667 |
import math
import numpy as np
def default_gen_mat(dt: float, size: int) -> np.ndarray:
"""Default process matrix generator.
Parameters
----------
dt : float
Dimension variable difference.
size : int
        Size of the process matrix, equal to the number of rows and columns.
Returns
-------
np.ndarray
Process matrix.
"""
mat = np.identity(size)
for i in range(1, size):
        np.fill_diagonal(mat[:, i:], dt**i/math.factorial(i))
return mat | fc4c19b33dae27ec412a00d20b89c25c5bc8668c | 20,668 |
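# Minimal usage sketch for default_gen_mat (assumes the math/numpy imports above):
# element (r, r+i) holds the Taylor term dt**i / i!, so size=3 gives a
# constant-acceleration style transition matrix.
expected = np.array([[1.0, 0.5, 0.125],
                     [0.0, 1.0, 0.5],
                     [0.0, 0.0, 1.0]])
assert np.allclose(default_gen_mat(dt=0.5, size=3), expected)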
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes later stage will un-exclude files which
have custom build steps attached.
  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: Dictionary of generator-specific flags; used here to
        check for the 'standalone' flag.
    gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources) | a6a0d0d6d7531b8e858c8ec0d0aedee320c20d8d | 20,669 |
def striptag(tag):
"""
Get the short representation of a fully qualified tag
:param str tag: a (fully qualified or not) XML tag
"""
if tag.startswith('{'):
return tag.rsplit('}')[1]
return tag | f0193e3f792122ba8278e599247439a91139e72b | 20,670 |
from subprocess import Popen, PIPE
def dump_key(key):
""" Convert key into printable form using openssl utility
Used to compare keys which can be stored in different
format by different OpenSSL versions
"""
return Popen(["openssl","pkey","-text","-noout"],stdin=PIPE,stdout=PIPE).communicate(key)[0] | 71dd28876c2fd3e28a4434b926b483cef3b104c2 | 20,671 |
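# Usage sketch (assumes the `openssl` binary is on PATH; key1/key2 are
# hypothetical PEM-encoded private keys as bytes): two serializations of the
# same key compare equal once normalized through openssl.
#
# same_key = dump_key(key1) == dump_key(key2)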
def download_complete(root_data_path, domain_name, start_date, end_date):
"""
Check that all files have been downloaded and that they contain the data in
the expected date range
"""
missing_files = _find_missing_files(
root_data_path=root_data_path,
domain_name=domain_name,
start_date=start_date,
end_date=end_date,
)
return len(missing_files) == 0 | 3540632c5ec48fb0741b8173f926fd1cb5970333 | 20,672 |
from typing import Any
def get_test_string(actual: Any, rtol: float, atol: float) -> str:
"""
Args:
actual: The actual value that was produced, and that should be the desired value.
rtol: The relative tolerance of the comparisons in the assertion.
atol: The absolute tolerance of the comparisons in the assertion.
Returns:
A string of Python code that produces the desired value.
"""
return str(actual) | f017806bef4336bf187071436bd454d0ca980636 | 20,673 |
def is_type_resolved(_type):
"""Helper function that checks if type is already resolved."""
return _type in BASIC_TYPES or isinstance(_type, TypeDef) | 9451a5dbf17aef1685122b881ede994d1a02b7a0 | 20,674 |
from mimetypes import guess_extension
from telethon.tl import types
def get_extension(media):
"""Gets the corresponding extension for any Telegram media."""
# Photos are always compressed as .jpg by Telegram
try:
get_input_photo(media)
return '.jpg'
except TypeError:
# These cases are not handled by input photo because it can't
if isinstance(media, (types.UserProfilePhoto, types.ChatPhoto)):
return '.jpg'
# Documents will come with a mime type
if isinstance(media, types.MessageMediaDocument):
media = media.document
if isinstance(media, (
types.Document, types.WebDocument, types.WebDocumentNoProxy)):
if media.mime_type == 'application/octet-stream':
# Octet stream are just bytes, which have no default extension
return ''
else:
return guess_extension(media.mime_type) or ''
return '' | cb05f122fdf03df38c6d7b7904c7ec611f09c7a0 | 20,675 |
import numpy as np
import pandas as pd
def process_input(df, col_group, col_t, col_death_rate, return_df=True):
"""
    Trim, filter, and add extra information to the data frame.
Args:
df (pd.DataFrame): Provided data frame.
col_group (str): Column name of group definition.
col_t (str): Column name of the independent variable.
col_death_rate (str): Name for column that contains the death rate.
return_df (bool, optional):
If True return the combined data frame, otherwise return the
            dictionary split by group.
Returns:
pd.DataFrame: processed data frame.
"""
assert col_group in df
assert col_t in df
assert col_death_rate in df
# trim down the data frame
df = df[[col_group, col_t, col_death_rate]].reset_index(drop=True)
df.sort_values([col_group, col_t], inplace=True)
df.columns = ['location', 'days', 'ascdr']
# check and filter and add more information
data = split_by_group(df, col_group='location')
for location, df_location in data.items():
assert df_location.shape[0] == df_location['days'].unique().size
df_location = filter_death_rate(df_location,
col_t='days',
col_death_rate='ascdr')
df_location['ln ascdr'] = np.log(df_location['ascdr'])
df_location['asddr'] = df_location['ascdr'].values - \
np.insert(df_location['ascdr'].values[:-1], 0, 0.0)
df_location['ln asddr'] = np.log(df_location['asddr'])
data.update({
location: df_location.copy()
})
if return_df:
return pd.concat(list(data.values()))
else:
return data | 24b1e7274959c5b4befbd826c1a60ca700316b2f | 20,676 |
import tensorflow as tf
def cross_entropy_loss():
"""
Returns an instance to compute Cross Entropy loss
"""
return tf.keras.losses.BinaryCrossentropy(from_logits=True) | 5fcc673d9339bd4acb84d55fe0c316bc4cf802c4 | 20,677 |
def f(x):
"""
Surrogate function over the error metric to be optimized
"""
evaluation = run_quip(cutoff = float(x[:,0]), delta = float(x[:,1]), n_sparse = float(x[:,2]), nlmax = float(x[:,3]))
print("\nParam: {}, {}, {}, {} | MAE : {}, R2: {}".format(float(x[:,0]),float(x[:,1]),float(x[:,2]),float(x[:,3]) ,evaluation[0],evaluation[1]))
return evaluation[0] | 0663b9eb2b717f547f57a8485739165414fbbdba | 20,678 |
def equal(* vals):
"""Returns True if all arguments are equal"""
if len(vals) < 2:
return True
a = vals[0]
for b in vals[1:]:
if a != b:
return False
return True | dbd947016d2b84faaaa7fefa6f35975da0a1b5ec | 20,679 |
import numpy as np
import pandas as pd
def exp(x: pd.Series) -> pd.Series:
"""
Exponential of series
:param x: timeseries
:return: exponential of each element
**Usage**
For each element in the series, :math:`X_t`, raise :math:`e` (Euler's number) to the power of :math:`X_t`.
Euler's number is the base of the natural logarithm, :math:`ln`.
:math:`R_t = e^{X_t}`
**Examples**
Raise :math:`e` to the power :math:`1`. Returns Euler's number, approximately 2.71828
>>> exp(1)
**See also**
:func:`log`
"""
return np.exp(x) | c4cb057be2dd988a152cc8f224d4bd4300f88263 | 20,681 |
import numpy as np
from PIL import Image
def pil_paste_image(im, mask, start_point=(0, 0)):
"""
    :param im: background image as a numpy array
    :param mask: image (numpy array) to paste; it is also passed as the paste
        mask, so it should carry an alpha channel (or be a single-channel mask)
    :param start_point: (x, y) offset at which to paste
    :return: the composited image as a numpy array
"""
out = Image.fromarray(im)
mask = Image.fromarray(mask)
out.paste(mask, start_point, mask)
return np.asarray(out) | b6393426aa5b7434e64cddc11ed598fca78a2b47 | 20,683 |
import requests
def service_northwind_v2(schema_northwind_v2):
"""https://services.odata.org/V2/Northwind/Northwind.svc/"""
return pyodata.v2.service.Service('http://not.resolvable.services.odata.org/V2/Northwind/Northwind.svc',
schema_northwind_v2, requests) | a7934ff032725589bb11aab9cd84c26d9f2845c3 | 20,684 |
def make_params(params, extra_params):
"""
Creates URL query params by combining arbitrary params
with params designated by keyword arguments and escapes
them to be compatible with HTTP request URI.
Raises an exception if there is a conflict between the
two ways to specify a query param.
"""
params = params or {}
wire_params = {
k: quote(escape(v), b",*[]:/-")
for k, v in (extra_params or {}).items()
if v is not None
}
if set(wire_params).intersection(set(params)):
raise ValueError("Conflict between keyword argument and 'params'")
for k, v in (params or {}).items():
if v is None:
continue
wire_params[k] = quote(escape(v), b",*[]:/-")
return wire_params | f2df0c52675476c0420d40f5ef9053cd2a719194 | 20,686 |
def raw_tag(name, value):
"""Create a DMAP tag with raw data."""
return name.encode('utf-8') + \
len(value).to_bytes(4, byteorder='big') + \
value | 9f86a5a9ebc38fcfd31eb7d76ac8bb01618f6ca7 | 20,687 |
def get_command(tool_xml):
"""Get command XML element from supplied XML root."""
root = tool_xml.getroot()
commands = root.findall("command")
command = None
if len(commands) == 1:
command = commands[0]
return command | 8d50b2675b3a6089b15b5380025ca7def9e4339e | 20,688 |
from whoosh.reading import SegmentReader
def OPTIMIZE(writer, segments):
"""This policy merges all existing segments.
"""
for seg in segments:
reader = SegmentReader(writer.storage, writer.schema, seg)
writer.add_reader(reader)
reader.close()
return [] | e5985641cbe724072f37158196cdaed0600b403e | 20,689 |
import pandas as pd
def build_features_revenue_model_q2(
df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame
):
"""Builds the features to be used on the revenue modelling for
answer question 2.
Parameters
----------
df_listings : pd.DataFrame
Pandas dataframe with information about listings.
df_daily_revenue : pd.DataFrame
Pandas dataframe with information about daily revenue.
Returns
-------
    X : pd.DataFrame
        Feature matrix of date-based features (as floats).
    y : pd.Series
        Company revenue per date (the prediction target).
"""
data = pd.merge(
df_daily_revenue,
df_listings[["Código", "Comissão"]],
left_on="listing",
right_on="Código",
how="left",
)
data["company_revenue"] = data["Comissão"] * data["revenue"]
data_revenue = (
data.groupby("date")
.agg(company_revenue=("company_revenue", "sum"))
.reset_index()
)
data_revenue = build_date_features(data_revenue, "date")
data = data_revenue.loc[data_revenue["company_revenue"].notna()]
X = data.drop(columns="company_revenue").astype(float)
y = data["company_revenue"]
return X, y | 16658cbc76edf66cf718b008d5fba58414df1f8c | 20,690 |
import gzip
import pickle
def load_data():
"""Return the MNIST data as a tuple containing the training data,
the validation data, and the test data.
The ``training_data`` is returned as a tuple with two entries.
The first entry contains the actual training images. This is a
numpy ndarray with 50,000 entries. Each entry is, in turn, a
numpy ndarray with 784 values, representing the 28 * 28 = 784
pixels in a single MNIST image.
The second entry in the ``training_data`` tuple is a numpy ndarray
containing 50,000 entries. Those entries are just the digit
values (0...9) for the corresponding images contained in the first
entry of the tuple.
The ``validation_data`` and ``test_data`` are similar, except
each contains only 10,000 images.
This is a nice data format, but for use in neural networks it's
helpful to modify the format of the ``training_data`` a little.
That's done in the wrapper function ``load_data_wrapper()``, see
below.
"""
f = gzip.open('../data/mnist.pkl.gz', 'rb')
    training_data, validation_data, test_data = pickle.load(f, encoding='latin1')
f.close()
return (training_data, validation_data, test_data) | f021f1db4b0b22c6d89620f44db7e2578c516489 | 20,691 |
def get_elements(xmldoc, tag_name, attribute):
"""Returns a list of elements"""
l = []
    for item in xmldoc.getElementsByTagName(tag_name):
        value = item.getAttribute(attribute)
        l.append(repr(value))
return l | 2cda65802d0dc1ebbb7796f6a43fa9bacfbe852e | 20,693 |
def test_algorithm(circuit, iterations=(1000000)):
"""
Tests a circuit by submitting it to both aer_simulator and PyLinalg.
"""
linalg = PyLinalg()
qlm_circ, _ = qiskit_to_qlm(circuit, sep_measures=True)
test_job = qlm_circ.to_job(nbshots=0, aggregate_data=False)
expected = linalg.submit(test_job)
qiskit_qpu = BackendToQPU(Aer.get_backend('aer_simulator'))
test_job.nbshots = iterations
result = qiskit_qpu.submit(test_job)
dist_calc = compare_results(expected, result, aggregate=False)
distance = analyze_distance(dist_calc)
print("Distance is {}".format(distance))
return distance | ac11f10f9b467ab08275d55515a15d6906076191 | 20,694 |
def return_list_of_sn_host():
""" Return potential SN host names
This includes:
- List of object names in SIMBAD that would correspond to extra-galactic object
- Unknown objects
- objects with failed crossmatch
In practice, this exclude galactic objects from SIMBAD.
"""
list_simbad_galaxies = [
"galaxy",
"Galaxy",
"EmG",
"Seyfert",
"Seyfert_1",
"Seyfert_2",
"BlueCompG",
"StarburstG",
"LSB_G",
"HII_G",
"High_z_G",
"GinPair",
"GinGroup",
"BClG",
"GinCl",
"PartofG",
]
keep_cds = \
["Unknown", "Candidate_SN*", "SN", "Transient", "Fail"] + \
list_simbad_galaxies
return keep_cds | c2a536fc4b742dc0e4a4c57a582174017d6e2877 | 20,695 |
import glob
import re
def folder2catalog(path, granule_trunk='', granule_extension='*', add_sf=False, client=None):
""" Reads a folder of granules into a STAREDataFrame catalog
:param path: Path of the folder containing granules
:type path: str
:param granule_trunk: Granule identifier (e.g. MOD09)
:type granule_trunk: str
:param granule_extension: Extension of the granule (e.g. hdf, nc, HDF5)
:type granule_extension: str
:param add_sf: toggle creating simple feature representation of the iFOVs
:type add_sf: bool
:param client:
:type client:
:return: catalog
:rtype: starepandas.STAREDataFrame
"""
term = '{path}/{granule_trunk}*.{ext}'.format(path=path, granule_trunk=granule_trunk, ext=granule_extension)
s3 = None
if path[0:5] != 's3://':
granule_paths = glob.glob(term)
else:
        granule_paths, s3 = starepandas.io.s3.s3_glob(path, r'.*\.{ext}$'.format(ext=granule_extension))
if not granule_paths:
print('no granules in folder')
return None
    pattern = r'.*[^_stare]\.(nc|hdf|HDF5)'
granule_paths = list(filter(re.compile(pattern).match, granule_paths))
df = starepandas.STAREDataFrame()
if client is None:
for granule_path in granule_paths:
if s3 is not None:
granule_url = 's3://{bucket_name}/{granule}'.format(bucket_name=s3[0]['bucket_name'],
granule=granule_path)
else:
granule_url = granule_path
row = make_row(granule_url, add_sf)
df = df.append(row, ignore_index=True)
else:
pass
# client=Client()
# client.close()
df.set_sids('stare_cover', inplace=True)
if add_sf:
df.set_geometry('geom', inplace=True)
return df | 3d1d34a3b2e85ddbfb624289126f077d1668bab4 | 20,696 |
import time
def _check_satellite_low(xbee, is_on_hold):
"""
Check if satellites are low and set the is_on_hold flag.
Args:
xbee(xbee.Zigbee): the XBee communication interface.
is_on_hold(bool): a flag telling if the thread is already on hold.
Returns:
bool: True if low sats, False if cleared.
"""
if shared.status['thread_flag'] & shared.NSATS_TOO_LOW:
if not is_on_hold: _log_and_broadcast(xbee, "IFO,%s low sats hold." % shared.AGENT_ID)
        time.sleep(0.5)
return True
else: return False | 5ecfdc304a9f6aa5aa41335637f6e783a3643df1 | 20,697 |
import requests
def indexof(path):
"""Returns list of filenames parsed off "Index of" page"""
resp = requests.get(path)
return [a for a, b in file_index_re.findall(resp.text) if a == b] | 38b165bfd4f3dbefedff21c7ac62fb57cd8f2d97 | 20,698 |
from typing import Optional
def get_oversight(xml: dict) -> Optional[OversightInfo]:
""" Get oversight """
if val := xml.get('oversight_info'):
return OversightInfo(
has_dmc=val.get('has_dmc', ''),
is_fda_regulated_drug=val.get('is_fda_regulated_drug', ''),
is_fda_regulated_device=val.get('is_fda_regulated_device', ''),
is_unapproved_device=val.get('is_unapproved_device', ''),
is_ppsd=val.get('is_ppsd', ''),
is_us_export=val.get('is_us_export', '')) | fc14da139eb350306175016a2b8d2d036d02b042 | 20,699 |
import fnmatch
def get_user_id(gi,email):
"""
Get the user ID corresponding to a username email
Arguments:
gi (bioblend.galaxy.GalaxyInstance): Galaxy instance
email : email address for the user
Returns:
String: user ID, or None if no match.
"""
user_id = None
try:
for u in get_users(gi):
if fnmatch.fnmatch(u.email,email):
return u.id
except ConnectionError as ex:
logger.warning("Failed to get user list: %s (%s)" % (ex.body,
ex.status_code))
return None | 89b72a4291b789ab61f276a7e4563c17c2c4e4b7 | 20,700 |
def ldns_pkt_size(*args):
"""LDNS buffer."""
return _ldns.ldns_pkt_size(*args) | 833223ed702fdee4525f0c330f4c803bd867daa3 | 20,702 |
def getUser(userID):
""" Takes a user ID as an argument and returns the user associated
with that ID.
Args:
userID -- The ID of a user stored in the Patrons table
"""
user = session.query(Patrons).filter_by(id = userID).one()
return user | 7da06a0baaef540826fd9e902129e1a625f5bfd9 | 20,703 |
import warnings
def guard_transform(transform):
"""Return an Affine transformation instance"""
if not isinstance(transform, Affine):
if tastes_like_gdal(transform):
warnings.warn(
"GDAL-style transforms are deprecated and will not "
"be supported in Rasterio 1.0.",
FutureWarning,
stacklevel=2)
transform = Affine.from_gdal(*transform)
else:
transform = Affine(*transform)
a, e = transform.a, transform.e
if a == 0.0 or e == 0.0:
raise ValueError(
"Transform has invalid coefficients a, e: (%f, %f)" % (
transform.a, transform.e))
return transform | 1c19f92331c0bb99841c86302c1c1d4c13a07649 | 20,704 |
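# Usage sketch (assumes `from affine import Affine` and rasterio's internal
# tastes_like_gdal() helper are importable; neither import is shown in the
# snippet above): a 6-tuple in GDAL geotransform order (c, a, b, f, d, e) is
# converted via Affine.from_gdal and a FutureWarning is emitted, while an
# Affine instance passes through unchanged.
#
# gdal_gt = (101985.0, 300.0, 0.0, 2826915.0, 0.0, -300.0)
# guard_transform(gdal_gt)  # -> Affine(300.0, 0.0, 101985.0, 0.0, -300.0, 2826915.0)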
import math
from collections import defaultdict
import pandas
import matplotlib.pyplot as plt
import seaborn as sns
def create_heatmap(piek_json,
antske_json,
output_path=None,
verbose=0):
"""
"""
# initialize dataframe
likert_values = [1, 2, 3, 4, 5, 6, 7]
df = pandas.DataFrame()
default_values = [None for _ in range(len(likert_values))]
for likert_value in likert_values:
df[likert_value] = default_values
piek_antske_to_items = defaultdict(list)
keys = list(piek_json.keys())
assert piek_json.keys() == antske_json.keys()
for key in keys:
piek_value = piek_json[key]
antske_value = antske_json[key]
piek_antske_to_items[(piek_value, antske_value)].append(key)
for (piek, antske), items in piek_antske_to_items.items():
num_items = len(items)
        df.loc[piek, antske] = len(items)
if verbose >= 2:
print(piek, antske, len(items))
for index, row in df.iterrows():
for column_name, value in row.items():
to_change = False
if value is None:
value = 0
to_change = True
elif math.isnan(value):
value = 0
to_change = True
if to_change:
                df.loc[index, column_name] = value
df = df[df.columns].astype(int)
df = df.drop(df.index[0])
f, ax = plt.subplots(figsize=(9, 6))
plot = sns.heatmap(df, annot=True, fmt="d", linewidths=.5, ax=ax)
ax.invert_yaxis()
if output_path is not None:
plot.figure.savefig(output_path)
return df, ax | 4e26331c3290d8282e98f7473cd7ee6ffbb46146 | 20,705 |
import numpy as np
from sklearn import metrics
def get_classification_report(true_labels, pred_labels, labels=None, target_names=None, output_dict=False):
"""
true_labels = [0, 1, 2, 3, 4, 1] # Y
pred_labels = [0, 1, 1, 2, 2, 1] # X
target_names = ["A", "B", "C", "D", "E"]
out_result = get_classification_report(true_labels, pred_labels, target_names=target_names, output_dict=False)
    Macro average (macro avg) vs. micro average (micro avg):
    If every class has roughly the same number of samples, the macro and micro averages differ little.
    If the class sample sizes differ a lot and you want to:
        emphasize classes with many samples: use the micro average; if it is smaller than the macro average, inspect the large classes
        emphasize classes with few samples: use the macro average; if it is smaller than the micro average, inspect the small classes
:param true_labels:
:param pred_labels:
:param labels:
:param target_names:
:param output_dict:
:return:
"""
true_labels = np.array(true_labels, dtype=np.int32)
pred_labels = np.array(pred_labels, dtype=np.int32)
if target_names:
labels = list(range(len(target_names)))
result = metrics.classification_report(true_labels,
pred_labels,
labels=labels,
digits=4,
target_names=target_names,
output_dict=output_dict)
if output_dict:
macro_avg = result["macro avg"]
accuracy = result["accuracy"]
weighted_avg = result["weighted avg"]
out_result = {"macro_avg": macro_avg, "accuracy": accuracy, "weighted_avg": weighted_avg}
# pdf=pd.DataFrame.from_dict(result)
# save_csv("classification_report.csv", pdf)
else:
out_result = result
return out_result | 37467891f004175ec228adac6ca1e1347c71ca15 | 20,706 |
from evernote.api.client import EvernoteClient
def yx():
    """
    Test the Yinxiang (Evernote China) service.
    :return:
    """
client = EvernoteClient(token=dev_token,sandbox=False)
client.service_host = 'app.yinxiang.com'
userStore = client.get_user_store()
user = userStore.getUser()
    print(user)
return "yx" | 9bfdcccddfc9a99d445deea5d920256e19e0bcc5 | 20,708 |
def generate_frequency_result_for_time_precedence_query_workload(config_map, time_interval, spatial_interval):
"""
:param config_map:
:param time_interval:
:param spatial_interval:
:return:
"""
frequency_result = {}
for key in config_map.keys():
region_param_list = config_map.get(key)
lon_min = region_param_list[4]
lon_max = region_param_list[5]
lat_min = region_param_list[6]
lat_max = region_param_list[7]
time_min = normalize_to_utc_date(region_param_list[8])
time_max = normalize_to_utc_date(region_param_list[9])
frequency_result[key] = generate_query_frequency_per_region(key, time_interval, spatial_interval, lon_min, lon_max, lat_min, lat_max, time_min, time_max)
print("finish frequency result for query")
return frequency_result | c477acb9652ea40f498a2d92aa91840e868b1736 | 20,709 |
from django.shortcuts import redirect
def delete(request):
    """Log out and clear the session."""
request.session.flush()
return redirect('/login/') | 0f10480f3259c52bc1a203ccd9e9746cdf9776ed | 20,710 |
import warnings
import numpy as np
def acovf(x, unbiased=False, demean=True, fft=None, missing='none', nlag=None):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how
any NaNs are to be treated.
nlag : {int, None}
Limit the number of autocovariances returned. Size of returned
array is nlag + 1. Setting nlag when fft is False uses a simple,
direct estimator of the autocovariances that only computes the first
nlag + 1 values. This can be much faster when the time series is long
and only a small number of autocovariances are needed.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
if fft is None:
# GH#4937
warnings.warn('fft=True will become the default in a future version '
'of statsmodels/sm2. To suppress this warning, '
'explicitly set fft=False.', FutureWarning)
fft = False
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("`missing` option %s not understood"
% missing) # pragma: no cover
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = np.isnan(x).any()
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x)
if missing == 'conservative':
# Must copy for thread safety (GH#4937)
x = x.copy()
x[~notmask_bool] = 0
else:
# 'drop'
x = x[notmask_bool] # copies non-missing
notmask_int = notmask_bool.astype(int)
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum() / notmask_int.sum()
if missing == 'conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
lag_len = nlag
if nlag is None:
lag_len = n - 1
elif nlag > n - 1:
raise ValueError('nlag must be smaller than nobs - 1')
if not fft and nlag is not None:
# GH#4937
acov = np.empty(lag_len + 1)
acov[0] = xo.dot(xo)
for i in range(lag_len):
acov[i + 1] = xo[i + 1:].dot(xo[:-(i + 1)])
if not deal_with_masked or missing == 'drop':
if unbiased:
acov /= (n - np.arange(lag_len + 1))
else:
acov /= n
else:
if unbiased:
divisor = np.empty(lag_len + 1, dtype=np.int64)
divisor[0] = notmask_int.sum()
for i in range(lag_len):
divisor[i + 1] = np.dot(notmask_int[i + 1:],
notmask_int[:-(i + 1)])
divisor[divisor == 0] = 1
acov /= divisor
else:
                # biased, missing data but not 'drop'
acov /= notmask_int.sum()
return acov
if unbiased and deal_with_masked and missing == 'conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
d[d == 0] = 1
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked:
# biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2 * n - 1)
else:
# biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = np.correlate(xo, xo, 'full')[n - 1:] / d[n - 1:]
if nlag is not None:
# GH#4937 Copy to allow gc of full array rather than view
return acov[:lag_len + 1].copy()
return acov | c318d80a13bcb4f87117ce84351001a8f320c6d0 | 20,711 |
import re
def ParseTimeCommandResult(command_result):
"""Parse command result and get time elapsed.
Args:
command_result: The result after executing a remote time command.
Returns:
Time taken for the command.
"""
  time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
return time_in_seconds | fc92d4b996716ddb2253bf4eb75ed9860c43b2d7 | 20,712 |
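# Worked example: the regex picks up the minutes and seconds of the `real`
# line emitted by the shell's time builtin (1m30.250s -> 90.25 seconds).
assert ParseTimeCommandResult("real\t1m30.250s\nuser\t0m0.1s") == 90.25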
import requests
import json
def _push_aol_to_dativetop_server(aol):
"""Make a PUT request to the DativeTop server in order to push ``aol`` on to
the server's AOL.
"""
try:
resp = requests.put(c.DATIVETOP_SERVER_URL, json=aol)
resp.raise_for_status()
return resp.json(), None
except json.decoder.JSONDecodeError:
msg = ('Failed to parse JSON from DativeTop Server response to our PUT'
' request.')
logger.exception(msg)
return None, msg
except requests.exceptions.RequestException:
msg = 'Failed to push our AOL to the DativeTop Server.'
logger.exception(msg)
return None, msg | 19581efa535e4022fd844292576f56532e9b0cdd | 20,714 |
import numpy as np
def dual_single(lag_mul: float, val: np.ndarray, count: np.ndarray) -> float:
"""Weighted average minus 1 for estimate of F_0.
Computes phi_n(lambda_n) - 1.
Args:
lag_mul: The normalized Lagrangian multiplier. Must be
strictly between 0 and 1.
val: Likelihood values (excluding zero and infinity).
count: Counts for likelihood values (including zero and
infinity). Only the first and the last counts can be
zero. Sum must be positive.
Returns:
Weighted average minus 1.
"""
return (
sum(count[1:-1] / (lag_mul + (1 - lag_mul) * val)) + count[0] / lag_mul
) / sum(count) - 1 | 699b33b824e8a0cae1b35f1290281a08a910f7ef | 20,715 |
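# Numeric sanity check (hedged sketch, assumes numpy as np from above): with a
# single interior likelihood value of 1.0 and counts (1 zero-value, 2 interior,
# 1 infinity), the weighted average equals 1 at lag_mul = 0.5, so the dual is 0.
assert abs(dual_single(0.5, np.array([1.0]), np.array([1, 2, 1]))) < 1e-12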
def _execute(
repository_ctx,
cmdline,
error_msg = None,
error_details = None,
empty_stdout_fine = False,
environment = {}):
"""Executes an arbitrary shell command.
Args:
repository_ctx: the repository_ctx object
cmdline: list of strings, the command to execute
error_msg: string, a summary of the error if the command fails
error_details: string, details about the error or steps to fix it
empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
it's an error
environment: environment variables passed to repository_ctx.execute
Return:
the result of repository_ctx.execute(cmdline)
"""
result = repository_ctx.execute(cmdline, environment = environment)
if result.stderr or not (empty_stdout_fine or result.stdout):
_fail("\n".join([
error_msg.strip() if error_msg else "Repository command failed",
result.stderr.strip(),
error_details if error_details else "",
]))
return result | 1d1a291380ca540ab7ec34bbcec4780b695cc0d1 | 20,716 |
import warnings
def parse_tagged_block(block):
"""
Replaces "data" attribute of a block with parsed data structure
if it is known how to parse it.
"""
key = block.key.decode('ascii')
if not TaggedBlock.is_known(key):
warnings.warn("Unknown tagged block (%s)" % block.key)
decoder = _tagged_block_decoders.get(key, lambda data: data)
return Block(key, decoder(block.data)) | 2c06fb4a20e05690a3b72b85dd09b5a2f04a6513 | 20,717 |
import numpy
import theano
def floatX(arr):
"""Converts data to a numpy array of dtype ``theano.config.floatX``.
Parameters
----------
arr : array_like
The data to be converted.
Returns
-------
numpy ndarray
The input array in the ``floatX`` dtype configured for Theano.
If `arr` is an ndarray of correct dtype, it is returned as is.
"""
return numpy.asarray(arr, dtype=theano.config.floatX) | 261064f1685b4e393d493c5cc657b1ab76d5e89f | 20,718 |
def getNumberOfPublicIp():
"""Get the total number of public IP
return: (long) Number of public IP
"""
#No need to calculate this constant everytime
return 3689020672
# Real implementation:
#ranges = getValidPublicIpRange()
#number_of_ip = 0
#for range in ranges:
# number_of_ip = number_of_ip + (range[1] - range[0] + 1)
#return number_of_ip | 79221376f64d0a44da06746bc28f0bb7db808b0f | 20,719 |
import numpy as np
def cart2sph(x, y, z):
"""
Convert Cartesian coordinates x, y, z
to conventional spherical coordinates r, p, a
:param x: Cartesian coordinate or vector x
:type x: float or np.ndarray
:param y: Cartesian coordinate or vector y
:type y: float or np.ndarray
:param z: Cartesian coordinates or vector z
:type z: float or np.ndarray
:return: Spherical coordinates: radius, polar angle, and azimuth angle
:rtype: np.ndarray
"""
r = (x ** 2 + y ** 2 + z ** 2) ** 0.5
p = np.arccos(z / r)
a = np.arctan2(y, x)
return np.array([r, p, a]) | 9487d44a8892f450a2920997f277f0f699a89e2d | 20,722 |
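# Quick check (assumes numpy as np from the snippet above): the point (1, 1, 0)
# lies in the xy-plane, so r = sqrt(2), polar angle = pi/2, azimuth = pi/4.
assert np.allclose(cart2sph(1.0, 1.0, 0.0), [np.sqrt(2), np.pi / 2, np.pi / 4])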
import httplib
def hdfs_open(server, username, path, **args):
"""Read a file.
Returns a filelike object (specifically, an httplib response object).
"""
    # Use a distinct local name: assigning to `datanode_url` would shadow the
    # module-level helper and raise UnboundLocalError at the call site.
    url = datanode_url(server, username, path, **args)
    response = _datanode_request(server, username, 'GET', url)
if response.status == httplib.OK:
return response
else:
content = response.read()
_raise_error(response.status, content) | 6ddc83d6d571d63ce5536f7c34a00c7c3b04f1fc | 20,723 |
def get_serv_loader(desc_file=SERVICES_FILE):
"""Get a ServiceLoader with service descriptions in the given file.
Uses a "singleton" when the file is `SERVICES_FILE`.
"""
global _serv_loader
if desc_file == SERVICES_FILE:
if _serv_loader is None:
with open(desc_file, "r") as fp:
_serv_loader = ServiceLoader(fp)
return _serv_loader
with open(desc_file, "r") as fp:
ld = ServiceLoader(fp)
return ld | 3693b4fd11bc2efcd394eac94e6667b84d110b34 | 20,724 |
import socket
def check_reverse_lookup():
"""
Check if host fqdn resolves to current host ip
"""
try:
host_name = socket.gethostname().lower()
host_ip = socket.gethostbyname(host_name)
host_fqdn = socket.getfqdn().lower()
fqdn_ip = socket.gethostbyname(host_fqdn)
return host_ip == fqdn_ip
except socket.error:
pass
return False | 4979ba32d03782258f322ec86b2cd1c24fb4de2c | 20,725 |
import pandas as pd
from typing import Sequence, Text
def _clean_bar_plot_data(df_in: pd.DataFrame,
sweep_vars: Sequence[Text] = None) -> pd.DataFrame:
"""Clean the summary data for bar plot comparison of agents."""
df = df_in.copy()
df['env'] = pd.Categorical(
df.bsuite_env, categories=_ORDERED_EXPERIMENTS, ordered=True)
df['type'] = pd.Categorical(
df['type'], categories=_ORDERED_TYPES, ordered=True)
if sweep_vars is None:
df['agent'] = 'agent'
elif len(sweep_vars) == 1:
df['agent'] = df[sweep_vars[0]].astype(str)
else:
df['agent'] = (df[sweep_vars].astype(str)
.apply(lambda x: x.name + '=' + x, axis=0)
.apply(lambda x: '\n'.join(x), axis=1) # pylint:disable=unnecessary-lambda
)
return df | a62feba3511dccd0d6a909531d5f50b96348e072 | 20,728 |
def file_resources():
"""File Resources."""
return {
'mock_file': CustomFileResource(service=FileService()),
'mock_file_action': CustomFileActionResource(service=FileService()),
} | 412bdb59f04c2092c7bd77ef677cfac43bbc27ff | 20,729 |
import pickle
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
def prepare_data(features=None):
"""Prepare data for analysis
Args:
features (list of str): list with features
Returns:
X_train (np.matrix): train X
X_test (np.matrix): test X
y_train (np.matrix): train y
y_test (np.matrix): test y
"""
# Read data
xls = pd.ExcelFile('Database.xlsx')
db1 = xls.parse(1)
db2 = xls.parse(2)
db1.loc[np.isnan(db1['Sales']), 'Sales'] = 0
y = (db1['Sales']).as_matrix()
# Fill the premium column in db2
db2['Premium Offered'] = db1['Premium Offered'].mean()
# To get all columns in X, we need to mix it with the training data
if features is None:
features = [x for x in db1.columns if x not in not_features]
db3 = pd.concat([db1[features], db2[features]], axis=0)
# Generate an X matrix
Xall = proccess_X(db3, features)
X = Xall[:db1.shape[0], :]
X2 = Xall[db1.shape[0]:, :]
# Train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42)
# Pickle the data
data = {'X1': X, 'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test, 'X2': X2}
pickle.dump(data, open('rf_data.dat', 'wb'))
return X_train, X_test, y_train, y_test | b880e89c4bd5b8df80c385141f7f34e71164dc37 | 20,730 |
import pandas as pd
def choose_my_art_date(my_location, google_maps_key, mapping = False, search_range = 500, min_rating = 4.3):
"""
Function to select an artsy date and dinner; randomly selects local arts event from NY ArtBeat API
found at https://www.nyartbeat.com/resources/doc/api, and uses the arts event data to determine a nearby restaurant.
Args
----
Required:
my_location(str): Starting point address - must be within NYC Metro Location
google_maps_key (str): Optional google maps API key needed to geocode your location
To obtain a google maps API key, please refer to https://developers.google.com/maps
Optional:
search_range(float): Distance from starting point (radius for search, meters)
Default: 500
min_rating(float): should be 1-5
Default: 4.3
mapping(bool): Boolean param specifying whether user wants a simple interactive map returned of matching locations
Default: False
Returns
---
DataFrame with [max_results] art events in the New York Metro area in the [radius] of the [specified location]
Fields:
Event_Name(str): Name of Event
Event_Description(str): Details about event
Event_Price_Adult(float): Price for tickets
DateEnd(date): Last date for exhibit or installation
Event_Lat(float): Latitude of event
Event_Lon(float): Longitude of event
Event_Address(str): Address for event - requires geocoding.
Restaurant_Name(str): Name of restaurant
Price_Level(str): $ - $$$$
Restaurant_Rating(float): 1-5
Restaurant_Address(str): Distance from starting point (my location)
Restaurant_Lat(float): Latitude of restaurant
Restaurant_Lon(float): Longitude of restaurant
Map (Optional): Interactive Google Maps Output with Markers for selected restaurant and selected event.
Usage Example
---
[in]:
choose_my_art_date("Met Museum", google_maps_key)
[out]:
df
| Event_Name | Eugène Leroy “About Marina”
| Event_Description | Michael Werner Gallery, New York presents an e...
| Price | Free
| DateEnd | 2021-12-23
| Distance | 438.962726
| Event_Lat | 40.775625
...
[out]:
Interactive Map
"""
lat,lon = geocoding(my_location = my_location, google_maps_key = google_maps_key)
df_events = find_my_art_events(my_location = my_location, google_maps_key = google_maps_key, lat = lat, lon = lon, mapping = False, search_range = search_range)
selected_event_row = df_events.sample(n = 1)
event_lat = selected_event_row['Event_Lat'].values
event_lon = selected_event_row['Event_Lon'].values
df_dinner = find_my_dinner(lat = event_lat, lon = event_lon, google_maps_key = google_maps_key, mapping = False, search_range = search_range)
selected_restaurant_row = df_dinner.sample(n = 1)
date_night_df = pd.concat([selected_event_row,selected_restaurant_row], axis=1).unstack().reset_index().dropna().drop(columns = ['level_1']).rename(columns = {'level_0':'Field',0:'Value'})
if mapping == True:
lat_lon_df = pd.concat([selected_event_row[['Event_Name','Event_Lat','Event_Lon']].rename(columns = {'Event_Name':'Name','Event_Lat':'Lat','Event_Lon':'Lon'}),\
selected_restaurant_row[['Restaurant_Name','Restaurant_Lat','Restaurant_Lon']].rename(columns = {'Restaurant_Name':'Name','Restaurant_Lat':'Lat','Restaurant_Lon':'Lon'})], axis=0).reset_index()
nymap = map_events(lat_lon_df, google_maps_key, name_column = 'Name', start_lat = lat, start_lon = lon, lat_column = 'Lat', long_column = 'Lon')
return date_night_df,nymap
else:
return date_night_df | a33ffc38c4bfb648fd4a68087844484817c8ee02 | 20,731 |
import hashlib
def hash_text(message: str, hash_alg: str = 'keccak256') -> str:
"""get the hash of text data
:param message: str
:param hash_alg: str, `keccak256` or `sha256`, the default value is `keccak256`
:return: hex str, digest message with `keccak256` or `sha256`
"""
if hash_alg == 'keccak256':
_hash = keccak256(text=message)
elif hash_alg == 'sha256':
_hash = hashlib.sha256(message.encode()).hexdigest()
else:
        raise ValueError(f'unsupported hash_alg param, hash_alg: {hash_alg}')
return _hash | da161ee7573ff1af9d644e04641cbb844b2311ca | 20,733 |
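# Example for the sha256 branch (the keccak256 helper used by the default
# branch is not shown in this snippet):
assert hash_text("hello", hash_alg="sha256") == (
    "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
)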
def get_grains_connected_to_face(mesh, face_set, node_id_grain_lut):
"""
This function find the grain connected to the face set given as argument.
Three nodes on a grain boundary can all be intersected by one grain
in which case the grain face is on the boundary or by two grains. It
is therefore sufficient to look at the set of grains contained by any
three nodes in the face set and take the intersection of these sets.
:param mesh: The mesh
:type mesh: :class:`Mesh`
:param face_set: The face set to find grains connected to
:type: face_set: :class:`ElementSet`
:return: The grain identifiers that intersect the face.
:rtype: list of ints
"""
grains_connected_to_face = []
grains = face_set.name[4:].split("_")
if len(grains) == 2:
return [int(g) for g in grains]
triangle_element = mesh.elements[face_set.ids[0]]
for node_id in triangle_element.vertices:
grains_with_node_id = node_id_grain_lut[node_id]
grains_connected_to_face.append(set(grains_with_node_id))
return list(set.intersection(*grains_connected_to_face)) | cb4adff2d6ffe3c32e2a1fc8058e6ad1fed9b2c9 | 20,734 |
def get_git_projects(git_worktree, args,
default_all=False,
use_build_deps=False,
groups=None):
""" Get a list of git projects to use """
git_parser = GitProjectParser(git_worktree)
groups = vars(args).get("groups")
if groups:
use_build_deps = False
if use_build_deps:
# To avoid getting all the projects when no project is given
# and running from the subdir of a build project
if not at_top_worktree(git_worktree):
default_all = False
build_worktree = qibuild.worktree.BuildWorkTree(git_worktree.worktree)
build_parser = GitBuildProjectParser(git_worktree, build_worktree)
return build_parser.parse_args(args, default_all=default_all)
if groups:
return git_worktree.get_git_projects(groups=groups)
return git_parser.parse_args(args, default_all=default_all) | afcb3e68d5e023937bbcdb9e86f9d50f9dc63e78 | 20,735 |
from typing import Literal
def RmZ(
ps: Table,
r_band: Literal["9601", "9602"] = "9602",
z_band: Literal["9801", "9901"] = "9901",
**kw
) -> units.mag:
"""R-Z color.
Parameters
----------
ps : astropy.table.Table
need arguments for r(z)_band functions
r_band: {'9601', '9602'}
R band to use
(default '9602')
z_band: {'9801', '9901'}
Z band to use
(default '9901')
kwargs
passes to R & Z-band functions
Returns
-------
R-Z color
"""
return _b1mb2(ps, "R_MP" + r_band, "Z_MP" + z_band, **kw) | 9c659933788a36409361941f65c4d29e73ec0b9f | 20,736 |
from typing import List
import psycopg2
def get_table_names(connection: psycopg2.extensions.connection) -> List[str]:
"""
Report the name of the tables.
E.g., tables=['entities', 'events', 'stories', 'taxonomy']
"""
query = """
SELECT table_name
FROM information_schema.tables
WHERE table_type = 'BASE TABLE'
AND table_schema = 'public'
"""
cursor = connection.cursor()
cursor.execute(query)
tables = [x[0] for x in cursor.fetchall()]
return tables | 7602e998b8d4431041eb688cc5ed2450f4dc49bc | 20,737 |
def mrpxmrp(sigmaset1, sigmaset2):
"""in work; returns transformation [FN] = [FB(s2)][BN(s1)]
"""
q1 = np.array(sigmaset1)
q2 = np.array(sigmaset2)
sig1_norm = norm(sigmaset1)
sig2_norm = norm(sigmaset2)
scalar1 = 1 - sig1_norm**2
scalar2 = 1 - sig2_norm**2
scalar3 = 2.
denom = 1 + sig1_norm**2*sig2_norm**2-2*vec.vdotv(sigmaset1, sigmaset2)
term1 = vec.vxs(scalar1, sigmaset2)
term2 = vec.vxs(scalar2, sigmaset1)
term3 = vec.vxs(2, vec.vcrossv(sigmaset2, sigmaset1))
numer = vec.vxadd(term1, vec.vxadd(term2, -term3))
sigma = vec.vxs(denom, numer)
# sigma = (1-(q1.T*q1))*q2+(1-(q2*q2.T))*q1+2*np.cross(q1.T,q2.T).T;
# sigma = sigma/(1+q1.T*q1 * q2.T*q2-2*q1.T*q2);
return np.array(sigma) | 4df11a8f93c857e44fc98a59ff9a5a4325d29eef | 20,738 |
import numpy as np
def continuous_partition_data(data, bins='auto', n_bins=10):
"""Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
"""
if bins == 'uniform':
bins = np.linspace(start=np.min(data), stop=np.max(data), num = n_bins+1)
elif bins =='ntile':
bins = np.percentile(data, np.linspace(start=0, stop=100, num = n_bins+1))
elif bins != 'auto':
raise ValueError("Invalid parameter for bins argument")
hist, bin_edges = np.histogram(data, bins, density=False)
return {
"bins": bin_edges,
"weights": hist / len(data)
} | 07ab1663a4b2a4d62f2e8fce3c49d0c8c135d9e7 | 20,739 |
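# Worked example (assumes numpy as np from above): seven points split into
# three uniform bins over [1, 4] give edges [1, 2, 3, 4] and weights 1/7, 2/7, 4/7.
part = continuous_partition_data([1, 2, 2, 3, 3, 3, 4], bins="uniform", n_bins=3)
assert np.allclose(part["bins"], [1, 2, 3, 4])
assert np.allclose(part["weights"], [1 / 7, 2 / 7, 4 / 7])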
import csv
def readInput(infile,genefile, segfile):
"""
Reads input files.
Extended description of function.
Parameters:
infile (str): File containing list of genes to be analyzed
genefile (str): File containing gene range definitions
segfile (str): File containing cell line intervals and copy number data
Returns:
genes (list): List of genes
genedef (dict): Dictionary of genes mapping to corresponding intervals
interval_dict(dict): Dictionary of dictionary of interval trees containing cell line ranges
"""
with open(infile) as inf:
genes = [i.strip() for i in inf.readlines()]
with open(genefile) as genef:
dictgenes = csv.DictReader(genef, delimiter="\t")
genedef = {}
for d in dictgenes:
if d["cds_from"] != "-" and d["cds_to"] != "-":
genedef[d["gene"]] = (d["#chromosome"], Interval(int(d["cds_from"]),int(d["cds_to"])))
with open(segfile) as seg:
interval_dict = {}
dictseg = csv.DictReader(seg, delimiter="\t")
for d in dictseg:
d = dict(d)
if "e" in d["End"]:
#Replace one incorrect exponential value
d["End"] = 115000000
if d["CCLE_name"] in interval_dict:
if d["Chromosome"] in interval_dict[d["CCLE_name"]]:
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
else:
interval_dict[d["CCLE_name"]][d["Chromosome"]] = IntervalTree()
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
else:
interval_dict[d["CCLE_name"]] = dict()
interval_dict[d["CCLE_name"]][d["Chromosome"]] = IntervalTree()
interval_dict[d["CCLE_name"]][d["Chromosome"]][int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
return genes, genedef, interval_dict | 225a0403c1b8875ec7f40c6511edc1c1828dd2aa | 20,740 |
def library_name(name, suffix=SHLIB_SUFFIX, is_windows=is_windows):
"""
Convert a file basename `name` to a library name (no "lib" and ".so" etc.)
>>> library_name("libpython3.7m.so") # doctest: +SKIP
'python3.7m'
>>> library_name("libpython3.7m.so", suffix=".so", is_windows=False)
'python3.7m'
>>> library_name("libpython3.7m.dylib", suffix=".dylib", is_windows=False)
'python3.7m'
>>> library_name("python37.dll", suffix=".dll", is_windows=True)
'python37'
"""
if not is_windows and name.startswith("lib"):
name = name[len("lib"):]
if suffix and name.endswith(suffix):
name = name[:-len(suffix)]
return name | 56c19da17acd6d00006e9c1e4308148ab7bc18d8 | 20,741 |
def add_light(light_type: str = 'POINT') -> str:
"""
Add a light of the given type to the scene, return
the name key of the newly added light
:param light_type:
:return: The named key used to index the object
"""
if utils.is_new_api():
bpy.ops.object.light_add(type=light_type)
else:
bpy.ops.object.lamp_add(type=light_type)
light_obj = bpy.context.selected_objects[0]
# Enable contact shadows
if utils.is_new_api():
light_obj.data.use_contact_shadow = True
# Return the name
return light_obj.name | ac1f5e66a3baf90f2e603069e967b8ed3a76bf8d | 20,742 |
from datetime import datetime
from pprint import pformat
def set_constants(ze=40, p=0.4,
kc_min=0.01, kc_max=1.0,
snow_alpha=0.2, snow_beta=11.0,
ke_max=1.0,
a_min=0.45, a_max=0.90):
"""
:param ze:
:param p: the fraction of TAW that a crop can extract from the root zone without suffering water stress; ASCE pg 226
:param kc_min:
:param kc_max:
:param snow_alpha:
:param snow_beta:
:param ke_max:
:param a_min:
:param a_max:
:return:
"""
d = dict(s_mon=datetime(1900, 7, 1),
e_mon=datetime(1900, 10, 1),
ze=ze, p=p,
kc_min=kc_min, kc_max=kc_max,
snow_alpha=snow_alpha, snow_beta=snow_beta,
ke_max=ke_max,
a_min=a_min, a_max=a_max)
    print('constants dict: {}\n'.format(pformat(d, indent=2)))
return d | d2c02eb49c59c203b9520b32e94aaceac611abcc | 20,743 |
import numpy as np
def project_poses(poses, P):
"""Compute projected poses x = Pp."""
assert poses.ndim == 2 and poses.shape[-1] == 3, \
'Invalid pose dim at ext_proj {}'.format(poses.shape)
assert P.shape == (3, 4), 'Invalid projection shape {}'.format(P.shape)
p = np.concatenate([poses, np.ones((len(poses), 1))], axis=-1)
x = np.matmul(P, p.T)
return x.T | 943c935791744ec3ec6f476c16911d7c90f2024b | 20,744 |
def argmin(x):
"""
Returns the index of the smallest element of the iterable `x`.
If two or more elements equal the minimum value, the index of the first
such element is returned.
>>> argmin([1, 3, 2, 0])
3
>>> argmin(abs(x) for x in range(-3, 4))
3
"""
argmin_ = None
min_ = None
for (nItem, item) in enumerate(x):
if (argmin_ is None) or (item < min_):
argmin_ = nItem
min_ = item
return argmin_ | 8d6778182bf3c18ffa6ef72093bf19a818d74911 | 20,745 |
def find_spot(entry, list):
"""
return index of entry in list
"""
for s, spot in enumerate(list):
if entry==spot:
return s
else:
raise ValueError("could not find entry: "+ str(entry)+ " in list: "+ str(list)) | e218822e5e56a62c40f5680751c1360c56f05f4a | 20,746 |
def parse_file(path, game=None, path_relative_to_game=True, verbose=False):
"""
Parse a single file and return a Tree.
path, game:
If game is None, path is a full path and the game is determined from that.
Or game can be supplied, in which case path is a path relative to the game directory.
"""
    if path_relative_to_game:
        path, game = pyradox.config.combine_path_and_game(path, game)
encodings = game_encodings[game]
lines = readlines(path, encodings)
if verbose: print('Parsing file %s.' % path)
token_data = lex(lines, path)
return parse_tree(token_data, path) | 64673afea557ad2d74eb4cdcc56d6f6f6e2cd1f6 | 20,747 |
def svn_repos_fs_commit_txn(*args):
"""svn_repos_fs_commit_txn(svn_repos_t * repos, svn_fs_txn_t * txn, apr_pool_t pool) -> svn_error_t"""
return _repos.svn_repos_fs_commit_txn(*args) | 6aebf604435485aba694b46ab58f5e4ca7d1b549 | 20,748 |
from os.path import basename, join
def iter_dir(temp_dir, blast_db, query_name, iteration):
"""
Get the work directory for the current iteration.
We need to call this function in child processes so it cannot be in an
object.
"""
name = '{}_{}_{:02d}'.format(
basename(blast_db), basename(query_name), iteration)
return join(temp_dir, name) | e4a8acf79fa3ef6b3822d91406cd6cf007477308 | 20,750 |
def zoomSurface(src, zoomx, zoomy, smooth):
"""Zooms a surface with different x & y scaling factors.
This function renders to a new surface, with optional anti-aliasing. If a
zoom factor is negative, the image will be flipped along that axis. If the
surface is not 8-bit or 32-bit RGBA/ABGR, it will be converted into a 32-bit
RGBA format on the fly.
Args:
src (:obj:`SDL_Surface`): The surface to zoom.
zoomx (float): The x-axis (horizontal) zoom factor.
zoomy (float): The y-axis (vertical) zoom factor.
smooth (int): If set to 1, the output image will be anti-aliased. If set
to 0, no anti-aliasing will be performed. Must be either 0 or 1.
Returns:
:obj:`SDL_Surface`: A new output surface with zoom applied.
"""
return _funcs["zoomSurface"](src, zoomx, zoomy, smooth) | 08e7ad74a4420f5b02a6924d47cdecf3dc09229b | 20,751 |
import struct
def array(*cols: Column) -> Column:
"""
Return column of arrays
"""
return (struct(*cols).apply(list)).alias(f"[{', '.join([Column.getName(c) for c in cols])}]") | 65599cd0fa7b0ea5b670555656a9b615e0e68664 | 20,752 |
def prepare_inputs(boxes, digits_occurrence):
"""
:param boxes:
2D list of 81 gray OpenCV images (2D numpy arrays)
:param digits_occurrence:
2D numpy array that contains True or False values that represent occurrence of digits
:return:
if no digit was found returns None;
otherwise returns 4D numpy array with shape = (digits count, 28, 28, 1) that
contains cropped, scaled and centered digits that are perfectly prepared for a cnn model
(at least for this model I created)
"""
digits_count = 0
for y in digits_occurrence:
for x in y:
digits_count += int(x)
if digits_count == 0:
return None
cropped_boxes_with_digits = get_cropped_boxes_with_digits(boxes, digits_occurrence)
digits = get_cropped_digits(cropped_boxes_with_digits)
if digits is None:
return None
resize(digits)
digits = add_margins(digits, 28, 28)
center_using_mass_centers(digits)
digits = digits.reshape((digits.shape[0], 28, 28, 1))
digits = digits / 255
return digits | 27c97e374f9cbdb0d427acacb5645819d05a2aea | 20,754 |
def evaluate_constants(const_arrays, expr): # pragma: no cover
"""Convert constant arguments to cupy arrays, and perform any possible
constant contractions.
"""
return expr(*[to_cupy(x) for x in const_arrays], backend='cupy', evaluate_constants=True) | 7a371d0cb262fb530873825dee02a8201913b2c1 | 20,755 |
def setup_family(dompc, family, create_liege=True, create_vassals=True,
character=None, srank=None, region=None, liege=None,
num_vassals=2):
"""
Creates a ruler object and either retrieves a house
organization or creates it. Then we also create similar
ruler objects for an npc liege (if we should have one),
and npc vassals (if we should have any). We return a tuple of
our ruler object, our liege's ruler object or None, and a list
of vassals' ruler objects.
"""
vassals = []
# create a liege only if we don't have one already
if create_liege and not liege:
name = "Liege of %s" % family
liege = setup_ruler(name)
ruler = setup_ruler(family, dompc, liege)
if create_vassals:
vassals = setup_vassals(family, ruler, region, character, srank, num=num_vassals)
return ruler, liege, vassals | 9cd1fda01685c071f79e876add49e5ffe5b23642 | 20,756 |
def normalize_line(line: dict, lang: str):
"""Apply normalization to a line of OCR.
The normalization rules that are applied depend on the language in which
the text is written. This normalization is necessary because Olive, unlike
e.g. Mets, does not encode explicitly the presence/absence of whitespaces.
:param dict line: A line of OCR text.
:param str lang: Language of the text.
:return: A new line of text.
:rtype: dict
"""
mw_tokens = [
token
for token in line["t"]
if "qid" in token
]
# apply normalization only to those lines that contain at least one
# multi-word token (denoted by presence of `qid` field)
if len(mw_tokens) > 0:
line = merge_pseudo_tokens(line)
line = normalize_hyphenation(line)
for i, token in enumerate(line["t"]):
if "qid" not in token and "nf" in token:
del token["nf"]
if "qid" in token:
del token["qid"]
if i == 0 and i != len(line["t"]) - 1:
insert_ws = insert_whitespace(
token["tx"],
line["t"][i + 1]["tx"],
None,
lang
)
elif i == 0 and i == len(line["t"]) - 1:
insert_ws = insert_whitespace(
token["tx"],
None,
None,
lang
)
elif i == len(line["t"]) - 1:
insert_ws = insert_whitespace(
token["tx"],
None,
line["t"][i - 1]["tx"],
lang
)
else:
insert_ws = insert_whitespace(
token["tx"],
line["t"][i + 1]["tx"],
line["t"][i - 1]["tx"],
lang
)
if not insert_ws:
token["gn"] = True
return line | f158d3d9811752e009a7f0aa6f94b4a67e322960 | 20,757 |
def attribute_summary(attribute_value, item_type, limit=None):
"""Summarizes the information in fields attributes where content is
written as an array of arrays like tag_cloud, items, etc.
"""
if attribute_value is None:
return None
items = ["%s (%s)" % (item, instances) for
item, instances in attribute_value]
items_length = len(items)
if limit is None or limit > items_length:
limit = items_length
return "%s %s: %s" % (items_length, type_singular(item_type,
items_length == 1),
", ".join(items[0: limit])) | a835e985ce4f9c6d82bdaa267589a802fb865d26 | 20,758 |
import json
def root():
"""Base view."""
new_list = json.dumps(str(utc_value))
return new_list | 7d94ea10dc944d1cef13dfea7cc12ccc5bc9f742 | 20,759 |
import sys
import struct
import binascii
def plc_read_db(plc_client, db_no, entry_offset, entry_len):
"""
Read specified amount of bytes at offset from a DB on a PLC
"""
try:
db_var = plc_client.db_read(db_no, entry_offset, entry_len)
except Exception as err:
print "[-] DB read error:", err
sys.exit(1)
db_val = struct.unpack('!f', binascii.hexlify(db_var).decode('hex'))[0]
return db_val | 324c81b1192c90c3909bf4e6cb65e590ce48f59a | 20,760 |
import numpy as np
def rank_adjust(t, c=None):
"""
Currently limited to only Mean Order Number
Room to expand to:
Modal Order Number, and
Median Order Number
Uses mean order statistic to conduct rank adjustment
For further reading see:
http://reliawiki.org/index.php/Parameter_Estimation
Above reference provides excellent explanation of how this method is
derived this function currently assumes good input
"""
# Total items in test/population
N = len(t)
# Preallocate adjusted ranks array
ranks = np.zeros(N)
if c is None:
c = np.zeros(N)
# Rank adjustment for [right] censored data
# PMON - "Previous Mean Order Number"
# NIPBSS - "Number of Items Before Present Suspended Set"
PMON = 0
for i in range(0, N):
if c[i] == 0:
NIBPSS = N - i
ranks[i] = PMON + (N + 1 - PMON) / (1 + NIBPSS)
PMON = ranks[i]
elif c[i] == 1:
ranks[i] = np.nan
else:
# ERROR
raise ValueError("Censoring flag must be 0 or 1 with rank_adjust")
return ranks | c79d308dd333c96abe64274918fcd294d24f7d40 | 20,762 |
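# Worked example (assumes numpy as np from above): with the second item
# right-censored, the mean order numbers become 1.0, NaN, 7/3, 11/3.
ranks = rank_adjust([10.0, 20.0, 30.0, 40.0], c=[0, 1, 0, 0])
assert np.allclose(ranks[[0, 2, 3]], [1.0, 7 / 3, 11 / 3])
assert np.isnan(ranks[1])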
from datetime import datetime
from urllib import parse
import requests
def login_captcha(username, password, sid):
"""
bilibili login with captcha.
depend on captcha recognize service, please do not use this as first choice.
Args:
username: plain text username for bilibili.
password: plain text password for bilibili.
sid: session id
Returns:
code: login response code (0: success, -105: captcha error, ...).
access_token: token for further operation.
refresh_token: token for refresh access_token.
sid: session id.
mid: member id.
expires_in: access token expire time (30 days)
"""
jsessionid, captcha_img = get_capcha(sid)
captcha_str = recognize_captcha(captcha_img)
hash, pubkey, sid = get_key(sid, jsessionid)
encrypted_password = cipher.encrypt_login_password(password, hash, pubkey)
url_encoded_username = parse.quote_plus(username)
url_encoded_password = parse.quote_plus(encrypted_password)
post_data = {
'appkey': APPKEY,
'captcha': captcha_str,
'password': url_encoded_password,
'platform': "pc",
'ts': str(int(datetime.now().timestamp())),
'username': url_encoded_username
}
post_data['sign'] = cipher.sign_dict(post_data, APPSECRET)
# avoid multiple url parse
post_data['username'] = username
post_data['password'] = encrypted_password
post_data['captcha'] = captcha_str
headers = {
'Connection': 'keep-alive',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'User-Agent': '',
'Accept-Encoding': 'gzip,deflate',
}
r = requests.post(
"https://passport.bilibili.com/api/oauth2/login",
headers=headers,
data=post_data,
cookies={
'JSESSIONID': jsessionid,
'sid': sid
}
)
response = r.json()
if response['code'] == 0:
login_data = response['data']
return response['code'], login_data['access_token'], login_data['refresh_token'], sid, login_data['mid'], login_data["expires_in"]
else:
return response['code'], None, None, sid, None, None | de0ee5d865ea11b32f155fb3a3470fff9b13202b | 20,763 |
import numpy as np
import pandas as pd
def ml_transitions(game, attach=True, verbose=False):
"""
dataframe to directional line movement arrays
"""
transition_classes = []
prev = [None, None]
for i, row in game.iterrows():
cur = list(row[["a_ml", "h_ml"]])
transition_class = analyze.classify_transition(prev, cur)
transition_classes.append(transition_class)
prev = cur
if attach:
trans_df = pd.DataFrame(transition_classes)
trans_df = trans_df.add_prefix("trans_class_")
ret = pd.concat([game, trans_df], axis=1)
else:
ret = transition_classes
if verbose:
strings = {i: s for i, s in enumerate(bm.TRANSITION_CLASS_STRINGS)}
for i, t in enumerate(transition_classes):
class_num = np.argmax(t)
print(f"{i}: {strings[class_num]}")
return ret | 3156f377f4c78b30cabe27f1dc37a277b25ebde6 | 20,764 |
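A usage sketch, assuming the analyze/bm helpers imported by this module are available; the a_ml / h_ml columns hold the away and home moneylines:

import pandas as pd

game = pd.DataFrame({"a_ml": [150, 145, 140], "h_ml": [-170, -165, -160]})
game_with_classes = ml_transitions(game, attach=True)
print(game_with_classes.filter(like="trans_class_").head())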
def plugin_uninstall(plugin, flags=None, kvflags=None):
"""
Uninstall a Helm plugin.
Return True if succeed, else the error message.
plugin
(string) The plugin to uninstall.
flags
(list) Flags in argument of the command without values. ex: ['help', '--help']
kvflags
(dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}
CLI Example:
.. code-block:: bash
salt '*' helm.plugin_uninstall PLUGIN
"""
return _exec_true_return(
commands=["plugin", "uninstall", plugin], flags=flags, kvflags=kvflags
) | f162d37cb48e3cc0ebb3294ec4fe33b9fc56d8f0 | 20,765 |
def geom_to_tuple(geom):
"""
Takes a lat/long point (or geom) from KCMO style csvs.
Returns (lat, long) tuple
"""
    # Strip the leading "POINT " prefix and parse the coordinates explicitly
    # instead of calling eval() on CSV input.
    coords = geom[6:].strip("()").split()
    return tuple(float(c) for c in coords) | 003f25a0ebc8fd372b63453e4782aa52c0ad697c | 20,766
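For example, with a WKT-style point string (the "POINT " prefix is what the 6-character strip assumes):

print(geom_to_tuple("POINT (39.0997 -94.5786)"))  # -> (39.0997, -94.5786)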
def b_s_Poole(s, V_max, z, halo_type, bias_type):
""" This function expresses Equation (2) of Poole et al (2014)
and fetches the parameters needed to compute it.
Args:
s (numpy.ndarray) : scale values
V_max (float) : halo maximum circular velocity
z (float) : redshift of interest
halo_type (str) : halo type
bias_type (str) : bias type
Returns:
A list containing two arrays with the values of `b_s`
and `b_x` at each scale
"""
# Set the bias parameters
[b_x, s_o, V_SF] = set_b_s_Poole_params(V_max, z, halo_type, bias_type)
# Create b(s) arrays
V_max_norm = V_max/220.
if(V_max_norm < V_SF):
b_s = b_x*(1.-(s_o/s))**0.5
else:
b_s = b_x*(1.+(s_o/s))**0.5
return([b_s, b_x]) | 7ee59c4e3052cab74932c9e78dfc04af92a2284e | 20,767 |
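For reference, the relation encoded above (Equation (2) of Poole et al. 2014 as implemented here, with the sign set by whether V_max/220 falls below V_SF) can be written as:

b(s) = b_x \left(1 \pm \frac{s_0}{s}\right)^{1/2}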
def rasterize_polygon(poly_as_array, shape, geo_ref):
"""
Return a boolean numpy mask with 1 for cells within polygon.
Args:
poly_as_array: A polygon as returned by ogrpoly2array (list of numpy arrays / rings)
shape: Shape (nrows, ncols) of output array
geo_ref: GDAL style georeference of grid.
Returns:
Numpy boolean 2d array.
"""
xy = mesh_as_points(shape, geo_ref)
return points_in_polygon(xy, poly_as_array).reshape(shape) | 506fbc7b1e215e52e1ee469cb88ffc4b4d45dd89 | 20,770 |
def get_reg_part(reg_doc):
"""
Depending on source, the CFR part number exists in different places. Fetch
it, wherever it is.
"""
potential_parts = []
potential_parts.extend(
# FR notice
node.attrib['PART'] for node in reg_doc.xpath('//REGTEXT'))
potential_parts.extend(
# e-CFR XML, under PART/EAR
node.text.replace('Pt.', '').strip()
for node in reg_doc.xpath('//PART/EAR')
if 'Pt.' in node.text)
potential_parts.extend(
# e-CFR XML, under FDSYS/HEADING
node.text.replace('PART', '').strip()
for node in reg_doc.xpath('//FDSYS/HEADING')
if 'PART' in node.text)
potential_parts.extend(
# e-CFR XML, under FDSYS/GRANULENUM
node.text.strip() for node in reg_doc.xpath('//FDSYS/GRANULENUM'))
potential_parts = [p for p in potential_parts if p.strip()]
if potential_parts:
return potential_parts[0] | 33f4c2bb9a4e2f404e7ef94a3bfe3707a3b1dd93 | 20,772 |
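A minimal usage sketch with an FR-notice-style fragment (real documents carry far more structure):

from lxml import etree

doc = etree.fromstring('<ROOT><REGTEXT PART="1026"/></ROOT>')
print(get_reg_part(doc))  # -> '1026'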
from os import sep
from pathlib import Path
import json
def load_configuration(module: str, configs_path=None) -> dict:
"""
Load the configuration and return the dict of the configuration loaded
:param module: The module name to load the configuration.
:type module: str
:param configs_path: path where to check configs. Default `configs/modules/`
:type configs_path: str
:return: Dict of the configuration if present.
:rtype: dict
:raise FileNotFoundError: If configuration file not found
"""
Validator().string(module)
module = module.lower()
if configs_path:
module_path = Path(f"{configs_path}{module}.json") # search for config file
if not module_path.exists():
raise FileNotFoundError(
f"Couldn't find the configuration file of the module {module_path.absolute()}"
)
else:
server_path = Path(
f"configs{sep}modules{sep}server{sep}{module}.json"
) # search for config file in server
android_path = Path(
f"configs{sep}modules{sep}android{sep}{module}.json"
) # search for config file in android
if server_path.exists():
module_path = server_path
elif android_path.exists():
module_path = android_path
else:
raise FileNotFoundError(
f"Couldn't find the configuration file of the module {module}.json"
)
with module_path.open() as mod_file:
mod_data = json.load(mod_file)
return mod_data | 2a5e0a798b3c3c3dd1570ee79d1e3ce5d6eff97b | 20,773 |
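A usage sketch; "hsts" is a hypothetical module name, so this only works inside a project that ships configs/modules/server/hsts.json (or the android equivalent) along with the Validator helper used above:

config = load_configuration("hsts")
print(sorted(config))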
def direct(input_writer, script_str, run_dir, prog,
geo, charge, mult, method, basis, **kwargs):
""" Generates an input file for an electronic structure job and
runs it directly.
:param input_writer: elstruct writer module function for desired job
:type input_writer: elstruct function
:param script_str: string of bash script that contains
execution instructions electronic structure job
:type script_str: str
:param run_dir: name of directory to run electronic structure job
:type run_dir: str
:param prog: electronic structure program to run
:type prog: str
:param geo: cartesian or z-matrix geometry
:type geo: tuple
:param charge: molecular charge
:type charge: int
:param mult: spin multiplicity
:type mult: int
:param method: electronic structure method
:type method: str
        :returns: the input string and the output string
        :rtype: (str, str)
"""
input_str = input_writer(
prog=prog,
geo=geo, charge=charge, mult=mult, method=method, basis=basis,
**kwargs)
output_strs = from_input_string(script_str, run_dir, input_str)
output_str = output_strs[0]
return input_str, output_str | e14e41aac6f682d283094a4e0755dcc8949de269 | 20,774 |
def get_ids(records, key):
"""Utility method to extract list of Ids from Bulk API insert/query result.
Args:
records (:obj:`list`): List of records from a Bulk API insert or SOQL query.
key (:obj:`str`): Key to extract - 'Id' for queries or 'id' for inserted data.
Returns:
(:obj:`list`) of inserted record Ids in form [{'Id':'001000000000001'},...]
"""
return [{'Id': record[key]} for record in records] | e1373aee926406a4a780f6c344069702350cb16d | 20,775 |
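For example, extracting Ids from a small query result:

records = [{"Id": "001000000000001", "Name": "Acme"},
           {"Id": "001000000000002", "Name": "Globex"}]
print(get_ids(records, "Id"))  # [{'Id': '001000000000001'}, {'Id': '001000000000002'}]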
from discord.ext import commands
def auto_help(func):
"""Automatically registers a help command for this group."""
if not isinstance(func, commands.Group):
raise TypeError('Auto help can only be applied to groups.')
cmd = commands.Command(_call_help, name='help', hidden=True)
func.add_command(cmd)
return func | bfcbd0d951dcffb43363b87b6bc4162226db1413 | 20,777 |
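A usage sketch with discord.py's commands extension; it assumes a _call_help coroutine exists in the same module, as the code above requires:

@auto_help
@commands.group()
async def settings(ctx):
    """Settings root command."""
    ...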
import itertools
import operator
def unique_justseen(iterable, key=None):
"""
List unique elements, preserving order. Remember only the element just seen.
>>> [x for x in unique_justseen('AAAABBBCCDAABBB')]
['A', 'B', 'C', 'D', 'A', 'B']
>>> [x for x in unique_justseen('ABBCcAD', str.lower)]
['A', 'B', 'C', 'A', 'D']
"""
    # itertools.imap was removed in Python 3; the built-in map is already lazy
    itemgetter = operator.itemgetter
    groupby = itertools.groupby
    return map(next, map(itemgetter(1), groupby(iterable, key))) | 97323df08e9a001b5cd81cbcb88ae3e8ae486a8b | 20,778
import numpy as np
from scipy.spatial.distance import pdist, squareform
def unique_rows(arr, thresh=0.0, metric='euclidean'):
"""Returns subset of rows that are unique, in terms of Euclidean distance
http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
"""
distances = squareform(pdist(arr, metric=metric))
idxset = {tuple(np.nonzero(v)[0]) for v in distances <= thresh}
return arr[[x[0] for x in idxset]] | 79afdddc50239ed1479deeb75a41ea19d2dec9ca | 20,779 |
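For example, dropping an exactly duplicated row:

import numpy as np

arr = np.array([[0.0, 0.0],
                [0.0, 0.0],
                [1.0, 1.0]])
print(unique_rows(arr))  # the duplicate row is removed, leaving two rows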
import json
def backdoors_listing(request,option=None):
"""
Generate the Backdoor listing page.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param option: Action to take.
:type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
:returns: :class:`django.http.HttpResponse`
"""
request.user._setup()
user = request.user
if user.has_access_to(BackdoorACL.READ):
if option == "csv":
return generate_backdoor_csv(request)
elif option== "jtdelete" and not user.has_access_to(BackdoorACL.DELETE):
result = {'sucess':False,
'message':'User does not have permission to delete Backdoor.'}
return HttpResponse(json.dumps(result,
default=json_handler),
content_type="application/json")
return generate_backdoor_jtable(request, option)
else:
return render_to_response("error.html",
{'error': 'User does not have permission to view backdoor listing.'},
RequestContext(request)) | f4c8d2b2be68c40de7ec9e79a29e592618afbb44 | 20,780 |
def my_join(x):
"""
:param x: -> the list desired to join
:return:
"""
return ''.join(x) | bffc33247926c2b1ebe1930700ed0ad9bcb483ec | 20,781 |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def visualize_code_vectors(code_vectors, cmap='Paired', inter='none',
                           origin='upper', fontsize=16, aspect='auto',
                           colorbar=True):
    """
    Plot the code vectors over time as an image (sensor clusters vs. time),
    optionally with a colorbar, and return the matplotlib figure.
    """
to_plot = np.array(code_vectors)
# First the parameters
to_plot_title = 'Code Vectors in Time'
cmap = cmap
inter = inter
origin = origin
fontsize = fontsize # The fontsize
fig_size = (16, 12)
axes_position = [0.1, 0.1, 0.8, 0.8]
xlabel = 'Sensor Clusters'
ylabel = 'Time'
fig = plt.figure(figsize=fig_size)
ax = fig.add_axes(axes_position)
im = plt.imshow(to_plot, interpolation=inter, cmap=cmap,
origin=origin, aspect=aspect)
# Se the labels and titles
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(to_plot_title)
# Se the ticks names for x
# x_labels = np.arange(Nseries * Nseries + 1)
# ax.xaxis.set_major_formatter(plt.FixedFormatter(x_labels))
# ax.xaxis.set_major_locator(plt.MultipleLocator(1))
# Change the font sizes
axes = fig.get_axes()
for ax in axes:
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
# Colorbar (This makes the axes to display proper)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = fig.colorbar(im, cax=cax)
cbar.solids.set_edgecolor('face')
return fig | 39269d46a8ec544ccf415a363489b83f43d7fbc0 | 20,783 |
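A quick smoke test with random codes (time along the rows, sensor clusters along the columns):

code_vectors = np.random.randint(0, 12, size=(50, 8))
fig = visualize_code_vectors(code_vectors)
plt.show()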
import numpy as np
def get_displacements_and_forces(disp_dataset):
"""Return displacements and forces of all atoms from displacement dataset.
This is used to extract displacements and forces from displacement dataset.
This method is considered more-or-less as a converter when the input is in
type-1.
Parameters
----------
disp_dataset : dict
Displacement dataset either in type-1 or type-2.
Returns
-------
displacements : ndarray
Displacements of all atoms in all supercells.
shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
forces : ndarray or None
Forces of all atoms in all supercells.
shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
None is returned when forces don't exist.
"""
if "first_atoms" in disp_dataset:
natom = disp_dataset["natom"]
disps = np.zeros(
(len(disp_dataset["first_atoms"]), natom, 3), dtype="double", order="C"
)
forces = None
for i, disp1 in enumerate(disp_dataset["first_atoms"]):
disps[i, disp1["number"]] = disp1["displacement"]
if "forces" in disp1:
if forces is None:
forces = np.zeros_like(disps)
forces[i] = disp1["forces"]
return disps, forces
elif "displacements" in disp_dataset:
if "forces" in disp_dataset:
forces = disp_dataset["forces"]
else:
forces = None
return disp_dataset["displacements"], forces | 998f3fd6319ad777b4dfa523b8464b45ce9bae52 | 20,784 |
from datetime import datetime
from re import findall
from typing import Dict
from lxml import etree
def GetUserAllBasicData(user_url: str) -> Dict:
"""获取用户的所有基础信息
Args:
user_url (str): 用户个人主页 Url
Returns:
Dict: 用户基础信息
"""
result = {}
json_obj = GetUserJsonDataApi(user_url)
html_obj = GetUserPCHtmlDataApi(user_url)
anniversary_day_html_obj = GetUserNextAnniversaryDayHtmlDataApi(UserUrlToUserSlug(user_url))
result["name"] = json_obj["nickname"]
result["url"] = user_url
result["uslug"] = UserUrlToUserSlug(user_url)
result["gender"] = json_obj["gender"]
result["followers_count"] = json_obj["following_users_count"]
result["fans_count"] = json_obj["followers_count"]
result["articles_count"] = json_obj
result["wordage"] = json_obj["total_wordage"]
result["likes_count"] = json_obj["total_likes_count"]
try:
result["assets_count"] = html_obj.xpath("//div[@class='info']/ul/li[6]/div[@class='meta-block']/p")[0].text
result["assets_count"] = float(result["assets_count"].replace(".", "").replace("w", "000"))
except IndexError:
result["assets_count"] = None
if json_obj["total_wordage"] == 0 and json_obj["jsd_balance"] == 0:
result["FP_count"] = None
else:
result["FP_count"] = json_obj["jsd_balance"] / 1000
if result["assets_count"] and result["FP_count"]:
result["FTN_count"] = result["assets_count"] - result["FP_count"]
result["FTN_count"] = round(abs(result["FTN_count"]), 3)
else:
result["FTN_count"] = None
result["badges_list"] = html_obj.xpath("//li[@class='badge-icon']/a/text()")
result["badges_list"] = [item.replace(" ", "").replace("\n", "") for item in result["badges_list"]] # 移除空格和换行符
result["badges_list"] = [item for item in result["badges_list"] if item != ""] # 去除空值
result["last_update_time"] = datetime.fromtimestamp(json_obj["last_updated_at"])
try:
result["vip_info"] = {
"vip_type": {
"bronze": "铜牌",
"silver": "银牌",
"gold": "黄金",
"platina": "白金"
}[json_obj["member"]["type"]],
"expire_date": datetime.fromtimestamp(json_obj["member"]["expires_at"])
}
except KeyError:
result["vip_info"] = {
"vip_type": None,
"expire_date": None
}
result["introduction_html"] = json_obj["intro"]
if not result["introduction_html"]:
result["introduction_text"] = ""
else:
result["introduction_text"] = "\n".join(etree.HTML(result["introduction_html"]).xpath("//*/text()"))
result["next_anniversary_day"] = anniversary_day_html_obj.xpath('//*[@id="app"]/div[1]/div/text()')[0]
result["next_anniversary_day"] = datetime.fromisoformat("-".join(findall(r"\d+", result["next_anniversary_day"])))
return result | 1965484ef2aa638f9084720e38c5f660d9000665 | 20,785 |