content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def is_json_request_accept(req):
"""Test if http request 'accept' header configured for JSON response.
:param req: HTTP request
:return: True if need to return JSON response.
"""
return (
type(req.accept) is accept.NoHeaderType or
type(req.accept) is accept.ValidHeaderType and (
req.accept.header_value == 'application/json' or
req.accept.header_value == '*/*'
)
)
|
1a73946c5d090b905ceb09d2841efc316659a90d
| 28,561 |
def make_hierarchy(parent_ps, relative_size, make_subsys, *args, **kwargs):
"""
"""
parent_size = parent_ps.radial_size
ps = ParticleSystem()
for p in parent_ps:
subsys = make_subsys(*args, **kwargs)
subsys.dynrescale_total_mass(p.mass)
subsys_size = relative_size * parent_size
subsys.dynrescale_radial_size(subsys_size)
subsys.com_to_origin()
subsys.com_move_to(p.com_r, p.com_v)
ps.append(subsys)
ps.id = range(ps.n)
return ps
|
3381290bca4791f1ad342b9478855bdaf1646b22
| 28,562 |
def get_site_stats(array, player_names):
"""
Return the summarized statistics for a given array corresponding
to the values sampled for a latent or response site.
"""
if len(array.shape) == 1:
df = pd.DataFrame(array).transpose()
else:
df = pd.DataFrame(array, columns=player_names).transpose()
return df.apply(pd.Series.describe, axis=1)[["mean", "std", "25%", "50%", "75%"]]
|
7105e5cd932675f812ec9b7c3c4299b138af49b2
| 28,564 |
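A minimal usage sketch of the summarization above, assuming numpy and pandas are imported and get_site_stats is in scope; the array shape and player names are made up for illustration.
samples = np.random.normal(loc=[0.0, 1.0], scale=1.0, size=(1000, 2))  # 1000 draws, 2 players
stats = get_site_stats(samples, ["alice", "bob"])
print(stats)  # one row per player with mean, std, 25%, 50%, 75%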
def new(data=None, custom=None):
"""Return a fresh instance of a KangarooTwelve object.
Args:
data (bytes/bytearray/memoryview):
Optional.
The very first chunk of the message to hash.
It is equivalent to an early call to :meth:`update`.
custom (bytes):
Optional.
A customization byte string.
:Return: A :class:`K12_XOF` object
"""
return K12_XOF(data, custom)
|
5177ce6dccdc7ec7b764f90748bfda48b1c6bf6f
| 28,565 |
def document_edit(document_id: int):
"""Edits document entry.
Args:
document_id: ID of the document to be edited
"""
document = Upload.get_by_id(document_id)
if not document:
return abort(404)
form = DocumentEditForm()
if request.method == 'GET':
form.name.data = document.name
form.description.data = document.description
form.type.data = document.type
elif form.validate_on_submit():
document.name = form.name.data
document.description = form.description.data
document.type = form.type.data
if form.file.data:
document.replace(form.file.data)
db.session.commit()
return redirect_return()
return render_template('upload/document.html', form=form)
|
5233466553b98566c624a887e0d14556f6edeae9
| 28,566 |
from typing import Union
from typing import List
def get_output_tensors(graph: Union[tf.Graph, GraphDef]) -> List[str]:
"""
Return the names of the graph's output tensors.
Args:
graph: Graph or GraphDef object
Returns:
List of tensor names
"""
return [node.tensor for node in get_output_nodes(graph)]
|
598e1e5d223875bc2e094127ab40dfc84a05f2f8
| 28,567 |
def create_build_list(select_repo, all_repos_opt):
"""Create a list of repos to build depending on a menu that the user picks from."""
if all_repos_opt is True:
build_list = repo_info.REPO_LIST
print "Building repos: " + str(build_list)
print "\n"
return build_list
    # If the user has selected the repos to build, the indexes are used to select
    # the repo names from the menu and they are appended to the build_list
select_repo_list = select_repo.split(',')
print "select_repo_list:", select_repo_list
select_repo_map = map(int, select_repo_list)
print "select_repo_map:", select_repo_map
build_list = []
for repo_num in select_repo_map:
repo_name = repo_info.REPO_LIST[repo_num]
build_list.append(repo_name)
if not build_list:
print "No applicable repos selected."
exit()
else:
print "Building repos: " + str(build_list)
print "\n"
return build_list
|
3976d4479c2c8ee8c8381362e00aadb161dc5701
| 28,568 |
def hinton(matrix, significant=None, max_weight=None, ax=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = [2 ** np.ceil(np.log(np.abs(matrix[i]).max()) / np.log(2)) for i in range(matrix.shape[0])]
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
if significant is None:
bcolor = color
else:
if np.abs(significant[x][y]) > 2.575:
bcolor = 'blue'
else:
bcolor = color
size = np.sqrt(np.abs(w) / max_weight[x])
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size, facecolor=color, edgecolor=bcolor)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
return ax
|
93e3b4ed863e7542d243ccb5c33fe5187046f3a6
| 28,570 |
def to_bin(s):
"""
:param s: string to represent as binary
"""
r = []
for c in s:
if not c:
continue
t = "{:08b}".format(ord(c))
r.append(t)
return '\n'.join(r)
|
b4c819ae25983a66e6562b3677decd8389f5fbe2
| 28,571 |
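A quick worked example, assuming to_bin from the snippet above is in scope; each character becomes its 8-bit ordinal on its own line.
print(to_bin("Hi"))
# 01001000  <- 'H' (ord 72)
# 01101001  <- 'i' (ord 105)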
def get_bppair(bamfile, bp_cand_df, \
seq_len = 50, seed_len = 5, min_nt = 5,
match_method = 'fuzzy_match'):
"""
get the bppairs from bp_cand_stats (a list of bps)
parameters:
seq_len - # bases within breakend
seed_len - # of bases up and down stream of the breakend
in assembled b sequence. up: seed_len; down: seed_len respectively
"""
bp_cand_df_sorted = bp_cand_df.sort_values(['Chrom', 'Coord', 'Clip'])
bplist = [x[1:4] for x in bp_cand_df_sorted.itertuples()]
# get the breakpoint pairs
# note: searching use a different (shorter) seq_len parameter
# to increase running efficiency
bpduo = get_breakpoint_duo(bamfile, bplist, seq_len, seed_len, min_nt)
# count the # of supporting reads (clip reads)
# note: counting use a different (longer) seq_len parameter
    # to ensure the reads are fully covered in the breakend
tt = [list(row[0:3] + row[3:6] \
+ tuple(join_breakpoint(bamfile, row[0:3], row[3:6], \
offset = row[7], match_method = match_method)) \
+ row[7:9])\
for row in bpduo]
# format output
t2 = [row[0:6] + [row[6][1] + row[7][1]] + row[8:10] \
for row in tt \
if row[6][1] > 0 and row[7][1] > 0 \
and row[6][0] == row[7][0]]
colnames = ["Chrom1", "Coord1", "Clip1",
"Chrom2", "Coord2", "Clip2", 'Count', 'offset', 'Seq']
bp_pair_df = pd.DataFrame(t2, columns = colnames)
return bp_pair_df.sort_values(
'Count', ascending=False).reset_index(drop=True)
|
be37ded59aac2cd481e3891e8660b4c08c7327ee
| 28,572 |
def get_dropdown_items(df: pd.DataFrame, attribute: str) -> list:
"""
Returns a list of dropdown elements for a given attribute name.
:param df: Pandas DataFrame object which contains the attribute
:param attribute: str, can be either port, vessel_type, year, or month
:return: list of unique attribute values
"""
if attribute == "port":
return df["port"].unique().tolist()
elif attribute == "vessel_type":
return ["All", *sorted(df["ship_type"].unique().tolist())]
elif attribute == "year":
return df["year"].unique().tolist()
elif attribute == "month":
return df["month"].unique().tolist()
else:
raise KeyError("Invalid value for `argument`")
|
c66b17cc4e47e05604b7cc6fde83fd2536b25962
| 28,573 |
import time
def RunFromFile():
"""Take robot commands as input"""
lm = ev3.LargeMotor("outC")
assert lm.connected # left motor
rm = ev3.LargeMotor("outA")
assert rm.connected # right motor
drive = ReadInDirection()
t0 = time.time()
a = True
while a:
a = drive.run()
t1 = time.time()
lm.run_forever(speed_sp=(0))
rm.run_forever(speed_sp=(0))
return t1 - t0
|
e77546cef50c27d292deb22f65a524a7d402a640
| 28,574 |
def veljavna(barva, di, dj, polje, i, j):
"""Če je poteza v smeri (di,dj) na polju (i,j) veljavna, vrne True, sicer vrne False"""
#parametra di in dj predstavljata spremembo koordinate i in koordinate j
#npr. če je di==1 in dj==1, se pomikamo po diagonali proti desnemu spodnjemu
#robu plošče in preverjamo, ali je v tej smeri poteza veljavna
k = 1
while (0 <= i + k * di <= 7) and (0 <= j + k * dj <= 7) and polje[i+k*di][j+k*dj] == drugi(barva):
k += 1
if (0 <= i +k * di <= 7) and (0 <= j + k * dj <= 7):
return polje[i+k*di][j+k*dj] == barva and k>1
else:
return False
|
128cf01f8947a30d8c0e4f39d4fd54308892a103
| 28,575 |
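An illustrative sketch of the direction scan above, assuming veljavna is in scope; drugi is a hypothetical stand-in for the helper the snippet relies on (it returns the opposing colour).
def drugi(barva):  # hypothetical helper: return the opponent's colour
    return "B" if barva == "W" else "W"
polje = [["." for _ in range(8)] for _ in range(8)]
polje[3][4] = "B"   # one opposing piece to the right of (3, 3) ...
polje[3][5] = "W"   # ... capped by our own colour
print(veljavna("W", 0, 1, polje, 3, 3))   # True: the move flanks the B piece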
def _do_filter(items, scores, filter_out, return_scores, n):
"""Filter items out of the recommendations.
Given a list of items to filter out, remove them from recommended items
and scores.
"""
# Zip items/scores up
best = zip(items, scores)
return _recommend_items_and_maybe_scores(
best=best, return_scores=return_scores, n=n,
filter_items=filter_out)
|
644cdbe1072dfa397e58f9d51a21fc515d569afe
| 28,576 |
def userlogout(request):
"""
Log out a client from the application.
    This function uses Django's authentication system to clear the session,
etc. The view will redirect the user to the index page after logging
out.
Parameters:
request -- An HttpRequest
Returns:
An HttpResponseRedirect to the root url.
"""
key = request.session.session_key
logout(request)
Session.objects.filter(session_key=key).delete()
if 'next' in request.GET:
return HttpResponseRedirect(request.GET['next'])
else:
return HttpResponseRedirect('/')
|
e12bb923268592841f0c98613ab0226f56c8cbf6
| 28,577 |
def regularize(dn, a0, method):
"""Regularization (amplitude limitation) of radial filters.
Amplitude limitation of radial filter coefficients, methods according
to (cf. Rettberg, Spors : DAGA 2014)
Parameters
----------
dn : numpy.ndarray
Values to be regularized
a0 : float
Parameter for regularization (not required for all methods)
    method : {'none', 'discard', 'hardclip', 'softclip', 'Tikh', 'wng'}
        Method used for regularization/amplitude limitation
        (none, discard, hard clipping, soft clipping, Tikhonov, White Noise Gain).
Returns
-------
dn : numpy.ndarray
Regularized values.
    hn : array_like
        Limiting/regularization factors that were applied to `dn`.
"""
idx = np.abs(dn) > a0
if method == 'none':
hn = np.ones_like(dn)
elif method == 'discard':
hn = np.ones_like(dn)
hn[idx] = 0
elif method == 'hardclip':
hn = np.ones_like(dn)
hn[idx] = a0 / np.abs(dn[idx])
elif method == 'softclip':
scaling = np.pi / 2
hn = a0 / abs(dn)
hn = 2 / np.pi * np.arctan(scaling * hn)
elif method == 'Tikh':
a0 = np.sqrt(a0 / 2)
alpha = (1 - np.sqrt(1 - 1/(a0**2))) / (1 + np.sqrt(1 - 1/(a0**2)))
hn = 1 / (1 + alpha**2 * np.abs(dn)**2)
# hn = 1 / (1 + alpha**2 * np.abs(dn))
elif method == 'wng':
hn = 1/(np.abs(dn)**2)
# hn = hn/np.max(hn)
else:
raise ValueError('method must be either: none, ' +
'discard, hardclip, softclip, Tikh or wng')
dn = dn * hn
return dn, hn
|
fe4722a273060dc59b5489c0447e6e8a79a3046f
| 28,580 |
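A brief worked example of the 'hardclip' branch, assuming numpy is imported and regularize is in scope; coefficients above a0 are scaled back onto the limit.
dn = np.array([0.5, 2.0, -4.0])
dn_reg, hn = regularize(dn, a0=1.0, method='hardclip')
print(hn)      # [1.   0.5  0.25]
print(dn_reg)  # [ 0.5  1.  -1. ]  magnitudes capped at a0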
from typing import Optional
def get_fields_by_queue(client: Client, queue: Optional[list]) -> list:
"""
Creating a list of all queue ids that are in the system.
Args:
client: Client for the api.
Returns:
list of queue ids.
"""
if queue:
queues_id = queue
else:
queues_id = get_queue_ids(client)
fields: list = []
for q in queues_id:
client.update_token()
fields_by_queue = client.queues_list_fields_request(queue_number=str(q))
fields_by_queue = fields_by_queue.get('Fields', [])
for field in fields_by_queue:
if field.get('jsonKey') not in fields:
# get internal error 500 from server with related tickets
if field.get('jsonKey') != 'related_tickets' and field.get('jsonKey') != 'referring_tickets':
fields.append(field.get('jsonKey'))
return fields
|
a6ee562e50ec749ec9132bf39ca0e39b0336bdbc
| 28,581 |
def emitter_20():
"""Interval, emit from center, velocity fixed speed around 360 degrees"""
e = arcade.Emitter(
center_xy=CENTER_POS,
emit_controller=arcade.EmitterIntervalWithTime(DEFAULT_EMIT_INTERVAL, DEFAULT_EMIT_DURATION),
particle_factory=lambda emitter: arcade.LifetimeParticle(
filename_or_texture=TEXTURE,
change_xy=arcade.rand_on_circle((0.0, 0.0), PARTICLE_SPEED_FAST),
lifetime=DEFAULT_PARTICLE_LIFETIME,
scale=DEFAULT_SCALE,
alpha=DEFAULT_ALPHA
)
)
return emitter_20.__doc__, e
|
6a7d6689299cab15fbe6ab95e6bb62164ae09657
| 28,582 |
def add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses"""
loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients
|
e8ae0e80e2ca3ce6f7782173872f4d3e01c916c1
| 28,583 |
def fixed_discount(order: Order):
"""
5k fixed amount discount
"""
return Decimal("5000")
|
867a98049e19aea03d421c37141dbc7acd651fc9
| 28,584 |
def index():
"""
The index page.
Just welcomes the user and asks them to start a quiz.
"""
return render_template('index.html')
|
13e70c6fd82c11f3cd6aed94b043fd1110e65c3c
| 28,585 |
from typing import BinaryIO
def parse_element(stream: BinaryIO):
"""Parse the content of the UPF file to determine the element.
:param stream: a filelike object with the binary content of the file.
:return: the symbol of the element following the IUPAC naming standard.
"""
lines = stream.read().decode('utf-8')
match = REGEX_ELEMENT_V2.search(lines)
if match:
return match.group('element')
match = REGEX_ELEMENT_V1.search(lines)
if match:
return match.group('element')
raise ValueError('could not parse the element from the UPF content.')
|
2911d7ee97df77fd02bbd688a5044c8ba6f5434e
| 28,586 |
from typing import Optional
from typing import List
from typing import Any
from typing import Type
def mention_subclass(
class_name: str,
cardinality: Optional[int] = None,
values: Optional[List[Any]] = None,
table_name: Optional[str] = None,
) -> Type[Mention]:
"""Create new mention.
Creates and returns a Mention subclass with provided argument names,
which are Context type. Creates the table in DB if does not exist yet.
Import using:
.. code-block:: python
from fonduer.candidates.models import mention_subclass
:param class_name: The name of the class, should be "camel case" e.g.
NewMention
:param table_name: The name of the corresponding table in DB; if not
provided, is converted from camel case by default, e.g. new_mention
:param values: The values that the variable corresponding to the Mention
can take. By default it will be [True, False].
:param cardinality: The cardinality of the variable corresponding to the
Mention. By default is 2 i.e. is a binary value, e.g. is or is not
a true mention.
"""
if table_name is None:
table_name = camel_to_under(class_name)
# If cardinality and values are None, default to binary classification
if cardinality is None and values is None:
values = [True, False]
cardinality = 2
# Else use values if present, and validate proper input
elif values is not None:
if cardinality is not None and len(values) != cardinality:
raise ValueError("Number of values must match cardinality.")
if None in values:
raise ValueError("`None` is a protected value.")
# Note that bools are instances of ints in Python...
if any([isinstance(v, int) and not isinstance(v, bool) for v in values]):
raise ValueError(
(
"Default usage of values is consecutive integers."
"Leave values unset if trying to define values as integers."
)
)
cardinality = len(values)
# If cardinality is specified but not values, fill in with ints
elif cardinality is not None:
values = list(range(cardinality))
args = ["context"]
class_spec = (args, table_name, cardinality, values)
if class_name in mention_subclasses:
if class_spec == mention_subclasses[class_name][1]:
return mention_subclasses[class_name][0]
else:
raise ValueError(
f"Mention subclass {class_name} "
f"already exists in memory with incompatible "
f"specification: {mention_subclasses[class_name][1]}"
)
else:
# Set the class attributes == the columns in the database
class_attribs = {
# Declares name for storage table
"__tablename__": table_name,
# Connects mention_subclass records to generic Mention records
"id": Column(
Integer, ForeignKey("mention.id", ondelete="CASCADE"), primary_key=True
),
# Store values & cardinality information in the class only
"values": values,
"cardinality": cardinality,
# Polymorphism information for SQLAlchemy
"__mapper_args__": {"polymorphic_identity": table_name},
# Helper method to get argument names
"__argnames__": args,
}
class_attribs["document_id"] = Column(
Integer, ForeignKey("document.id", ondelete="CASCADE")
)
class_attribs["document"] = relationship(
"Document",
backref=backref(table_name + "s", cascade="all, delete-orphan"),
foreign_keys=class_attribs["document_id"],
)
# Create named arguments, i.e. the entity mentions comprising the
# relation mention.
unique_args = []
for arg in args:
# Primary arguments are constituent Contexts, and their ids
class_attribs[arg + "_id"] = Column(
Integer, ForeignKey("context.id", ondelete="CASCADE")
)
class_attribs[arg] = relationship(
"Context", foreign_keys=class_attribs[arg + "_id"]
)
unique_args.append(class_attribs[arg + "_id"])
# Add unique constraints to the arguments
class_attribs["__table_args__"] = (UniqueConstraint(*unique_args),)
# Create class
C = type(class_name, (Mention,), class_attribs)
# Create table in DB
if Meta.engine and not Meta.engine.has_table(table_name):
C.__table__.create(bind=Meta.engine) # type: ignore
mention_subclasses[class_name] = C, class_spec
# Make this dynamically created class picklable
# https://stackoverflow.com/a/39529149
globals()[class_name] = C
return C
|
1639f19609b3b815a25e22729b3b0379caf13ac2
| 28,587 |
def undupe_column_names(df, template="{} ({})"):
"""
rename df column names so there are no duplicates (in place)
e.g. if there are two columns named "dog", the second column will be reformatted to "dog (2)"
Parameters
----------
df : pandas.DataFrame
dataframe whose column names should be de-duplicated
template : template taking two arguments (old_name, int) to use to rename columns
Returns
-------
df : pandas.DataFrame
dataframe that was renamed in place, for convenience in chaining
"""
new_names = []
seen = set()
for name in df.columns:
n = 1
new_name = name
while new_name in seen:
n += 1
new_name = template.format(name, n)
new_names.append(new_name)
seen.add(new_name)
df.columns = new_names
return df
|
51d13bad25571bc60edd78026bb145ff99281e2d
| 28,588 |
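A short usage sketch, assuming pandas is imported and undupe_column_names is in scope:
df = pd.DataFrame([[1, 2, 3]], columns=["dog", "dog", "cat"])
undupe_column_names(df)
print(list(df.columns))  # ['dog', 'dog (2)', 'cat']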
def get_example_data(dataset_name):
"""
This is a smart package loader that locates text files inside our package
:param dataset_name:
:return:
"""
provider = get_provider('ebu_tt_live')
manager = ResourceManager()
source = provider.get_resource_string(manager, 'examples/'+dataset_name)
return source
|
5f5f3fd3485f63a4be2b85c2fed45a76e2d53f7c
| 28,590 |
def js(data):
""" JSをミニファイ """
# 今のところは何もしない
return data
|
2ee82b81dcb3cfb9d133ed218ba1c67b5d16f691
| 28,591 |
def _has_endpoint_name_flag(flags):
"""
Detect if the given flags contain any that use ``{endpoint_name}``.
"""
return '{endpoint_name}' in ''.join(flags)
|
e8827da778c97d3be05ec82ef3367686616d3a88
| 28,592 |
def convert_example(example,
tokenizer,
max_seq_len=512,
max_response_len=128,
max_knowledge_len=256,
mode='train'):
"""Convert all examples into necessary features."""
goal = example['goal']
knowledge = example['knowledge']
goal_knowledge = ' '.join([' '.join(lst) for lst in goal + knowledge])
if mode != 'test':
tokenized_example = tokenizer.dialogue_encode(
example['history'],
response=example['response'],
knowledge=goal_knowledge,
task_type='knowledge',
max_seq_len=max_seq_len,
max_response_len=max_response_len,
max_knowledge_len=max_knowledge_len,
return_length=True)
response_start = tokenized_example['input_ids'].index(
tokenizer.cls_token_id, 1)
response_end = tokenized_example['seq_len']
# Use to gather the logits corresponding to the labels during training
tokenized_example['masked_positions'] = list(
range(response_start, response_end - 1))
tokenized_example['labels'] = tokenized_example['input_ids'][
response_start + 1:response_end]
return tokenized_example
else:
tokenized_example = tokenizer.dialogue_encode(
example['history'],
knowledge=goal_knowledge,
task_type='knowledge',
max_seq_len=max_seq_len,
max_knowledge_len=max_knowledge_len,
add_start_token_as_response=True)
if 'response' in example:
tokenized_example['response'] = example['response']
return tokenized_example
|
5ebce39468cda942f2d4e73cd18f8fa4dd837f0a
| 28,593 |
def mag2Jy(info_dict, Mag):
"""Converts a magnitude into flux density in Jy
Parameters
-----------
info_dict: dictionary
Mag: array or float
AB or vega magnitude
Returns
-------
fluxJy: array or float
flux density in Jy
"""
fluxJy=info_dict['Flux_zero_Jy']*10**(-0.4*Mag)
return fluxJy
|
db8a56e1ca0529cd49abd68dea65ce6aeff7fd22
| 28,594 |
def load_array(data_arrays, batch_size, is_train=True):
"""Construct a PyTorch data iterator.
Defined in :numref:`sec_utils`"""
dataset = ArrayData(data_arrays)
data_column_size = len(data_arrays)
dataset = ds.GeneratorDataset(source=dataset, column_names=[str(i) for i in range(data_column_size)], shuffle=is_train)
dataset = dataset.batch(batch_size)
return dataset
|
804da2b88eceaeb84e2d5d6a3961aa12df958da3
| 28,595 |
def do_get_batched_targets(parser, token):
"""
Retrieves the list of broadcasters for an action and stores them in a context variable which has
``broadcasters`` property.
Example usage::
{% get_batched_targets action_id_list parent_action_id as batched_targets %}
"""
bits = token.contents.split()
if len(bits) != 5:
raise TemplateSyntaxError("'%s' tag takes exactly two arguments" % bits[0])
if bits[3] != 'as':
raise TemplateSyntaxError("second argument to '%s' tag must be 'as'" % bits[0])
return GetBatchedTargets(bits[1],bits[2],bits[4])
|
b4847c5acc480b88c0a9cd7b44467f307eed0d65
| 28,596 |
def task_success_slack_alert(context):
"""
Callback task that can be used in DAG to alert of successful task completion
Args:
context (dict): Context variable passed in from Airflow
Returns:
None: Calls the SlackWebhookOperator execute method internally
"""
slack_webhook_token = BaseHook.get_connection(SLACK_CONN_ID).password
slack_msg = """
:large_blue_circle: Task Succeeded!
*Task*: {task}
*Dag*: {dag}
*Execution Time*: {exec_date}
*Log Url*: {log_url}
""".format(
task=context.get("task_instance").task_id,
dag=context.get("task_instance").dag_id,
ti=context.get("task_instance"),
exec_date=context.get("execution_date"),
log_url="<"+context.get("task_instance").log_url+"| LOGS>",
)
success_alert = SlackWebhookOperator(
task_id="slack_test",
http_conn_id="slack",
webhook_token=slack_webhook_token,
message=slack_msg,
username="airflow",
)
return success_alert.execute(context=context)
|
596694b089f758a683eac677d7ca237a253a2bd2
| 28,597 |
import base64
def return_img_stream(img_local_path):
"""
    Utility function:
    read a local image file as a stream.
    :param img_local_path: absolute local path of a single image file
    :return: base64-encoded image stream
"""
img_stream = ''
with open(img_local_path, 'rb') as img_f:
img_stream = img_f.read()
img_stream = base64.b64encode(img_stream)
return img_stream
|
7ddee56650fcfabf951ca9b5844a08c7ae5fb2b7
| 28,598 |
from typing import Optional
from typing import Dict
from typing import Union
from typing import Any
def filter_log_on_max_no_activities(log: EventLog, max_no_activities : int = 25, parameters: Optional[Dict[Union[str, Parameters], Any]] = None) -> EventLog:
"""
Filter a log on a maximum number of activities
Parameters
-------------
log
Log
max_no_activities
Maximum number of activities
parameters
Parameters of the algorithm
Returns
-------------
filtered_log
Filtered version of the event log
"""
if parameters is None:
parameters = {}
activity_key = parameters[
PARAMETER_CONSTANT_ACTIVITY_KEY] if PARAMETER_CONSTANT_ACTIVITY_KEY in parameters else DEFAULT_NAME_KEY
parameters[PARAMETER_CONSTANT_ATTRIBUTE_KEY] = activity_key
all_activities = sorted([(x, y) for x, y in get_attribute_values(log, activity_key).items()], key=lambda x: x[1],
reverse=True)
activities = all_activities[:min(len(all_activities), max_no_activities)]
activities = [x[0] for x in activities]
if len(activities) < len(all_activities):
log = apply_events(log, activities, parameters=parameters)
return log
|
b09d376a758a10f784fd2b9f7036cc6e0d58be05
| 28,599 |
def GSSO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "2.0.5", **kwargs
) -> Graph:
"""Return GSSO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "2.0.5"
Version to retrieve
The available versions are:
- 2.0.5
"""
return AutomaticallyRetrievedGraph(
"GSSO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)()
|
ac5722afae3bb28321aa9d465873b12852b1f2f6
| 28,600 |
from typing import Optional
from typing import Union
from typing import Sequence
from typing import Dict
def tb(
data: pd.DataFrame,
columns: Optional[Union[Sequence[str], pd.Index]] = None,
subscales: Optional[Dict[str, Sequence[int]]] = None,
) -> pd.DataFrame:
"""Compute the **Technology Commitment Questionnaire (TB – Technologiebereitschaft)**.
It consists of the subscales with the item indices (count-by-one, i.e., the first question has the index 1!):
* Technology Acceptance (Technikakzeptanz – ``TechAcc``): [1, 2, 3, 4]
* Technology Competence Beliefs (Technikkompetenzüberzeugungen – ``TechComp``): [5, 6, 7, 8]
* Technology Control Beliefs (Technikkontrollüberzeugungen – ``TechControl``): [9, 10, 11, 12]
.. note::
This implementation assumes a score range of [1, 5].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
.. warning::
Column indices in ``subscales`` are assumed to start at 1 (instead of 0) to avoid confusion with
questionnaire item columns, which typically also start with index 1!
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
subscales : dict, optional
A dictionary with subscale names (keys) and column names or column indices (count-by-1) (values)
if only specific subscales should be computed.
Returns
-------
:class:`~pandas.DataFrame`
TB score
Raises
------
ValueError
if ``subscales`` is supplied and dict values are something else than a list of strings or a list of ints
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
Neyer, F. J. J., Felber, J., & Gebhardt, C. (2016). Kurzskala. Technikbereitschaft (TB)[Technology commitment].
In *ZIS-Zusammenstellung sozialwissenschaftlicher Items und Skalen (ed.)*.
"""
score_name = "TB"
score_range = [1, 5]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
if subscales is None:
_assert_num_columns(data, 12)
subscales = {"TechAcc": [1, 2, 3, 4], "TechComp": [5, 6, 7, 8], "TechControl": [9, 10, 11, 12]}
_assert_value_range(data, score_range)
# Reverse scores 5, 6, 7, 8
# (numbers in the dictionary correspond to the *positions* of the items to be reversed in the item list specified
# by the subscale dict)
data = _invert_subscales(data, subscales=subscales, idx_dict={"TechComp": [0, 1, 2, 3]}, score_range=score_range)
tb_data = _compute_questionnaire_subscales(data, score_name, subscales)
tb_data = pd.DataFrame(tb_data, index=data.index)
if len(data.columns) == 12:
# compute total score if all columns are present
tb_data[score_name] = tb_data.sum(axis=1)
return tb_data
|
ac92f6ed7dd484e076b80db32fff6bc9fdd64619
| 28,601 |
def format_date(date: str):
"""
    This function formats dates that are in MM/DD/YYYY format,
    converting them to YYYY-MM-DD, which is what SQLite requires.
:param date: The date to modify.
:return: The modified string.
"""
tmp = date.split("/")
return "{}-{}-{}".format(tmp[2], tmp[0], tmp[1])
|
f1a0149bfd96db557c49becdedb84789daa1168c
| 28,602 |
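A quick worked example, assuming format_date from the snippet above is in scope:
print(format_date("12/31/2023"))  # 2023-12-31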
def _doUpgradeApply(sUpgradeDir, asMembers):
"""
    Apply the directories and files from the upgrade.
    Returns True/False/Exception.
"""
#
# Create directories first since that's least intrusive.
#
for sMember in asMembers:
if sMember[-1] == '/':
sMember = sMember[len('testboxscript/'):];
if sMember != '':
sFull = os.path.join(g_ksValidationKitDir, sMember);
if not os.path.isdir(sFull):
os.makedirs(sFull, 0755);
#
# Move the files into place.
#
fRc = True;
asOldFiles = [];
for sMember in asMembers:
if sMember[-1] != '/':
sSrc = os.path.join(sUpgradeDir, sMember);
sDst = os.path.join(g_ksValidationKitDir, sMember[len('testboxscript/'):]);
# Move the old file out of the way first.
sDstRm = None;
if os.path.exists(sDst):
testboxcommons.log2('Info: Installing "%s"' % (sDst,));
sDstRm = '%s-delete-me-%s' % (sDst, uuid.uuid4(),);
try:
os.rename(sDst, sDstRm);
except Exception, oXcpt:
testboxcommons.log('Error: failed to rename (old) "%s" to "%s": %s' % (sDst, sDstRm, oXcpt));
try:
shutil.copy(sDst, sDstRm);
except Exception, oXcpt:
testboxcommons.log('Error: failed to copy (old) "%s" to "%s": %s' % (sDst, sDstRm, oXcpt));
break;
try:
os.unlink(sDst);
except Exception, oXcpt:
testboxcommons.log('Error: failed to unlink (old) "%s": %s' % (sDst, oXcpt));
break;
# Move/copy the new one into place.
testboxcommons.log2('Info: Installing "%s"' % (sDst,));
try:
os.rename(sSrc, sDst);
except Exception, oXcpt:
testboxcommons.log('Warning: failed to rename (new) "%s" to "%s": %s' % (sSrc, sDst, oXcpt));
try:
shutil.copy(sSrc, sDst);
except:
testboxcommons.log('Error: failed to copy (new) "%s" to "%s": %s' % (sSrc, sDst, oXcpt));
fRc = False;
break;
#
# Roll back on failure.
#
if fRc is not True:
testboxcommons.log('Attempting to roll back old files...');
for sDstRm in asOldFiles:
sDst = sDstRm[:sDstRm.rfind('-delete-me')];
testboxcommons.log2('Info: Rolling back "%s" (%s)' % (sDst, os.path.basename(sDstRm)));
try:
shutil.move(sDstRm, sDst);
except:
testboxcommons.log('Error: failed to rollback "%s" onto "%s": %s' % (sDstRm, sDst, oXcpt));
return False;
return True;
|
a181e710db010733828099a881ee7239f90674a7
| 28,603 |
def _parse_atat_lattice(lattice_in):
"""Parse an ATAT-style `lat.in` string.
The parsed string will be in three groups: (Coordinate system) (lattice) (atoms)
where the atom group is split up into subgroups, each describing the position and atom name
"""
float_number = Regex(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?').setParseAction(lambda t: [float(t[0])])
vector = Group(float_number + float_number + float_number)
angles = vector
vector_line = vector + Suppress(LineEnd())
coord_sys = Group((vector_line + vector_line + vector_line) | (vector + angles + Suppress(LineEnd())))
lattice = Group(vector + vector + vector)
atom = Group(vector + Group(OneOrMore(Word(alphas, alphanums + '_'))))
atat_lattice_grammer = coord_sys + lattice + Group(OneOrMore(atom))
# parse the input string and convert it to a POSCAR string
return atat_lattice_grammer.parseString(lattice_in)
|
4cb40f7c25519bc300e389d0a2d72383dda3c7f0
| 28,604 |
from typing import Union
def datetime_attribute_timeseries(time_index: Union[pd.DatetimeIndex, TimeSeries],
attribute: str,
one_hot: bool = False) -> TimeSeries:
"""
Returns a new TimeSeries with index `time_index` and one or more dimensions containing
    (optionally one-hot encoded) pd.DatetimeIndex attribute information derived from the index.
Parameters
----------
time_index
Either a `pd.DatetimeIndex` attribute which will serve as the basis of the new column(s), or
a `TimeSeries` whose time axis will serve this purpose.
attribute
An attribute of `pd.DatetimeIndex` - e.g. "month", "weekday", "day", "hour", "minute", "second"
one_hot
Boolean value indicating whether to add the specified attribute as a one hot encoding
(results in more columns).
Returns
-------
TimeSeries
New datetime attribute TimeSeries instance.
"""
if isinstance(time_index, TimeSeries):
time_index = time_index.time_index()
raise_if_not(hasattr(pd.DatetimeIndex, attribute), '"attribute" needs to be an attribute '
'of pd.DatetimeIndex', logger)
num_values_dict = {
'month': 12,
'day': 31,
'weekday': 7,
'hour': 24,
'quarter': 4
}
values = getattr(time_index, attribute)
if one_hot:
        raise_if_not(attribute in num_values_dict, "Given datetime attribute not supported "
"with one-hot encoding.", logger)
values_df = pd.get_dummies(values)
# fill missing columns (in case not all values appear in time_index)
for i in range(1, num_values_dict[attribute] + 1):
if not (i in values_df.columns):
values_df[i] = 0
values_df = values_df[range(1, num_values_dict[attribute] + 1)]
else:
values_df = pd.DataFrame(values)
values_df.index = time_index
if one_hot:
values_df.columns = [attribute + '_' + str(column_name) for column_name in values_df.columns]
return TimeSeries(values_df)
|
9330f22d7b81aaeb57130563a3f32009e48e3fe0
| 28,605 |
def hexscale_from_cmap(cmap, N):
"""
Evaluate a colormap at N points.
Parameters
----------
cmap : function
a function taking a scalar value between 0 and 1 and giving a color as
rgb(a) with values between 0 and 1. These are for example the pyplot
colormaps, like plt.cm.viridis
N : int
number of steps on which to evaluate the colormap
Returns
-------
scale : a list of numbers representing the colors from the map, written in
the format 0xrrggbb
"""
rgb = [(round(255*col[0]), round(255*col[1]), round(255*col[2])) for col in map(cmap, np.arange(N)/(N-1))]
return [0x010000*col[0] + 0x000100*col[1] + 0x000001*col[2] for col in rgb]
|
0c26f7b404ac3643317db81eacac83d0d62e5f80
| 28,606 |
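A minimal usage sketch, assuming numpy is imported, hexscale_from_cmap is in scope, and matplotlib is available for the colormap:
import matplotlib.pyplot as plt
scale = hexscale_from_cmap(plt.cm.viridis, 5)
print([f"{int(c):#08x}" for c in scale])  # five packed 0xrrggbb colours, dark purple to yellow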
def getFirebaseData(userID):
"""
This gets the data from the Firebase database and converts it to a readable dictionary
Args:
userID (string): the id of the user
"""
cred = credentials.Certificate("serviceAccountKey.json")
a = firebase_admin.initialize_app(cred)
ourDatabase = firestore.client()
collection = ourDatabase.collection('pillboxes')
doc = collection.document(userID)
userInfo = doc.get().to_dict()
userAlarms = userInfo['alarms']
firebase_admin.delete_app(a)
return userAlarms
|
4da109004771908009ed431ad69a25ac52d0969c
| 28,607 |
def gen_tracer(code, f_globals):
""" Generate a trace function from a code object.
Parameters
----------
code : CodeType
The code object created by the Enaml compiler.
f_globals : dict
The global scope for the returned function.
Returns
-------
result : FunctionType
A new function with optimized local variable access
and instrumentation for invoking a code tracer.
"""
bc_code = bc.Bytecode.from_code(code)
optimize_locals(bc_code)
bc_code = inject_tracing(bc_code)
bc_code.flags ^= (bc_code.flags & bc.CompilerFlags.NEWLOCALS)
bc_code.argnames = ['_[tracer]'] + bc_code.argnames
bc_code.argcount += 1
new_code = bc_code.to_code()
return FunctionType(new_code, f_globals)
|
abb9e043ad12f4ced014883b6aee230095b63a18
| 28,608 |
def MAD(AnalogSignal):
""" median absolute deviation of an AnalogSignal """
X = AnalogSignal.magnitude
mad = sp.median(sp.absolute(X - sp.median(X))) * AnalogSignal.units
return mad
|
069231a87e755de4bff6541560c0e5beabc91e0d
| 28,609 |
def angular_frequency(vacuum_wavelength):
"""Angular frequency :math:`\omega = 2\pi c / \lambda`
Args:
vacuum_wavelength (float): Vacuum wavelength in length unit
Returns:
Angular frequency in the units of c=1 (time units=length units). This is at the same time the vacuum wavenumber.
"""
return 2 * np.pi / vacuum_wavelength
|
305349cff0d7b9489d92eb301c6d058ca11467f0
| 28,610 |
def web_urls():
"""Builds and returns the web_urls for web.py.
"""
urls = (
'/export/?', RestHandler,
'/export/bdbag/?', ExportBag,
'/export/bdbag/([^/]+)', ExportRetrieve,
'/export/bdbag/([^/]+)/(.+)', ExportRetrieve,
'/export/file/?', ExportFiles,
'/export/file/([^/]+)', ExportRetrieve,
'/export/file/([^/]+)/(.+)', ExportRetrieve,
)
return tuple(urls)
|
d2580499a7b4bad8c94494fd103a2fe0f6d607d6
| 28,611 |
import torch
def box_cxcywh_norm_to_cxcywh(box: TensorOrArray, height: int, width: int) -> TensorOrArray:
"""Converts bounding boxes from (cx, cy, w, h) norm format to (cx, cy, w, h)
format.
(cx, cy) refers to center of bounding box.
(a, r) refers to area (width * height) and aspect ratio (width / height) of
bounding box.
(w, h) refers to width and height of bounding box.
_norm refers to normalized value in the range `[0.0, 1.0]`. For example:
`x_norm = absolute_x / image_width`
`height_norm = absolute_height / image_height`.
Args:
box (TensorOrArray[*, 4]):
Boxes in (cx, cy, w, h) norm format which will be converted.
height (int):
Height of the image.
width (int):
Width of the image.
Returns:
box (TensorOrArray[*, 4]):
Boxes in (cx, cy, w, h) format.
"""
box = upcast(box)
cx_norm, cy_norm, w_norm, h_norm, *_ = box.T
cx = cx_norm * width
cy = cy_norm * height
w = w_norm * width
h = h_norm * height
if isinstance(box, Tensor):
return torch.stack((cx, cy, w, h), -1)
elif isinstance(box, np.ndarray):
return np.stack((cx, cy, w, h), -1)
else:
raise ValueError(f"box must be a `Tensor` or `np.ndarray`.")
|
7cf112b7f3420161513e4b70ef531fb586074431
| 28,612 |
def lyrics_from_url(url):
"""Return a tuple with song's name, author and lyrics."""
source = identify_url(url)
extractor = {
'letras': (lyrics_from_letrasmus, info_from_letrasmus),
'vagalume': (lyrics_from_vagalume, info_from_vagalume)
}
html = html_from_url(url)
if source in extractor:
info = extractor[source][1](html)
return (info[0], info[1], extractor[source][0](html))
raise Exception("Unknow url's source.")
|
2034c1ee26ce563f227f49de10b7e1b56092c7c8
| 28,613 |
import json
def _get_pycons():
"""Helper function that retrieves required PyCon data
and returns a list of PyCon objects
"""
with open(pycons_file, "r", encoding="utf-8") as f:
return [
PyCon(
pycon["name"],
pycon["city"],
pycon["country"],
parse(pycon["start_date"]),
parse(pycon["end_date"]),
pycon["url"],
)
for pycon in json.load(f)
]
|
749947829d4c28b08f957505d8ede02fe8d5ecbb
| 28,614 |
def f_function(chromosome):
"""Define Fitness Function Here."""
x = chromosome.convert_to_integer()
return (15 * x[0]) - (x[0] * x[0])
# return (((15 * x[0]) - (x[0] * x[0])) * -1) + 1000 To Find Minimum Solution
|
aee3744c63ada24302857ef4ddb4e6aff35fc69e
| 28,615 |
def _GetRevsAroundRev(data_series, revision):
"""Gets a list of revisions from before to after a given revision.
Args:
data_series: A list of (revision, value).
revision: A revision number.
Returns:
A list of revisions.
"""
if not _MAX_SEGMENT_SIZE_AROUND_ANOMALY:
return [revision]
middle_index = 0
for i in xrange(len(data_series)):
if data_series[i][0] == revision:
middle_index = i
break
start_index = max(0, middle_index - _MAX_SEGMENT_SIZE_AROUND_ANOMALY)
end_index = middle_index + _MAX_SEGMENT_SIZE_AROUND_ANOMALY + 1
series_around_rev = data_series[start_index:end_index]
return [s[0] for s in series_around_rev]
|
966e590f4cc1e017ed6d4588ca15655b9de61d7a
| 28,616 |
def get_output_length():
"""Returns the length of the convnet output."""
return conv_base.layers[-1].output_shape[-1]
|
6471f0b1331a97147be43b464c5fb5384e185980
| 28,617 |
def rules(command, working_directory=None, root=True, **kargs):
"""
Main entry point for build_rules.py.
When ``makeprojects``, ``cleanme``, or ``buildme`` is executed, they will
call this function to perform the actions required for build customization.
The parameter ``working_directory`` is required, and if it has no default
parameter, this function will only be called with the folder that this
file resides in. If there is a default parameter of ``None``, it will be
called with any folder that it is invoked on. If the default parameter is a
directory, this function will only be called if that directory is desired.
    The optional parameter of ``root`` alerts the tool if subsequent processing
of other ``build_rules.py`` files are needed or if set to have a default
parameter of ``True``, processing will end once the calls to this
``rules()`` function are completed.
Commands are 'build', 'clean', 'prebuild', 'postbuild', 'project',
'configurations'
    Args:
command: Command to execute.
working_directory: Directory for this function to operate on.
root: If True, stop execution upon completion of this function
kargs: Extra arguments specific to each command.
Return:
Zero on no error or no action.
"""
if command == 'clean':
burger.clean_directories(
working_directory, ('bin', 'temp', 'obj', 'Properties', '.vs'))
burger.clean_files(working_directory, ('Key.snk', '*.user', '*.suo'))
return 0
|
9456822c0956fa847e19917b735d1a6680d0961a
| 28,618 |
def get_py_func_body(line_numbers, file_name, annot):
""" Function to get method/function body from files
@parameters
    file_name: Path to the file
    line_numbers: function/method line numbers
annot: Annotation condition (Ex: @Test)
@return
This function returns python function/method definitions in the given files"""
func_name = []
func_body = []
line_data = list([line.rstrip() for line
in open(file_name, encoding='utf-8', errors='ignore')]) # pragma: no mutate
data, data_func_name = process_py_methods(file_name, line_numbers, line_data)
if annot is not None:
data_func_name, data = get_py_annot_methods(file_name, data_func_name, data, annot)
if len(data_func_name).__trunc__() != 0:
func_name, func_body = process_py_func_body(data, data_func_name)
return func_name, func_body
|
c6324e13831008118a39a599cce8b9ec3513b0a1
| 28,619 |
from typing import Any
from typing import Union
def extract_optional_annotation(annotation: Any) -> Any:
"""
Determine if the given annotation is an Optional field
"""
if (
hasattr(annotation, "__origin__")
and annotation.__origin__ is Union
and getattr(annotation, "__args__", None) is not None
and len(annotation.__args__) == 2
and annotation.__args__[-1] is type(None)
):
return extract_inner_annotation(annotation)
return None
|
024e28f88005b03e45b96c739a44bd56b2115849
| 28,620 |
import itertools
import json
def combine_pred_and_truth(prediction, truth_file):
"""
Combine the predicted labels and the ground truth labels for testing purposes.
:param prediction: The prediction labels.
:param truth_file: The ground truth file.
:return: The combined prediction and ground truth labels.
"""
f = open(truth_file, 'r')
fr = f.readlines()
prediction_and_truth = []
for pred_labels, truth_line in itertools.izip(prediction, fr):
instance = json.loads(truth_line.strip('\r\n'))
truth_labels = instance['tags']
prediction_and_truth.append([pred_labels[0], truth_labels])
return prediction_and_truth
|
d7dee4add59a4b3df7e0bd3a6e5fcc981ff23d59
| 28,621 |
def calc_cogs_time_series(days, cogs_annual):
"""
Cost of Goods Sold Formula
Notes
-----
Can adjust for days/weekly/monthly/annually in the future - ASSUMED: CONSUMABLES PURCHASED MONTHLY
"""
cogs_time_series = []
for i in range(days):
if i % DAYS_IN_MONTH == 0:
cogs_time_series.append(cogs_annual / YEARLY_TO_MONTHLY_31)
else:
cogs_time_series.append(0)
return cogs_time_series
|
b3efffc274676549f23f7a20321dd2aac02c1666
| 28,622 |
def stations():
""" Returning the all Stations """
station_list = session.query(station.name).all()
jsonify_sation = list(np.ravel(station_list))
#Jsonify results
return jsonify(jsonify_sation)
|
ce51f8551043d740657da7fc7d3f3d9afcead4d1
| 28,624 |
def test_run_sht_rudeadyet_default(tmpdir) -> int:
"""
Purpose:
Test to make sure sht-rudeadyet run works
Args:
N/A
Returns:
(Int): 0 if passed run, -1 if not
"""
attack = "sht_rudeadyet"
run_config = "configs/mw_locust-sht_rudeadyet.json"
return magicwand_run(tmpdir, attack, run_config)
|
072a8608dc5ac8007e62e8babbd3047fbb8b8bce
| 28,625 |
def _clean_markdown_cells(ntbk):
"""Clean up cell text of an nbformat NotebookNode."""
# Remove '#' from the end of markdown headers
for cell in ntbk.cells:
if cell.cell_type == "markdown":
cell_lines = cell.source.split('\n')
for ii, line in enumerate(cell_lines):
if line.startswith('#'):
cell_lines[ii] = line.rstrip('#').rstrip()
cell.source = '\n'.join(cell_lines)
return ntbk
|
8b34ff6713a323340ea27f6d8f498a215ca9d98a
| 28,626 |
import asyncio
def get_thread_wrapper(target, name):
"""Returns a target thread that prints unexpected exceptions to the logging.
Args:
target: Func or coroutine to wrap.
name(str): Task name.
"""
@wraps(target)
def wrapper(*args, **kwargs):
try:
result = target(*args, **kwargs)
except Exception:
logger.warning(f"Unexpected exception in Tamarco thread {name}", exc_info=True)
raise
else:
if is_awaitable(result):
thread_loop = asyncio.new_event_loop()
asyncio.set_event_loop(thread_loop)
coro = result
result = thread_loop.run_until_complete(observe_exceptions(coro, name))
return result
return wrapper
|
75ddf5ca81825769e51fd8ed4f850ec9db18a31e
| 28,627 |
def getStructType(ea):
"""
Get type information from an ea. Used to get the structure type id
"""
flags = idaapi.getFlags(ea)
ti = idaapi.opinfo_t()
oi = idaapi.get_opinfo(ea, 0, flags, ti)
if oi is not None:
return ti
else:
return None
|
3ed8a000405f87b0e069d165dd72215852d22bd5
| 28,629 |
def confirm_email(token):
"""
GET endpoint that confirms the new officer user. This endpoint link is normally within
the confirmation email.
"""
club_email = flask_exts.email_verifier.confirm_token(token, 'confirm-email')
if club_email is None:
raise JsonError(status='error', reason='The confirmation link is invalid.', status_=404)
potential_user = NewOfficerUser.objects(email=club_email).first()
if potential_user is None:
raise JsonError(status='error', reason='The user matching the email does not exist.', status_=404)
# First, revoke the given email token
flask_exts.email_verifier.revoke_token(token, 'confirm-email')
if potential_user.confirmed:
return redirect(LOGIN_URL + LOGIN_CONFIRMED_EXT)
confirmed_on = pst_right_now()
if confirmed_on - potential_user.registered_on > CurrentConfig.CONFIRM_EMAIL_EXPIRY:
raise JsonError(status='error', reason='The account associated with the email has expired. Please request for a new confirmation email by logging in.')
# Then, set the user and club to 'confirmed' if it's not done already
potential_user.confirmed = True
potential_user.confirmed_on = confirmed_on
potential_user.save()
return redirect(LOGIN_URL + LOGIN_CONFIRMED_EXT)
|
2e8feb0607361ec0d53b3b62766a83f131ae75c6
| 28,630 |
def xcrun_field_value_from_output(field: str, output: str) -> str:
"""
Get value of a given field from xcrun output.
If field is not found empty string is returned.
"""
field_prefix = field + ': '
for line in output.splitlines():
line = line.strip()
if line.startswith(field_prefix):
return line[len(field_prefix):]
return ''
|
a99efe76e21239f6ba15b8e7fb12d04d57bfb4de
| 28,631 |
def get_all_markets_num():
"""
获取交易所有的市场
:return: "5/2"
"""
markets = list(set([str(i["stockId"]) + "/" + str(i["moneyId"]) for i in res["result"]]))
return markets
|
33d9e49aeaa6e6d81ec199f21d4f5e40cdd0fd48
| 28,632 |
def mae(s, o):
"""
Mean Absolute Error
input:
s: simulated
o: observed
output:
maes: mean absolute error
"""
s, o = filter_nan(s, o)
return np.mean(abs(s - o))
|
313d4605bb240d8f32bc13fc62ff2cf12e22cfd8
| 28,633 |
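A quick worked example, assuming numpy is imported and mae (together with its filter_nan helper) is in scope; with no NaNs the helper is assumed to pass the arrays through unchanged.
s = np.array([1.0, 2.0, 4.0])
o = np.array([1.0, 3.0, 2.0])
print(mae(s, o))  # 1.0 -> mean of |0|, |1|, |2|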
def error_mult_gap_qa_atom(
df_qc, df_qats, target_label, target_charge=0,
basis_set='aug-cc-pV5Z', use_ts=True,
max_qats_order=4, ignore_one_row=False,
considered_lambdas=None, return_qats_vs_qa=False):
"""Computes QATS errors in system multiplicity gaps.
Parameters
----------
df_qc : :obj:`pandas.DataFrame`
Quantum chemistry dataframe.
df_qats : :obj:`pandas.DataFrame`, optional
QATS dataframe.
target_label : :obj:`str`
Atoms in the system. For example, ``'f.h'``.
target_charge : :obj:`int`, optional
The system charge. Defaults to ``0``.
basis_set : :obj:`str`, optional
Specifies the basis set to use for predictions. Defaults to
``'aug-cc-pV5Z'``.
use_ts : :obj:`bool`, optional
Use a Taylor series approximation to make QATS-n predictions
(where n is the order). Defaults to ``True``.
max_qats_order : :obj:`int`, optional
Maximum order to use for the Taylor series. Defaults to ``4``.
ignore_one_row : :obj:`bool`, optional
Used to control errors in ``state_selection`` when there is missing
data (i.e., just one state). If ``True``, no errors are raised. Defaults
to ``False``.
considered_lambdas : :obj:`list`, optional
Allows specification of lambda values that will be considered. ``None``
will allow all lambdas to be valid, ``[1, -1]`` would only report
predictions using references using a lambda of ``1`` or ``-1``.
return_qats_vs_qa : :obj:`bool`, optional
Return the difference of QATS-n - QATS predictions; i.e., the error of
        using a Taylor series approximation with respect to the alchemical
potential energy surface. Defaults to ``False``.
Returns
-------
:obj:`pandas.DataFrame`
"""
if len(df_qc.iloc[0]['atomic_numbers']) == 2:
raise ValueError('Dimers are not supported.')
qc_prediction = hartree_to_ev(
mult_gap_qc_atom(
df_qc, target_label, target_charge=target_charge,
basis_set=basis_set, ignore_one_row=ignore_one_row
)
)
qats_predictions = mult_gap_qa_atom(
df_qc, df_qats, target_label, target_charge=target_charge,
basis_set=basis_set, use_ts=use_ts, ignore_one_row=ignore_one_row,
considered_lambdas=considered_lambdas,
return_qats_vs_qa=return_qats_vs_qa
)
qats_predictions = {key:hartree_to_ev(value) for (key,value) in qats_predictions.items()} # Converts to eV
if use_ts:
qats_predictions = pd.DataFrame(
qats_predictions, index=[f'QATS-{i}' for i in range(max_qats_order+1)]
) # Makes dataframe
else:
qats_predictions = pd.DataFrame(
qats_predictions, index=['QATS']
) # Makes dataframe
if return_qats_vs_qa:
return qats_predictions
else:
qats_errors = qats_predictions.transform(lambda x: x - qc_prediction)
return qats_errors
|
3facdc35f0994eb21a74cf0b1fb277db2a70a14b
| 28,634 |
async def materialize_classpath(request: MaterializedClasspathRequest) -> MaterializedClasspath:
"""Resolve, fetch, and merge various classpath types to a single `Digest` and metadata."""
artifact_requirements_lockfiles = await MultiGet(
Get(CoursierResolvedLockfile, ArtifactRequirements, artifact_requirements)
for artifact_requirements in request.artifact_requirements
)
lockfile_and_requirements_classpath_entries = await MultiGet(
Get(
ResolvedClasspathEntries,
CoursierResolvedLockfile,
lockfile,
)
for lockfile in (*request.lockfiles, *artifact_requirements_lockfiles)
)
merged_snapshot = await Get(
Snapshot,
MergeDigests(
classpath_entry.digest
for classpath_entries in lockfile_and_requirements_classpath_entries
for classpath_entry in classpath_entries
),
)
if request.prefix is not None:
merged_snapshot = await Get(Snapshot, AddPrefix(merged_snapshot.digest, request.prefix))
return MaterializedClasspath(content=merged_snapshot)
|
367a0a49acfb16e3d7c0c0f1034ef946e75928a8
| 28,636 |
def Real_Entropy(timeseries):
""" Calculates an approximation of the time-correlated entropy
Input:
timeseries: list of strings or numbers,
e.g. ['1', '2', '3'] or [1, 2, 3]
Output:
approximation of Real Entropy (time-correlated entropy), e.g. 1.09
"""
def is_sublist(alist, asublist):
""" Turns string lists into strings and checks if the sublist is in the list
Input:
list_ : list of strings, e.g. ['1', '2', '3']
sublist : list of strings, ['1', '2']
Output:
True if asublist is in alist, False otherwise
"""
alist = "".join(map(str, alist))
asublist = "".join(map(str, asublist))
if asublist in alist:
return True
return False
def shortest_subsequence(timeseries, i):
""" Calculates length of the shortest subsequence
at time step i that has not appeared before
Input:
timeseries: list of strings or numbers,
e.g. ['1', '2', '3'] or [1, 2, 3]
i: time step index, integer starting from 0
Output:
length of the shortest subsequence
"""
sequences = [timeseries[i]]
count = 1
while is_sublist(timeseries[:i], sequences) and i + count <= len(timeseries) - 1:
sequences = sequences + [timeseries[i+count]]
count +=1
return len(sequences)
timeseries = list(map(str, timeseries))
substring_length_gen = (shortest_subsequence(timeseries, i) for i in range(1, len(timeseries)))
shortest_substring_lengths = [1] + list(map(lambda length: length, substring_length_gen))
return np.log(len(timeseries)) * len(timeseries) / np.sum(shortest_substring_lengths)
|
a2d8948723b0f62e91a9255b3f0fb35ccf4b26d8
| 28,637 |
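A short usage sketch, assuming numpy is imported as np and Real_Entropy is in scope; a repetitive sequence scores lower than a varied one because its shortest unseen subsequences are longer.
print(Real_Entropy([1, 1, 1, 1, 1, 1]))  # low: highly predictable (about 0.9)
print(Real_Entropy([1, 2, 3, 1, 3, 2]))  # higher: more novel subsequences (about 1.3)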
def hom(X, mode):
"""
It converts transformation X (translation, rotation or rigid motion)
    to homogeneous form.
Input:
X: tf.float32 array, which can be either
[B, 3] float32, 3D translation vectors
[B, 3, 3] float32, rotation matrices
[B, 3, 4] float32, rigid motion matrix
mode: one of 'T', 'R' or 'P' denoting the options above,
'T' is for translation
'R' is for rotation
'P' is for rigid motion
Output:
        H: [B, 4, 4] float32, the transformation in homogeneous form
"""
hom_pad = tf.constant([[[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 1.]]])
if mode == 'T':
X = X[:,:,tf.newaxis]
padding = [[0, 0], [0, 1], [3, 0]]
if mode == 'R':
padding = [[0, 0], [0, 1], [0, 1]]
if mode == 'P':
padding = [[0, 0], [0, 1], [0, 0]]
H = tf.pad(X, padding) + hom_pad
return H
|
e7104789e996b745a8978867925b8ea5e2c1ad01
| 28,638 |
import re
def parse_sl(comments: str):
"""Parses comments for SL on an order"""
parsed = None
sl_at = "(SL\s{0,1}@\s{0,1})"
sl_price = "([0-9]{0,3}\.[0-9]{1,2}((?!\S)|(?=[)])))"
pattern = sl_at + sl_price
match = re.search(pattern, comments)
if match:
match.groups()
parsed = match.group(2)
return parsed
|
d993fc1686fa2623423269812c834aedb0d504e2
| 28,639 |
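A quick usage sketch, assuming parse_sl and its re import from the snippet above are in scope; the comment text is made up.
print(parse_sl("BTO 2 contracts (SL @ 1.25)"))  # '1.25'
print(parse_sl("no stop loss mentioned"))       # None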
def nhpp_thinning(rate_fn, tmax, delta, lbound=None):
"""Nonhomogeneous Poisson process with intensity function `rate_fn` for
    time range (0, tmax) using the algorithm by Lewis and Shedler 1978.
rate_fn: a function `f(t)` of one variable `t` that returns a finite non negative
value for `t` in trange.
tmax: right bound of time (the event times will be for (0, t] time interval)
delta: interval for evaluating the rate_fn.
lbound: upper bound on lambda. This is used as the rate of the HPP to be
thinned. If unspecified then use the maximum value of rate_fn evaluated at
points of rate change (0, delta, 2 delta ....)
"""
trange = np.arange(0, tmax+delta, delta)
if lbound is None:
lbound = max(rate_fn(t) for t in trange) * 1.0
st = [0]
while st[-1] < trange[-1]:
isi = np.random.exponential(1/lbound, size=len(trange))
st_ = st[-1] + np.cumsum(isi)
st += list(st_)
st = np.array(st[1:]) # remove the dummy 0
st = st[st <= tmax].copy()
if len(st) == 0:
return np.empty(0)
accept_prob = np.random.uniform(0, lbound, size=len(st))
intensity = np.array([rate_fn(t) for t in st])
return st[accept_prob <= intensity].copy()
|
7761ec918ca1098c17dd997426841e04b93186c2
| 28,640 |
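A minimal usage sketch, assuming numpy is imported and nhpp_thinning is in scope; the sinusoidal rate is made up for illustration.
np.random.seed(0)
rate = lambda t: 5.0 * (1.0 + np.sin(t))           # intensity in events per time unit
events = nhpp_thinning(rate, tmax=10.0, delta=0.1)
print(len(events), events[:5])                     # event times cluster where the rate peaks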
import traceback
import json
def execute_rule_engine(rule_name, body):
"""
    :param rule_name: Name of the rule to execute.
    :param body: Request payload passed to the rule engine.
    :return: Response built via response_util.get_response with the execution result.
"""
__logger.info("inside execute_rule_engine for " + rule_name)
__logger.info(json.dumps(body, indent=4, sort_keys=True, default=str))
try:
result = rule_engine_service.execute_rule_engine_service(rule_name, body)
if "code" in result:
if result["code"] == 0:
resp = response_util.get_response(200, "Success", result["message"])
else:
resp = response_util.get_response(400, "Error", result["message"])
else:
resp = response_util.get_response(500, "Error", "Unknown exception")
return resp
except:
__logger.error("Unhandled exception while executing rule engine!!!")
__logger.error(traceback.format_exc())
resp = response_util.get_response(500, "Error", traceback.format_exc())
return resp
|
b413ec4723d1c030e798e21c670ee607a4f4d373
| 28,641 |
def get():
"""
Create and return an instance of the FileSelectionContext
subclass which is appropriate to the currently active application.
"""
windowClass = ContextUtils.getForegroundClassNameUnicode()
if windowClass == u"ConsoleWindowClass":
fsContext = NullFileSelectionContext()
elif windowClass == u"Emacs":
fsContext = NullFileSelectionContext()
else:
fsContext = DefaultFileSelectionContext()
return fsContext
|
14e6e94b55801e9b2eb9c8c2738ca6cb8510939a
| 28,643 |
def calc_accuracy(y_true, y_predict, display=True):
"""Analysis the score with sklearn.metrics.
This module includes score functions, performance metrics
and pairwise metrics and distance computations.
Parameters
==========
y_true: numpy.array
y_predict: numpy.array
display: Boolean
Return
======
result: Dict{name:value}
Examples
========
    >>> result = calc_accuracy(y_true, y_predict, display=True)
"""
score = ["explained_variance_score", "r2_score"]
error = ["max_error", "mean_absolute_error", "mean_squared_error",
"mean_squared_log_error", "median_absolute_error"]
result = dict()
names = ["score", "error"]
ignore = []
for name in names:
result[name] = dict()
for item in locals()[name]:
try:
result[name][item] = getattr(metrics, item)(y_true, y_predict)
except Exception as e:
print(color(("↓ %s has been removed since `%s`." % \
(item.capitalize(), e))))
ignore.append(item)
if display:
tabu,numerical = None, None
for name in names:
tabu = PrettyTable(["Name of %s"%color(name), "Value"])
for item in locals()[name]:
if item in ignore:
continue
numerical = "%.3e" % result[name][item]
tabu.add_row([color(item,"青"), numerical])
print(tabu)
return result
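A usage sketch with hypothetical values, assuming the surrounding module imports sklearn.metrics as `metrics` (plus the prettytable/color helpers used for display), which this record does not show:

import numpy as np

y_true = np.array([3.0, 2.5, 4.1, 7.2])
y_predict = np.array([2.9, 2.7, 4.4, 7.0])

# Returns {"score": {...}, "error": {...}}; metrics that raise are skipped and reported.
result = calc_accuracy(y_true, y_predict, display=False)
print(result["error"]["mean_squared_error"])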
|
e71cc9773dc593ea6456f28f96c90410e6043fe0
| 28,644 |
def HSV_to_HSL(hsv):
"""Converts HSV color space to HSL"""
rgb = HSV_to_RGB(hsv)
return RGB_to_HSL(rgb)
|
dc847755135f0d96f5b8980154b9ade496c1753f
| 28,645 |
def prosodic_meter_query(
collection,
):
"""
Function for returning all Prosodic Meters that contain the queried collection of
:obj:`fragment.GreekFoot` objects.
:param collection: an iterable collection of :obj:`fragment.GreekFoot` objects.
"""
all_prosodic_meters = get_all_prosodic_meters()
res = []
for meter in all_prosodic_meters:
if all(x in set(meter.components) for x in collection):
res.append(meter)
return res
|
ec55bc910c246051504f4a66fa12dc10211725d5
| 28,646 |
def get_model(loss=keras.losses.MeanSquaredError(), optimizer=keras.optimizers.Adam(), metrics=[keras.metrics.MeanSquaredError()]):
"""
Loads and compiles the model
"""
model = unet3D_model()
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
return model
|
0a30796893b2d20885fc5496385317c9fc4f2d08
| 28,647 |
def mps_kph(m_per_s):
"""Convert speed from m/s to km/hr.
:kbd:`m_per_s` may be either a scalar number or a
:py:class:`numpy.ndarray` object,
and the return value will be of the same type.
:arg m_per_s: Speed in m/s to convert.
:returns: Speed in km/hr.
"""
return m_per_s * M_PER_S__KM_PER_HR
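A minimal check, assuming the module-level constant M_PER_S__KM_PER_HR is the usual conversion factor of 3.6 (1 m/s = 3.6 km/h):

import numpy as np

M_PER_S__KM_PER_HR = 3.6  # assumed value of the module constant

print(mps_kph(10.0))              # 36.0
print(mps_kph(np.array([1, 5])))  # array([ 3.6, 18. ])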
|
a0cb03d04edcb21bb6820918c262c8a3d6e9afc3
| 28,648 |
import numpy as np
import torch
from torch import nn
def evaluate(model: nn.Module, loss_func: nn.Module, loader: iter, logger: Logger, device: str = None):
""" Evaluate the parameters of the model by computing the loss on the data. """
if device is None:
device = next(model.parameters()).device
model.eval()
y_hats, y_s = [], []
with torch.no_grad():
for x_m, x_a, y in loader:
x_m, x_a, y = x_m.to(device), x_a.to(device), y.to(device)
logits, _ = model(x_m, x_a)
loss = loss_func(logits, y)
if logger is not None:
logger.log_step(loss.item())
y_hats.append(logits.cpu().numpy())
y_s.append(y.cpu().numpy())
y_hats = np.concatenate(y_hats, axis=0)
y_s = np.concatenate(y_s, axis=0)
if logger is not None:
return y_hats, y_s, logger.losses
else:
return y_hats, y_s, loss.item()
|
7c7d769dde86771e052703ecd312f4ee62235419
| 28,649 |
import numpy as np
from numpy.linalg import norm
from scipy.special import xlogy
def glm_likelihood_bernoulli(parms, X, Y, lamb=1, l_p=1, neg=True, log=True):
"""The likelihood for a logistic regression or bernoulli model with a penalty
term (can accept any norm, default is 1 for L1).
Parameters
----------
parms : numpy array (numeric)
The coefficients (including intercept, which is first)
X : numpy array (numeric)
The independent variables (or feature matrix), where the first column
is a dummy column of 1's (for the intercept).
Y : numpy array or pandas dataframe
The response value (should be 0 or 1, but could be float as well if
you're willing to deal with those consequences).
lamb : int, optional
The size of the penalty (lambda). Note this is the inverse of the
common sklearn parameter C (i.e. C=1/lambda. The default is 1.
l_p : int, optional
The mathematical norm to be applied to the coefficients.
The default is 1, representing an L1 penalty.
neg : bool, optional
Return negative likelihood. The default is True.
log : bool, optional
Return log-likelihood. The default is True.
Returns
-------
float
The likelihood.
Examples
--------
>>> import numpy as np
>>> from tsdst.distributions import glm_likelihood_bernoulli
>>> intercept = np.array([3])
>>> betas = np.array([2,4,5])
>>> params = np.concatenate((intercept, betas))
>>> np.random.seed(123)
>>> X = np.random.normal(size=(100, 3))
>>> X = np.hstack((np.repeat(1, 100).reshape(-1, 1), X))
>>> Y = np.round(np.random.uniform(low=0, high=1, size=100))
>>> glm_likelihood_bernoulli(params, X, Y, lamb=1, l_p=1)
386.6152600787893
"""
#intercept = parms[0]
betas = parms[1:]
mu = X.dot(parms)
Ypred = 1.0/(1.0 + np.exp(-mu))
### Alternate formulation (placing here for my notes)
# loglike = Y*mu - np.log(1 + np.exp(mu))
loglike = np.sum(xlogy(Y, Ypred) + xlogy(1.0 - Y, 1.0 - Ypred)) - lamb*norm(betas, l_p)
if not log:
loglike = np.exp(loglike)
if neg:
return -loglike
else:
return loglike
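Because the function returns the penalized negative log-likelihood by default, it can be handed directly to a generic optimizer. A minimal sketch with synthetic data (the data and the choice of an L2 penalty are illustrative, not from the source):

import numpy as np
from scipy.optimize import minimize

np.random.seed(0)
X = np.hstack((np.ones((200, 1)), np.random.normal(size=(200, 2))))
true_beta = np.array([0.5, 1.0, -2.0])
Y = (np.random.uniform(size=200) < 1.0 / (1.0 + np.exp(-X.dot(true_beta)))).astype(float)

# Minimize the penalized negative log-likelihood over (intercept, betas).
fit = minimize(glm_likelihood_bernoulli, x0=np.zeros(3), args=(X, Y, 1.0, 2))
print(fit.x)  # should land near true_beta, shrunk slightly by the penalty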
|
e125d7284045036d412b5afa76730103369142a4
| 28,650 |
import json
import tvm
import tvm.relay
def to_tvm(graph, shape_dict, layout, mode='tensorflow'):
    """Convert a frontend graph (TensorFlow, Keras, MXNet, or ONNX) into a compiled
    TVM Relay graph JSON and its parameters."""
assert mode in FRAME_SUPPORTED
if mode == 'tensorflow':
mod, params = tvm.relay.frontend.from_tensorflow(graph, layout=layout, shape=shape_dict)
elif mode == 'keras':
mod, params = tvm.relay.frontend.from_keras(graph)
elif mode == 'mxnet':
mod, params = tvm.relay.frontend.from_mxnet(graph)
else:
mod, params = tvm.relay.frontend.from_onnx(graph)
mod = tvm.relay.transform.InferType()(mod)
target = 'llvm'
target_host = 'llvm'
with tvm.relay.build_config(opt_level=0):
tvm_graph_json, lib, params = tvm.relay.build(mod, target=target, target_host=target_host, params=params)
#with open("./json/resnet_v1_50_tvm_0.json", 'w') as fp:
#fp.write(tvm_graph)
tvm_graph = json.loads(tvm_graph_json)
_attrstr_to_number(tvm_graph)
return tvm_graph, params
|
861184aafd6e2d428e08acc8f718a5ef30152d27
| 28,651 |
def getAxes():
""" Get each of the axes over which the data is measured. """
df = load_file("atyeo_covid")
df = df.filter(regex='SampleID|Ig|Fc|SNA|RCA', axis=1)
axes = df.filter(regex='Ig|Fc|SNA|RCA', axis=1)
axes = axes.columns.str.split(" ", expand = True)
subject = df['SampleID']
subject = subject[0:22]
antigen = []
receptor = []
for row in axes:
if (row[0] not in antigen):
antigen.append(row[0])
if (row[1] not in receptor):
receptor.append(row[1])
return subject, receptor, antigen
|
d489f1c261a8a92b44563b6842a30cc2c7a880c5
| 28,652 |
import scipy
def stretching_current(ref, cur, dvmin, dvmax, nbtrial, window,t_vec):
"""
Function to perform the stretching of the waveforms:
This function compares the Reference waveform to stretched/compressed current waveforms to get the relative seismic velocity variation (and associated error).
It also computes the correlation coefficient between the Reference waveform and the current waveform.
INPUTS:
- ref = Reference waveform (np.ndarray, size N)
- cur = Current waveform (np.ndarray, size N)
- dvmin = minimum bound for the velocity variation; example: dvmin=-0.03 for -3% of relative velocity change ('float')
- dvmax = maximum bound for the velocity variation; example: dvmax=0.03 for 3% of relative velocity change ('float')
- nbtrial = number of stretching coefficient between dvmin and dvmax, no need to be higher than 50 ('float')
    - window = vector of the indices of the cur and ref windows on which you want to do the measurements (np.ndarray, size tmin*delta:tmax*delta)
- t_vec = time vector, common to both ref and cur (np.ndarray, size N)
OUTPUTS:
- dv = Relative velocity change dv/v (in %)
- cc = correlation coefficient between the reference waveform and the best stretched/compressed current waveform
    - Eps = Vector of Epsilon values (Epsilon = -dt/t = dv/v)
"""
Eps = np.asmatrix(np.linspace(dvmin, dvmax, nbtrial))
L = 1 + Eps
tt = np.matrix.transpose(np.asmatrix(t_vec))
tau = tt.dot(L) # stretched/compressed time axis
C = np.zeros((1, np.shape(Eps)[1]))
for j in np.arange(np.shape(Eps)[1]):
s = np.interp(x=np.ravel(tt), xp=np.ravel(tau[:, j]), fp=cur)
waveform_ref = ref[window]
waveform_cur = s[window]
C[0, j] = np.corrcoef(waveform_ref, waveform_cur)[0, 1]
imax = np.nanargmax(C)
if imax >= np.shape(Eps)[1]-1:
imax = imax - 1
if imax <= 2:
imax = imax + 1
dtfiner = np.linspace(Eps[0, imax-1], Eps[0,imax+1], 500)
func = scipy.interpolate.interp1d(np.ravel(Eps[0,np.arange(imax-2, imax+2)]), np.ravel(C[0,np.arange(imax-2, imax+2)]), kind='cubic')
CCfiner = func(dtfiner)
cc = np.max(CCfiner) # Find maximum correlation coefficient of the refined analysis
dv = 100 * dtfiner[np.argmax(CCfiner)] # Final dv/v measurement (x100 to get the dv/v in %)
return dv, cc, Eps
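A self-contained sketch with synthetic waveforms (not from the source): the current trace is the reference resampled on a time axis stretched by 0.5%, i.e. delayed arrivals, so under the Epsilon = -dt/t = dv/v convention the recovered dv/v should come out near -0.5%.

import numpy as np

delta = 0.01
t_vec = np.arange(0.0, 20.0, delta)
ref = np.sin(2.0 * np.pi * 1.0 * t_vec) * np.exp(-0.1 * t_vec)

# Current waveform: the reference resampled on a time axis stretched by 0.5%.
eps_true = 0.005
cur = np.interp(t_vec, t_vec * (1.0 + eps_true), ref)

# Measure in a 5-15 s window.
window = np.arange(int(5.0 / delta), int(15.0 / delta))
dv, cc, Eps = stretching_current(ref, cur, dvmin=-0.02, dvmax=0.02,
                                 nbtrial=50, window=window, t_vec=t_vec)
print(dv, cc)  # dv in percent (about -0.5 here); cc should be close to 1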
|
9442a940e9013c8ef77bb2c2ecc774c4276a99c5
| 28,653 |
def convert_to_int(var):
"""
    Tries to convert a value to int.
    :param var: the value to convert
    :returns: the int value, or None if the conversion fails
"""
try:
return int(var)
except ValueError:
return None
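Behaviour in the two cases, for illustration:

print(convert_to_int("42"))    # 42
print(convert_to_int("42.5"))  # None -- int() rejects the string "42.5"
print(convert_to_int("abc"))   # None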
|
19ba35d351096f2c7b29d78b8df692fc63a75a6f
| 28,654 |
def eval_input_fn(filepath, example_parser, batch_size):
"""
    Input function for the model's evaluation phase.
    Args:
        filepath (str): path to the training/validation TFRecord file(s)
        example_parser (function): function that parses serialized examples
        batch_size (int): number of samples per batch
Returns:
dataset
"""
dataset = tf.data.TFRecordDataset(filepath)
dataset = dataset.batch(batch_size)
dataset = dataset.map(example_parser, num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(1)
return dataset
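A sketch of a matching parser (the feature spec and file path are hypothetical; adjust them to the actual TFRecord schema). Note that the dataset is batched before .map(), so the parser receives a batch of serialized examples and should use tf.io.parse_example rather than parse_single_example:

import tensorflow as tf

# Hypothetical feature spec.
FEATURE_SPEC = {
    "feature": tf.io.FixedLenFeature([10], tf.float32),
    "label": tf.io.FixedLenFeature([], tf.int64),
}

def example_parser(serialized_batch):
    # Parse a whole batch of serialized examples at once.
    parsed = tf.io.parse_example(serialized_batch, FEATURE_SPEC)
    return parsed["feature"], parsed["label"]

dataset = eval_input_fn("eval.tfrecord", example_parser, batch_size=128)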
|
1a67adfe1decd8b38fe8a8d973b7caf1fb6ec85a
| 28,655 |
def ring_forming_scission_ts_zmatrix(rxn, ts_geo):
""" z-matrix for a ring-forming scission transition state geometry
:param rxn: a Reaction object
:param ts_geo: a transition state geometry
"""
rxn = rxn.copy()
# 1. Get keys to linear or near-linear atoms
lin_idxs = list(automol.geom.linear_atoms(ts_geo))
# 2. Add dummy atoms over the linear atoms
rcts_gra = ts.reactants_graph(rxn.forward_ts_graph)
geo, dummy_key_dct = automol.geom.insert_dummies_on_linear_atoms(
ts_geo, lin_idxs=lin_idxs, gra=rcts_gra)
# 3. Add dummy atoms to the Reaction object as well
rxn = add_dummy_atoms(rxn, dummy_key_dct)
# 4. Generate a z-matrix for the geometry
rng_keys, = ts.forming_rings_atom_keys(rxn.forward_ts_graph)
att_key, tra_key, _ = ring_forming_scission_atom_keys(rxn)
# First, cycle the transferring atom to the front of the ring keys and, if
# needed, reverse the ring so that the attacking atom is last
    # (transferring atom, ..., atom, attacking atom)
rng_keys = automol.graph.cycle_ring_atom_key_to_front(
rng_keys, tra_key, end_key=att_key)
    # Now, cycle the second-to-last key to the front so that the ring order is:
# (atom, attacking atom, transferring atom, ....)
rng_keys = automol.graph.cycle_ring_atom_key_to_front(
rng_keys, rng_keys[-2])
vma, zma_keys = automol.graph.vmat.vmatrix(rxn.forward_ts_graph)
zma_geo = automol.geom.from_subset(geo, zma_keys)
zma = automol.zmat.from_geometry(vma, zma_geo)
return zma, zma_keys, dummy_key_dct
|
0d9f09210a533a56b64dd3ba559b5f645381c7b7
| 28,656 |
def trip(u, v):
"""
Returns the scalar triple product of vectors u and v and z axis.
The convention is z dot (u cross v). Dotting with the z axis simplifies
it to the z component of the u cross v
The product is:
positive if v is to the left of u, that is,
the shortest right hand rotation from u to v is ccw
negative if v is to the right of u, that is,
the shortest right hand rotation from u to v is cw
zero if v is colinear with u
Essentially trip is the z component of the cross product of u x v
"""
return (u[0] * v[1] - u[1] * v[0])
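Quick checks of the sign convention described in the docstring:

print(trip((1, 0), (0, 1)))   # 1  -> (0, 1) lies to the left of (1, 0) (ccw)
print(trip((1, 0), (0, -1)))  # -1 -> (0, -1) lies to the right of (1, 0) (cw)
print(trip((1, 0), (2, 0)))   # 0  -> colinear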
|
5f687ee4b16dc6c1b350ed574cb632a7c9ca996b
| 28,657 |
def one_cpc(request, hmc_session): # noqa: F811
"""
Fixture representing a single, arbitrary CPC managed by the HMC.
Returns a `zhmcclient.Cpc` object, with full properties.
"""
client = zhmcclient.Client(hmc_session)
cpcs = client.cpcs.list()
assert len(cpcs) >= 1
cpc = cpcs[0]
cpc.pull_full_properties()
return cpc
|
18a42e9777881bbab5f54cbeebefb1bf487d0994
| 28,658 |
import logging
import time
from tornado import gen
LOGGER = logging.getLogger(__name__)  # module-level logger assumed by the function below
def yield_with_display(future_or_iterable, every, timeout=None):
""" Yields for a future and display status every x seconds
:param future_or_iterable: A future to yield on, or a list of futures
:param every: The number of seconds between updates
:param timeout: The total number of seconds to wait for the future, otherwise throws TimeoutError
"""
start_time = time.time()
last_status = start_time
futures = [future_or_iterable] if not isinstance(future_or_iterable, list) else future_or_iterable
# Looping until timeout or future is done
while [1 for future in futures if not future.done()]:
current_time = time.time()
# Timeout reached
if timeout and current_time - start_time > timeout:
raise TimeoutError('Waited %d seconds for future.' % timeout)
# Displaying status
if current_time - last_status > every:
last_status = current_time
LOGGER.info('Still waiting for future(s). %s/%s', int(current_time - start_time), timeout or '---')
# Sleeping
yield gen.sleep(0.1)
# Futures are done, rethrowing exception
for future in futures:
exception = future.exception()
if exception is not None:
raise exception
# Returning results
results = [future.result() for future in futures]
if not isinstance(future_or_iterable, list):
results = results[0]
return results
|
07501c913eaf4a4c497ad0c0c3facc87b946e1f5
| 28,659 |
def query(cmd, db, cgi='http://www.ncbi.nlm.nih.gov/sites/entrez',
**keywds):
"""query(cmd, db, cgi='http://www.ncbi.nlm.nih.gov/sites/entrez',
**keywds) -> handle
Query Entrez and return a handle to the results, consisting of
a web page in HTML format.
See the online documentation for an explanation of the parameters:
http://www.ncbi.nlm.nih.gov/books/bv.fcgi?rid=helplinks.chapter.linkshelp
Raises an IOError exception if there's a network error.
"""
variables = {'cmd' : cmd, 'db' : db}
variables.update(keywds)
return _open(cgi, variables)
|
1287e3551eae2be337abeba31ce7888d60938111
| 28,660 |
def Q2B(uchar):
"""单个字符 全角转半角"""
inside_code = ord(uchar)
if inside_code == 0x3000:
inside_code = 0x0020
else:
inside_code -= 0xfee0
    if inside_code < 0x0020 or inside_code > 0x7e:  # not a half-width character after conversion; return the original
return uchar
return chr(inside_code)
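For example, the full-width digit one (U+FF11) and the ideographic space (U+3000) map to their ASCII counterparts, while a character that is already half-width is returned unchanged:

print(Q2B("\uff11"))        # '1'  (full-width digit one -> ASCII '1')
print(repr(Q2B("\u3000")))  # "' '" (ideographic space -> ASCII space)
print(Q2B("a"))             # 'a'  (already half-width, returned as-is)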
|
fa58980c7eb251fa7278caa7bbf6645ad492ed2b
| 28,661 |
from datetime import datetime
def parsedate(date, formats=None, bias=None):
"""parse a localized date/time and return a (unixtime, offset) tuple.
The date may be a "unixtime offset" string or in one of the specified
formats. If the date already is a (unixtime, offset) tuple, it is returned.
>>> parsedate(b' today ') == parsedate(
... datetime.date.today().strftime('%b %d').encode('ascii'))
True
>>> parsedate(b'yesterday ') == parsedate(
... (datetime.date.today() - datetime.timedelta(days=1)
... ).strftime('%b %d').encode('ascii'))
True
>>> now, tz = makedate()
>>> strnow, strtz = parsedate(b'now')
>>> (strnow - now) < 1
True
>>> tz == strtz
True
>>> parsedate(b'2000 UTC', formats=extendeddateformats)
(946684800, 0)
"""
if bias is None:
bias = {}
if not date:
return 0, 0
if isinstance(date, tuple) and len(date) == 2:
return date
if not formats:
formats = defaultdateformats
date = date.strip()
if date == b'now' or date == _(b'now'):
return makedate()
if date == b'today' or date == _(b'today'):
date = datetime.date.today().strftime('%b %d')
date = encoding.strtolocal(date)
elif date == b'yesterday' or date == _(b'yesterday'):
date = (datetime.date.today() - datetime.timedelta(days=1)).strftime(
r'%b %d'
)
date = encoding.strtolocal(date)
try:
when, offset = map(int, date.split(b' '))
except ValueError:
# fill out defaults
now = makedate()
defaults = {}
for part in (b"d", b"mb", b"yY", b"HI", b"M", b"S"):
# this piece is for rounding the specific end of unknowns
b = bias.get(part)
if b is None:
if part[0:1] in b"HMS":
b = b"00"
else:
# year, month, and day start from 1
b = b"1"
# this piece is for matching the generic end to today's date
n = datestr(now, b"%" + part[0:1])
defaults[part] = (b, n)
for format in formats:
try:
when, offset = strdate(date, format, defaults)
except (ValueError, OverflowError):
pass
else:
break
else:
raise error.ParseError(
_(b'invalid date: %r') % pycompat.bytestr(date)
)
# validate explicit (probably user-specified) date and
# time zone offset. values must fit in signed 32 bits for
# current 32-bit linux runtimes. timezones go from UTC-12
# to UTC+14
if when < -0x80000000 or when > 0x7FFFFFFF:
raise error.ParseError(_(b'date exceeds 32 bits: %d') % when)
if offset < -50400 or offset > 43200:
raise error.ParseError(_(b'impossible time zone offset: %d') % offset)
return when, offset
|
512608fb413fa062a4dff00557fbd95120c5441c
| 28,662 |
from glob import glob
def prepare_lv2_data(change_price=False):
"""
    Read level-1 model predictions and assemble them into level-2 train/test matrices for ensembling.
"""
train_files = glob('./models/level1_model_files/train/*')
test_files = glob('./models/level1_model_files/test/*')
num_feat = len(train_files)
nrow = pd.read_csv(train_files[0]).shape[0]
X_train = np.zeros((nrow, num_feat))
X_test = np.zeros((7662, num_feat))
for i, path in enumerate(train_files):
X_train[:, i] = pd.read_csv(path).drop(['index', 'reponse'], axis=1).values.reshape(-1)
for i, train_path in enumerate(train_files):
model_name = train_path.split('{')[0].split('/')[-1]
for test_path in test_files:
if model_name in test_path:
print((model_name))
X_test[:, i] = pd.read_csv(test_path).price_doc.values
y_train = pd.read_csv(train_files[0]).reponse.values
# print(pd.DataFrame(X_train).corr(),pd.DataFrame(X_test).corr())
return X_train, X_test, y_train
|
a10786bf92dccba9cccf7d04c3301b77e008c584
| 28,663 |
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
config = Configurator(settings=settings)
config.include('clldmpg')
config.include('clld_glottologfamily_plugin')
config.include('clld_phylogeny_plugin')
config.register_datatable('familys', datatables.Families)
config.registry.registerUtility(GrambankCtxFactoryQuery(), ICtxFactoryQuery)
config.registry.registerUtility(GrambankMapMarker(), IMapMarker)
config.registry.registerUtility(link_attrs, ILinkAttrs)
return config.make_wsgi_app()
|
6d6eaa8b6c3425023e3550ddae1d3455c939c6fd
| 28,664 |
import numpy
def dwwc(graph, metapath, damping=0.5, dense_threshold=0, dtype=numpy.float64, dwwc_method=None):
"""
Compute the degree-weighted walk count (DWWC) in which nodes can be
repeated within a path.
Parameters
----------
graph : hetio.hetnet.Graph
metapath : hetio.hetnet.MetaPath
damping : float
dense_threshold : float (0 <= dense_threshold <= 1)
        density threshold above which a sparse matrix is automatically
        converted to a dense matrix.
dtype : dtype object
dwwc_method : function
dwwc method to use for computing DWWCs. If set to None, use
module-level default (default_dwwc_method).
"""
    if dwwc_method is None:
        dwwc_method = default_dwwc_method
    return dwwc_method(
graph=graph,
metapath=metapath,
damping=damping,
dense_threshold=dense_threshold,
dtype=dtype,
)
|
aa6d30ed04baf2561e3bac7a992ae9e00b985da8
| 28,665 |
def fba_and_min_enzyme(cobra_model, coefficients_forward, coefficients_reverse):
"""
Performs FBA followed by minimization of enzyme content
"""
with cobra_model as model:
model.optimize()
cobra.util.fix_objective_as_constraint(model)
set_enzymatic_objective(model, coefficients_forward, coefficients_reverse)
sol = cobra_model.optimize()
return sol
|
2e6614be30c7d0f343b9d4206d1e7c34d54436f3
| 28,666 |
def logi_led_shutdown():
""" shutdowns the SDK for the thread. """
if led_dll:
return bool(led_dll.LogiLedShutdown())
else:
return False
|
fdb7d77b7fb59804458247c32a35e80da44f6c1f
| 28,667 |
from typing import Dict
def get_blank_adjustments_for_strat(transitions: list) -> Dict[str, dict]:
"""
Provide a blank set of flow adjustments to be populated by the update_adjustments_for_strat function below.
Args:
transitions: All the transition flows we will be modifying through the clinical stratification process
Returns:
Dictionary of dictionaries of dictionaries of blank dictionaries to be populated later
"""
flow_adjs = {}
for agegroup in COVID_BASE_AGEGROUPS:
flow_adjs[agegroup] = {}
for clinical_stratum in CLINICAL_STRATA:
flow_adjs[agegroup][clinical_stratum] = {}
for transition in transitions:
flow_adjs[agegroup][clinical_stratum][transition] = {}
return flow_adjs
|
b2e5391280bae48202f92832aa8821d47a288135
| 28,669 |
def get_instance(module, name, config):
"""
Get module indicated in config[name]['type'];
If there are args to specify the module, specify in config[name]['args']
"""
func_args = config[name]['args'] if 'args' in config[name] else None
# if any argument specified in config[name]['args']
if func_args:
return getattr(module, config[name]['type'])(**func_args)
# if not then just return the module
return getattr(module, config[name]['type'])()
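A small illustration of the config-driven lookup, using standard-library modules as the target (the config keys here are hypothetical):

import collections
import datetime

config = {
    "offset": {"type": "timedelta", "args": {"days": 7}},
    "bag": {"type": "Counter"},  # no 'args': instantiated with no arguments
}

print(get_instance(datetime, "offset", config))  # 7 days, 0:00:00
print(get_instance(collections, "bag", config))  # Counter()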
|
ea57e7097665343199956509bb302e3806fb383a
| 28,670 |