content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
def flatten_mock_calls(mock):
"""
Flatten the calls performed on a particular mock object,
into a list of calls with arguments.
"""
result = []
for call in mock.mock_calls:
call = list(call)
call_name = call[0]
if '.' in str(call_name):
call_name = str(call_name).split('.')[-1]
result.append([call_name] + call[1:])
return result | 7c41025382f4ca25db1ccd328e9eb17e1d72a01a | 5,115 |
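A minimal usage sketch (not part of the dataset entry) showing what flatten_mock_calls returns for a mock from the standard library's unittest.mock; nested dotted call names are reduced to their last component:

from unittest import mock

m = mock.MagicMock()
m.connect("localhost", port=5432)
m.cursor().execute("SELECT 1")

# Dotted call names such as "cursor().execute" are reduced to "execute".
print(flatten_mock_calls(m))
# [['connect', ('localhost',), {'port': 5432}],
#  ['cursor', (), {}],
#  ['execute', ('SELECT 1',), {}]]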
import logging
from typing import Any, Optional

from django.conf import settings

logger = logging.getLogger(__name__)  # module-level logger used below

def clean_setting(
name: str,
default_value: object,
min_value: Optional[int] = None,
max_value: Optional[int] = None,
required_type: Optional[type] = None,
choices: Optional[list] = None,
) -> Any:
"""cleans the user input for an app's setting in the Django settings file
Will use default_value if setting is not defined.
Will use minimum or maximum value if respective boundary is exceeded.
Args:
default_value: value to use if setting is not defined
min_value: minimum allowed value (0 assumed for int)
max_value: maximum allowed value
required_type: Mandatory if `default_value` is `None`,
otherwise derived from default_value
Returns:
cleaned value for setting
This function is designed to be used in a dedicated module like ``app_settings.py``
as layer between the actual settings and all other modules.
``app_settings.py`` will import and clean all settings, and all other modules are supposed to import the settings from it.
Example for app_settings:
.. code-block:: python
from app_utils.django import clean_setting
EXAMPLE_SETTING = clean_setting("EXAMPLE_SETTING", 10)
"""
if default_value is None and not required_type:
raise ValueError("You must specify a required_type for None defaults")
if not required_type:
required_type = type(default_value)
if min_value is None and issubclass(required_type, int):
min_value = 0
if issubclass(required_type, int) and default_value is not None:
if min_value is not None and default_value < min_value:
raise ValueError("default_value can not be below min_value")
if max_value is not None and default_value > max_value:
raise ValueError("default_value can not be above max_value")
if not hasattr(settings, name):
cleaned_value = default_value
else:
dirty_value = getattr(settings, name)
if dirty_value is None or (
isinstance(dirty_value, required_type)
and (min_value is None or dirty_value >= min_value)
and (max_value is None or dirty_value <= max_value)
and (choices is None or dirty_value in choices)
):
cleaned_value = dirty_value
elif (
isinstance(dirty_value, required_type)
and min_value is not None
and dirty_value < min_value
):
logger.warning(
"Your setting for {} is not valid. Please correct it. "
"Using minimum value for now: {}".format(name, min_value)
)
cleaned_value = min_value
elif (
isinstance(dirty_value, required_type)
and max_value is not None
and dirty_value > max_value
):
logger.warning(
"Your setting for {} is not valid. Please correct it. "
"Using maximum value for now: {}".format(name, max_value)
)
cleaned_value = max_value
else:
logger.warning(
"Your setting for {} is not valid. Please correct it. "
"Using default for now: {}".format(name, default_value)
)
cleaned_value = default_value
return cleaned_value | 91066dd26987ad04fc9ae9b8447e35fa64f8365d | 5,116 |
def update_not_existing_kwargs(to_update, update_from):
"""
This function updates the keyword arguments in to_update with those
from update_from, but only for keys that are not already set in to_update.
This is used to fill in kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update | a66de151e6bc6d8f5b2f1b0ff32e30d2c8cb5277 | 5,117 |
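A short usage sketch (illustrative only): caller-supplied kwargs take precedence and the defaults only fill in missing keys.

defaults = {"color": "blue", "linewidth": 2}
user_kwargs = {"color": "red"}
merged = update_not_existing_kwargs(user_kwargs, defaults)
print(merged)  # {'color': 'red', 'linewidth': 2}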
def linear_forward(A, W, b):
"""Returns Z, (A, W, b)"""
Z = (W @ A) + b
cache = (A, W, b)
return Z, cache | 41d223473d2d8f084f13ca0f90f483b66e479a04 | 5,119 |
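A quick shape check for linear_forward; the entry does not name a backend, so NumPy arrays are assumed here purely for illustration (columns are examples, rows are features/units).

import numpy as np

n_features, n_units, batch_size = 4, 3, 5
A = np.random.randn(n_features, batch_size)  # activations of the previous layer
W = np.random.randn(n_units, n_features)     # weights of the current layer
b = np.random.randn(n_units, 1)              # bias, broadcast across the batch

Z, cache = linear_forward(A, W, b)
print(Z.shape)  # (3, 5): one pre-activation per unit and per example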
import contextlib
import wave
def read_wave(path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (16000, 22050, 32000, 48000)
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate | 5148e788cb5f4bfe63b3e6f2cac24fe704fd9596 | 5,120 |
def update_rho_hat(rho_hat_q, rho_hat_g, phi_hat, K, Q, Y_tp1, gamma_t, W):
"""
rho_hat is an intermediate quantity
rho_hat_{n, nu, theta}(x) = 1/n E[ sum_{t=1}^n s(X_{t-1}, X_t, Y_t | Y_{0:n}, X_n=x)]
where s() are the sufficient statistics
see Cappe (2.5)
In our case (a discrete-emission HMM), it can be broken down into two separable components:
rho_hat_q{n, nu, theta}(i,j,k; theta) = 1/n E[ sum_{t=1}^n I_{X_{t-1}=i, X_t=j} | Y_{0:n}, X_n=k)]
rho_hat_g{n, nu, theta}(i,k; theta) = 1/n E[ sum_{t=0}^n I_{X_t=i} s(Y_t)| Y_{0:n}, X_n=k)]
where s() here is just a multinoulli vector with W entries, so we can re-express it as
rho_hat_g{n, nu, theta}(i,w,k; theta) = 1/n E[ sum_{t=0}^n I_{X_t=i, Y_t=w}| Y_{0:n}, X_n=k)]
rho_hat_q has KxKxK entries
rho_hat_g has KxWxK entries
"""
rho_hat_q = update_rho_hat_q(rho_hat_q, phi_hat, Q, gamma_t, K)
rho_hat_g = update_rho_hat_g(rho_hat_g, Y_tp1, phi_hat, Q, gamma_t, K, W)
return rho_hat_q, rho_hat_g | 55713f9456ad3e8a5a1bf2fadf58e0befddf717a | 5,121 |
import itertools

import numpy

def obtain_dihedral_angles(system_coords, bond_distance):
"""
system_coords: coordinates for each frame; the first frame is used as reference
"""
ref_selection = system_coords[0]
# Process bonds for reference frame (first)
bonds = []
sq_bond_distance = bond_distance**2
for i in range(len(ref_selection)-1):
for j in range(i+1, len(ref_selection)):
if mathTools.sq_distance(ref_selection[i], ref_selection[j]) <= sq_bond_distance:
bonds.append(tuple(sorted([i, j])))
print "DBG: Found %d bonds"%(len(bonds))
# Find angles
angles = []
for i in range(len(bonds)-1):
for j in range(i+1, len(bonds)):
if bonds_are_linked(bonds[i], bonds[j]):
angles.append(tuple(sorted([bonds[i], bonds[j]])))
print "DBG: Found %d angles"%(len(angles))
# Finally, find dihedrals
dihedrals = []
for i in range(len(angles)-1):
for j in range(i+1, len(angles)):
if angles_share_bond(angles[i], angles[j]):
dihedrals.append(tuple(sorted([angles[i], angles[j]])))
print "DBG: Found %d dihedrals"%(len(dihedrals))
# Now reorganize atoms in dihedrals so that
# they are consecutive and we can calculate the
# actual dihedral angle
r_dihedrals = []
for dihedral in dihedrals:
indices = get_dihedral_indices(dihedral)
# Get permutation of minimum distance
distances = []
for perm in itertools.permutations(indices):
#print dihedral, perm
distances.append(( mathTools.sq_distance(ref_selection[perm[0]],ref_selection[perm[1]])+
mathTools.sq_distance(ref_selection[perm[1]],ref_selection[perm[2]])+
mathTools.sq_distance(ref_selection[perm[2]],ref_selection[perm[3]]),
perm))
# We will pick the one which summed distances is smaller
distances.sort()
r_dihedrals.append(distances[0][1])
all_angles = []
for ref in system_coords:
#Calculate the angles for a ref
angles = []
for dihedral_indexes in r_dihedrals:
atom1 = ref[dihedral_indexes[0]]
atom2 = ref[dihedral_indexes[1]]
atom3 = ref[dihedral_indexes[2]]
atom4 = ref[dihedral_indexes[3]]
angles.append( mathTools.calc_dihedral(atom1, atom2, atom3, atom4))
all_angles.append(angles)
return numpy.array(all_angles) | 7aba964f81c550e6d6204d28327d65020e7372b0 | 5,122 |
def piecewise_accel(duration,initial,final):
"""Defines a piecewise acceleration.
Args:
duration (float): Length of time for the acceleration to complete.
initial (float): Initial value.
final (float): Final value.
"""
a = (final-initial)
return lambda t: initial + a * (
(9./2 * t**3/duration**3) * (t<duration/3)
+ (-9*t**3/duration**3 + 27./2*t**2/duration**2 - 9./2*t/duration + 1./2) * (t<2*duration/3)*(t>=duration/3)
+ (9./2*t**3/duration**3 - 27./2 * t**2/duration**2 + 27./2*t/duration - 7./2) * (t>= 2*duration/3)) | 7f6acd7ba2610a2e56cc1f0758b3a39543bfe8c2 | 5,123 |
def get_displayed_views(id):
"""
Get views in the window rect by view id string.
:param id: view id string
:return: the displayed views
"""
return get_solo().get_displayed_views(id) | f3058f78ae1a2d70a3771a52cc852f1119a51f6a | 5,124 |
def get_build_version(xform):
"""
there are a bunch of unreliable places to look for a build version
this abstracts that out
"""
version = get_version_from_build_id(xform.domain, xform.build_id)
if version:
return version, BuildVersionSource.BUILD_ID
version = get_version_from_appversion_text(
get_meta_appversion_text(xform)
)
if version:
return version, BuildVersionSource.APPVERSION_TEXT
xform_version = xform.version
if xform_version and xform_version != '1':
return int(xform_version), BuildVersionSource.XFORM_VERSION
return None, BuildVersionSource.NONE | 417debd5d3daf10c28222d42e6cc90869f5779ec | 5,125 |
import typing
def get_shortlist(routing_table: 'TreeRoutingTable', key: bytes,
shortlist: typing.Optional[typing.List['KademliaPeer']]) -> typing.List['KademliaPeer']:
"""
If not provided, initialize the shortlist of peers to probe to the (up to) k closest peers in the routing table
:param routing_table: a TreeRoutingTable
:param key: a 48 byte hash
:param shortlist: optional manually provided shortlist, this is done during bootstrapping when there are no
peers in the routing table. During bootstrap the shortlist is set to be the seed nodes.
"""
if len(key) != constants.HASH_LENGTH:
raise ValueError("invalid key length: %i" % len(key))
return shortlist or routing_table.find_close_peers(key) | 884e4444cca22eaf9495dad8ff28bfc601b4c778 | 5,126 |
from typing import Dict
from typing import Any
import structlog

def get_merged_contextvars(bound_logger: BindableLogger) -> Dict[str, Any]:
"""
Return a copy of the current context-local context merged with the context
from *bound_logger*.
.. versionadded:: 21.2.0
"""
ctx = get_contextvars()
ctx.update(structlog.get_context(bound_logger))
return ctx | 3ee59f57ee10c4f57c4085e660cc054830688416 | 5,127 |
import warnings
def policy_iteration(policy, env, value_function=None, threshold=0.00001, max_steps=1000, **kwargs):
"""
Policy iteration algorithm, which consists of iterative policy evaluation until convergence for the current
policy (estimating over many sweeps until the value function stops changing), followed by a greedy policy update.
"""
value_function = last_converged_v_fun = np.zeros(env.world.size) if value_function is None else value_function
greedy_policy = policy
for step_number in range(max_steps):
new_value_function = utils.single_step_policy_evaluation(greedy_policy, env, value_function=value_function, **kwargs)
delta_eval = np.max(value_function - new_value_function)
value_function = new_value_function
if delta_eval < threshold: # policy evaluation converged
new_policy = utils.greedy_policy_from_value_function(greedy_policy, env, value_function=value_function, **kwargs)
delta = np.max(last_converged_v_fun - new_value_function)
last_converged_v_fun = new_value_function
if delta < threshold: # last converged value functions difference converged
break
else:
greedy_policy = new_policy
elif step_number == max_steps - 1:
greedy_policy = utils.greedy_policy_from_value_function(greedy_policy, env, value_function=last_converged_v_fun,
**kwargs)
warning_message = 'Policy iteration did not reach the selected threshold. Finished after reaching ' \
'the maximum {} steps with delta_eval {}'.format(step_number + 1, delta_eval)
warnings.warn(warning_message, UserWarning)
return last_converged_v_fun, greedy_policy | 090fc3a4e87986afc9dfd3565a2d234c7d2e8005 | 5,128 |
def create_blueprint(app):
"""Register blueprint routes on app."""
blueprint = Blueprint(
"invenio_records_marc21",
__name__,
template_folder="../templates",
url_prefix="/marc21",
)
blueprint = init_theme_views(blueprint, app)
blueprint = init_records_views(blueprint, app)
return blueprint | 8aa53185d3d41e4e5aabfa2efaa6a73b94dc02f5 | 5,130 |
import json
import requests_mock

def mock_machine():
"""Fixture localapi Machine init with the data/response.json file."""
with requests_mock.Mocker() as mock_resp:
f = open(response_test_path,)
data = json.load(f)
machine_ipaddr = "0.0.0.0"
mock_addr = f"http://{machine_ipaddr}:3000/api/v1/hvac"
mock_resp.post(mock_addr, json=data)
return Machine(machine_ipaddr) | 726aecd3195d39f8a0c48d93a00299a5d61ac90a | 5,131 |
import re

def get_files_links(service, v):
"""Print links of uploaded files.
:param: service (object): Google Drive service object.
:param: v (string): Version of Tor Browser to look for.
"""
windows_re = r'torbrowser-install-%s_\w\w(-\w\w)?\.exe(\.asc)?' % v
linux_re = r'tor-browser-linux\d\d-%s_(\w\w)(-\w\w)?\.tar\.xz(\.asc)?' % v
osx_re = r'TorBrowser-%s-osx\d\d_(\w\w)(-\w\w)?\.dmg(\.asc)?' % v
# dictionary to store file names and IDs
files_dict = dict()
print "Trying to fetch links of uploaded files..."
links = service.files().list().execute()
items = links.get('items', [])
if not items:
raise ValueError('No files found.')
else:
for item in items:
if re.search(windows_re, item['title']):
files_dict[item['title']] = item['id']
elif re.search(linux_re, item['title']):
files_dict[item['title']] = item['id']
elif re.search(osx_re, item['title']):
files_dict[item['title']] = item['id']
return files_dict | bda4af382bb629ce40721ccff64553cd2b98d558 | 5,132 |
def list_(context, field, mpd_query=None):
"""
*musicpd.org, music database section:*
``list {TYPE} [ARTIST]``
Lists all tags of the specified type. ``TYPE`` should be ``album``,
``artist``, ``date``, or ``genre``.
``ARTIST`` is an optional parameter when type is ``album``,
``date``, or ``genre``.
This filters the result list by an artist.
*Clarifications:*
The musicpd.org documentation for ``list`` is far from complete. The
command also supports the following variant:
``list {TYPE} {QUERY}``
Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs
of a field name and a value. If the ``QUERY`` consists of more than one
pair, the pairs are AND-ed together to find the result. Examples of
valid queries and what they should return:
``list "artist" "artist" "ABBA"``
List artists where the artist name is "ABBA". Response::
Artist: ABBA
OK
``list "album" "artist" "ABBA"``
Lists albums where the artist name is "ABBA". Response::
Album: More ABBA Gold: More ABBA Hits
Album: Absolute More Christmas
Album: Gold: Greatest Hits
OK
``list "artist" "album" "Gold: Greatest Hits"``
Lists artists where the album name is "Gold: Greatest Hits".
Response::
Artist: ABBA
OK
``list "artist" "artist" "ABBA" "artist" "TLC"``
Lists artists where the artist name is "ABBA" *and* "TLC". Should
never match anything. Response::
OK
``list "date" "artist" "ABBA"``
Lists dates where artist name is "ABBA". Response::
Date:
Date: 1992
Date: 1993
OK
``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
Lists dates where artist name is "ABBA" and album name is "Gold:
Greatest Hits". Response::
Date: 1992
OK
``list "genre" "artist" "The Rolling Stones"``
Lists genres where artist name is "The Rolling Stones". Response::
Genre:
Genre: Rock
OK
*GMPC:*
- does not add quotes around the field argument.
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
"""
field = field.lower()
query = _list_build_query(field, mpd_query)
if field == u'artist':
return _list_artist(context, query)
elif field == u'album':
return _list_album(context, query)
elif field == u'date':
return _list_date(context, query)
elif field == u'genre':
pass | b95b6e4e5be01a1796d1708fc214821ce4f78491 | 5,133 |
def palindrome(d: int)-> str:
"""
Reverses the digits of the number by repeatedly taking the last digit and
adding it to ten times the running result, then compares with the original.
Input: Integer
Output: String (Sentence telling if the number is palindrome or not)
"""
remainder = 0
revnum = 0
n = len(str(d))
copynum2 = d
while copynum2 != 0:
remainder = copynum2%10
revnum = revnum * 10 + remainder
copynum2 //= 10
if d == revnum:
return "Given Numer {} is palindrome".format(d)
else:
return "Given Numer {} is not palindrome".format(d) | fe654ab92a905e265987856bcd2106c7b082b490 | 5,134 |
import json
def import_from_file(request):
"""
Import a part of a source site's page tree via an import of a JSON file
exported to a user's filesystem from the source site's Wagtail Admin
The source site's base url and the source page id of the point in the
tree to import defined what to import and the destination parent page
defines where to import it to.
"""
if request.method == 'POST':
form = ImportFromFileForm(request.POST, request.FILES)
if form.is_valid():
import_data = json.loads(form.cleaned_data['file'].read().decode('utf-8-sig'))
parent_page = form.cleaned_data['parent_page']
try:
page_count = import_pages(import_data, parent_page)
except LookupError as e:
messages.error(request, _(
"Import failed: %(reason)s") % {'reason': e}
)
else:
messages.success(request, ungettext(
"%(count)s page imported.",
"%(count)s pages imported.",
page_count) % {'count': page_count}
)
return redirect('wagtailadmin_explore', parent_page.pk)
else:
form = ImportFromFileForm()
return render(request, 'wagtailimportexport/import_from_file.html', {
'form': form,
}) | 0dd6d4f2499a05c13002a0c410a8558b8f5b3b29 | 5,135 |
def _build_groupby_indices(df, table_name, join_columns):
"""
Pre-computes indexes based on the group-by columns.
Returns a dictionary of tuples to the list of indices.
"""
log.info("Grouping table '{}' by: {}.".format(table_name,
", ".join(join_columns)))
ret = df.groupby(join_columns).indices
if len(join_columns) == 1:
# Manually patch the dictionary to make sure its keys are tuples.
ret = {(k,): v for k, v in ret.items()}
return ret | 16ba9cd231aac2560a5735dc4727dd5c15b90fc2 | 5,138 |
from typing import List
def add_multiple_package(package_list: List[str]) -> str:
"""
Generate latex code to add multiple package to preamble
:param package_list: List of package to add in preamble
"""
usepackage_command_list = []
for package in package_list:
usepackage_command_list.append(rf"""\usepackage{{{package}}}""")
return "\n".join(usepackage_command_list) | 90bdd0a521c094d92c35ef92e62d6b43f6b135b4 | 5,139 |
from metrics.models import Group
def emailAdmins(msgData):
"""
Emails all admins with given message. States which admin created/is sending the message to everyone.
Return: {bool}
"""
try:
if not msgData['msg']:
print('No message was provided to send.')
return False
admins = list(Group.objects.get(name='admins').user_set.all().values_list('username', flat=True))
returnMessage, emailSent = sendEmail({
'subject': '[Omnia] Admins communication',
'recipients': admins,
'fromEmail': msgData['fromEmail'],
'message': f'<div style="font-family:sans-serif;font-size:14px;line-height:20px;"><p>Message from {msgData["sender"]} to all {len(admins)} Omnia admins:</p><p>{msgData["msg"]}</p></div>'
})
return (returnMessage, emailSent)
except Exception as ex:
return (f"Error: Admin email failed to send. Error message: {returnMessage}", False) | d44989205c2c60bc618cffcfc9a08ad141f35e4b | 5,140 |
def add(isamAppliance, name, chainItems=[], description=None, check_mode=False, force=False):
"""
Create an STS chain template
"""
if force is False:
ret_obj = search(isamAppliance, name)
if force is True or ret_obj['data'] == {}:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
json_data = {
"name": name,
"chainItems": chainItems
}
if description is not None:
json_data['description'] = description
return isamAppliance.invoke_post(
"Create an STS chain template", uri, json_data,
requires_modules=requires_modules,
requires_version=requires_version)
return isamAppliance.create_return_object() | 7050cfbb052164ed9c570c065b62d5d90609df2c | 5,141 |
def MinHamDistance(pattern, dna_list):
"""Calculate the minimum Hamming distance from a DNA list."""
return sum(HammingDistanceDiffLen(pattern, sequence) for sequence in
dna_list) | 37b1bc96e8a9622060ee6c1361f30df3b69b844f | 5,142 |
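The helper HammingDistanceDiffLen is not included in the entry above. A common definition, assumed here for illustration, is the minimum Hamming distance between the pattern and any window of the sequence of the same length as the pattern:

def HammingDistanceDiffLen(pattern, sequence):
    """Assumed helper: minimum Hamming distance between `pattern` and any
    window of `sequence` with the same length as `pattern`."""
    k = len(pattern)
    return min(
        sum(a != b for a, b in zip(pattern, sequence[i:i + k]))
        for i in range(len(sequence) - k + 1)
    )

print(MinHamDistance("AAA", ["TTACCTTAAC", "GATATCTGTC", "ACGGCGTTCG"]))  # 1 + 1 + 2 = 4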
from datetime import datetime
def _add_note(text: str, user: KarmaUser) -> str:
"""Adds a new note to the database for the given user."""
_, note_msg = _parse_note_cmd(text)
if not note_msg:
return f"Sorry {user.username}, could not find a note in your message."
if _note_exists(note_msg, user):
return f"Sorry {user.username}, you already have an identical note."
note = KarmaNote(
user_id=user.user_id, timestamp=datetime.now(), note=note_msg
)
session = db_session.create_session()
session.add(note)
session.commit()
return f"Hey {user.username}, you've just stored a note." | 4b81f45c9839a919b41b6f45a09beaf322821211 | 5,143 |
def process_line(this_line, do_stemming=False, remove_stopwords=False):
"""
Given a line from the CSV file, gets the stemmed tokens.
"""
speech = process_csv_line(this_line)
speech_tokens = process_raw_speech_text(speech.contents, perform_stemming=do_stemming,
delete_stopwords=remove_stopwords)
return speech_tokens | 2730bc7e942a2031f96cc40e889d72cf728bd45a | 5,145 |
def metadef_tag_count(context, namespace_name):
"""Get metadef tag count in a namespace"""
namespace = metadef_namespace_get(context, namespace_name)
_check_namespace_visibility(context, namespace, namespace_name)
count = 0
for tag in DATA['metadef_tags']:
if tag['namespace_id'] == namespace['id']:
count = count + 1
return count | bc863cdbdde5abe4d845f01f49eed1a357e008e4 | 5,146 |
from rdflib import Literal
def act2graph(graph: Graph, xml_root: Xml, registry: dict,
namespaces: dict, tag: str) -> Graph:
""" Transform activityName tag into RDF graph.
The function transforms the Activity MasterData into identifier. The output
is a RDF graph that represents a part of the Ecoinvent nomenclature
structured with The IEO ontology. The output represents the centrally
registrered identifier (CRID) by the database version and the activity name
identifier, e.g. ecoinvent3.0:88d6c0aa-0053-4367-b0be-05e4b49ff3c5 for the
copper production, primary.
Variables:
- graph: the graph to update
- xml_root: the root of the xml file
- registry: dictionary containing the reference/info of the data registry
- tag: string containing the namespace tag
- namespaces: dictionary containing the namespaces with tags
"""
# crid_reg: CRID registry, e.g Ecoinvent
crid_reg = registry['reg_id']
crid_reg_label = registry['label']
# Database identifier, e.g. EcoInvent3.1
major_release = xml_root.attrib['majorRelease']
minor_release = xml_root.attrib['minorRelease']
database_version = f'v{major_release}_{minor_release}'
database_label = f'{crid_reg_label}{major_release}.{minor_release}'
database_id = crid_reg+database_version
graph.add((ECO[crid_reg], RDFS.label, Literal(crid_reg_label, lang='en')))
graph.add((ECO.activityId, RDFS.subClassOf, ACT_CRID))
activity_id_label = 'EcoInvent activity identifier'
graph.add((ECO.activityId, RDFS.label, Literal(activity_id_label, lang='en')))
graph.add((ECO.activity_name, RDFS.subClassOf, REF_ACTIVITY))
activity_label = 'EcoInvent activity label'
graph.add((ECO.activity_name, RDFS.label, Literal(activity_label, lang='en')))
for activity_name in xml_root.findall(tag, namespaces):
activity_name_id = activity_name.attrib['id']
crid = activity_name_id+database_version
graph.add((ECO[crid], RDF.type, ECO.activityId))
graph.add((ECO[activity_name_id], RDF.type, ECO.activity_name))
# Define the property relation between the symbols of the CRID
graph.add((ECO[crid], BFO.has_part, ECO[database_id]))
graph.add((ECO[database_id], BFO.part_of, ECO[crid]))
graph.add((ECO[crid], BFO.has_part, ECO[activity_name_id]))
graph.add((ECO[activity_name_id], BFO.part_of, ECO[crid]))
# Define the labels with the different languages
xml_ns = namespaces['xml']
for name in activity_name.findall('eco:name', namespaces):
lang = name.attrib['{'+xml_ns+'}lang']
activity_label = name.text
crid_label = f'{database_label}:{activity_label}'
graph.add((ECO[crid], RDFS.label, Literal(crid_label, lang=lang)))
graph.add((ECO[activity_name_id],
RDFS.label,
Literal(activity_label, lang=lang)))
return graph | 899522fa59aa8acf8c0f55377793fc70be6c112b | 5,147 |
from typing import AnyStr
def to_checksum_address(value: AnyStr) -> ChecksumAddress:
"""
Makes a checksum address given a supported format.
"""
norm_address = to_normalized_address(value)
address_hash = encode_hex(keccak(text=remove_0x_prefix(norm_address)))
checksum_address = add_0x_prefix(
"".join(
(
norm_address[i].upper()
if int(address_hash[i], 16) > 7
else norm_address[i]
)
for i in range(2, 42)
)
)
return ChecksumAddress(HexAddress(checksum_address)) | 7223c1fa612a1445c5c7d66410b9f34e4c302a74 | 5,148 |
def is_volatile(type):
"""returns True, if type represents C++ volatile type, False otherwise"""
nake_type = remove_alias(type)
return isinstance(nake_type, cpptypes.volatile_t) | d60e4ea471a818b878267e6f6f9a2e05f2728b1c | 5,149 |
def load_adult(as_frame: bool = False):
"""Load and return the higly imbalanced binary classification [adult income datatest](http://www.cs.toronto.edu/~delve/data/adult/desc.html).
you may find detailed description [here](http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html)
"""
with resources.path(
"pytorch_widedeep.datasets.data", "adult.parquet.brotli"
) as fpath:
df = pd.read_parquet(fpath)
if as_frame:
return df
else:
return df.to_numpy() | 432ef18a197dba1a0e64b7606ba2d350fc402f28 | 5,150 |
from typing import Optional
from typing import Union
from typing import Callable
from typing import NoReturn
import functools as ft
import json
import types
def sam(
body: Optional[Union[bool,Callable]] = json.loads,
pathParams: Optional[Union[bool,Callable]] = False,
queryString: Optional[Union[bool,Callable]] = False,
headers: Optional[Union[bool,Callable]] = False,
authenticate: Optional[Callable[[dict], types.AuthUser]] = None,
authorize: Optional[Callable[[types.AuthUser], bool]] = None,
jsonize_response: bool = True,
keep_event: bool = False,
keep_context: bool = False,
pass_auth_user: bool = True,
):
"""Wraps an AWS lambda handler function to handle auth, to catch
and handle errors, and to convert lambda handler default parameters
to a functions declared parameters.
:param body: Should the wrapper function pass `event`'s "body"
attribute as an arg to inner function (called "body")? If `body`
is callable, it will be used to parse the values.
For example, if the body is string-ified JSON, you can use `json.loads`
to load the request (or `parsers.json`, a wrapper around `json.loads`).
Or, you could use a `pydantic` model to parse and validate the input.
If this param parsing raises an error, it will be caught and returned
as an `errors.RequestParseError`.
See also other params: `pathParams`, `queryString`, and `headers`.
:param pathParams: Should the wrapper function pass `event`'s "pathParams"
attribute as an arg to inner function (called "path")? If `pathParams`
is callable, it will be used to parse the values.
See also other params: `body`, `queryString`, and `headers`.
:param queryString: Should the wrapper function pass `event`'s "queryString"
attribute as an arg to inner function (called "query")? If `queryString`
is callable, it will be used to parse the values.
See also other params: `pathParams`, `body`, and `headers`.
:param headers: Should the wrapper function pass `event`'s "headers"
attribute as an arg to inner function (called "headers")? If `headers`
is callable, it will be used to parse the values.
See also other params: `pathParams`, `queryString`, and `body`.
:param authenticate: Function to authenticate the requesting user.
Takes the full `event` as an input and returns a User.
:param authorize: Function to authorize the requesting user.
Note: `authenticate` must also be present.
:param jsonize_response: Should the response body be wrapped in JSON?
If so, the response's body will be a string-ified json dict
of the following form: `{"success": true, "result": ...}`
If `jsonize_response` is `True` but the function's signature
shows a return value of `None` or `NoReturn`, and the function
does in fact return `None`, the body will not have a "result"
attribute, only "success".
If `jsonize_response` is `True` and the returned value is a dict,
that value will be merged with a dict: `{"success": True}`
:param keep_event: Should the `event` dict be passed to the
wrapped function from AWS Lambda?
:param keep_context: Should the `context` object be passed to the
wrapped function from AWS Lambda?
:param pass_auth_user: If authentication function supplied,
should `authUser` be passed as a kwarg to the wrapped function?
:returns: Decorated lambda handler function
"""
# Check authorize/authenticate
if authorize is not None:
assert authenticate is not None, "If `authorize` is not `None`, "+\
"`authenticate` can't be `None`."
def wrapper(fn: Callable):
# Get the function's return type, to use later when
# deciding how to format response
return_type = args.get_return_type(fn)
@ft.wraps(fn)
def inner(event: dict, context) -> dict:
# Store function arguments
kwargs = {}
if authenticate is not None:
# Authenticate the user
try:
user = authenticate(event)
except errors.HttpError as e:
return e.json()
if authorize is not None:
# Authorize the user
try:
if not authorize(user):
raise errors.AuthorizationError()
except errors.HttpError as e:
return e.json()
# Does the user want the authorized
# user as an argument?
if pass_auth_user:
kwargs["authUser"] = user
# Get the query/path/body/header params
if body:
try:
kwargs["body"] = body(event["body"]) if callable(body) else event["body"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request body."
)
if pathParams:
try:
kwargs["path"] = pathParams(event["pathParameters"]) if callable(pathParams) \
else event["pathParameters"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request path parameters."
)
if queryString:
try:
kwargs["query"] = queryString(event["queryStringParameters"]) if callable(queryString) \
else event["queryStringParameters"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request query string parameters."
)
if headers:
try:
kwargs["headers"] = headers(event["headers"]) if callable(headers) else event["headers"]
except Exception as e:
return errors.RequestParseError().json(
f"Unable to read request headers."
)
# Add event/context if requested
if keep_event:
kwargs["event"] = event
if keep_context:
kwargs["context"] = context
# Call the function
try:
res = fn(**kwargs)
except errors.HttpError as e:
return e.json()
except Exception as e:
print(f"UNCAUGHT ERROR: \"{e}\"")
return errors.InternalServerError().json()
# Return a response
if jsonize_response:
# If there isn't a return (as expected)
# just return the success-ness
if res is None and return_type in (None, NoReturn):
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
})
}
# If the response is a dict, merge
# it with the `success`-ness flag
if isinstance(res, dict):
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
**res
})
}
# Otherwise (if result isn't a dict)
# return it as the value to key "result"
return {
"statusCode": 200,
"body": json.dumps({
"success": True,
"result": res,
})
}
else:
# If not json-izing the response, pass
# it as the value to the key "body"
# (still with a status-code of 200)
return {
"statusCode": 200,
"body": res
}
return inner
return wrapper | 4ccdbfc843fd07197819fae730faec97dc2316f7 | 5,151 |
import logging
def get_logger(name=None):
"""return a logger
"""
global logger
if logger is not None: return logger
print('Creating logger========================================>')
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s][%(levelname)s]{%(pathname)s:%(lineno)d} %(message)s')
sh.setFormatter(formatter)
logger.addHandler(sh)
return logger | 34e0aa41b3e8c878574e1ab57eff41238b291672 | 5,152 |
import re
def LF_degen_spine(report):
"""
Checking for degenerative spine
"""
reg_01 = re.compile('degen',re.IGNORECASE)
reg_02 = re.compile('spine',re.IGNORECASE)
for s in report.report_text.text.split("."):
if reg_01.search(s) and reg_02.search(s):
return ABNORMAL_VAL
return ABSTAIN_VAL | d0211476a3f179c26648546c21176866bad7c61e | 5,153 |
def make_log_format(fields, sep=" - "):
"""
Build a custom log format, as accepted by the logging module, from a list of field names.
:param fields: list or tuple of str - names of fields to use in log messages
:param sep: str - separator to put between fields. Default is ' - '
:return: a log format string usable to configure log formatters
"""
assert all(f in log_fields for f in fields), "Only fields from {} are valid".format(
tuple(log_fields)
)
return sep.join("%({}){}".format(f, log_fields[f]) for f in fields) | 7e05f4bced180ef98025576e9fa1b2cf4f296b92 | 5,154 |
from collections import defaultdict

def tweets_for(type, args, per_user=None):
"""
Retrieve tweets for a user, list or search term. The optional
``per_user`` arg limits the number of tweets per user, for
example to allow a fair spread of tweets per user for a list.
"""
lookup = {}
lookup[type] = args[0].strip("\"'")
tweets = Tweet.objects.get_for(**lookup)
if per_user is not None:
_tweets = defaultdict(list)
for tweet in tweets:
if len(_tweets[tweet.user_name]) < per_user:
_tweets[tweet.user_name].append(tweet)
tweets = sum(_tweets.values(), [])
tweets.sort(key=lambda t: t.created_at, reverse=True)
if len(args) > 1 and args[-1].isdigit():
tweets = tweets[:int(args[-1])]
return tweets | ae393d887de9d87a13c3d46a30bcc08d78867827 | 5,155 |
import numpy as np
from scipy.sparse import issparse

def sum_var(A):
"""summation over axis 1 (var) equivalent to np.sum(A, 1)"""
if issparse(A):
return A.sum(1).A1
else:
return np.sum(A, axis=1) if A.ndim > 1 else np.sum(A) | af866cb018a46746456efdb2e0c013a6410f9be4 | 5,156 |
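A quick check (illustrative) showing that the dense and sparse branches agree:

from scipy.sparse import csr_matrix

A_dense = np.array([[1.0, 2.0], [3.0, 4.0]])
A_sparse = csr_matrix(A_dense)
print(sum_var(A_dense))   # [3. 7.]
print(sum_var(A_sparse))  # [3. 7.]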
def success_schema():
"""Pytest fixture for successful SchemaModel object"""
scm = SchemaVersion("1.0")
scm.success = True
return scm | c7a918a1be0d77607bccdedf80c3acaf5a56bd32 | 5,157 |
def _interfaces(config):
""" list system interfaces based on shape """
shape = lib.metadata.get_instance()['shape']
print()
if config.getboolean('DEFAULT', 'auto') is True:
interfaces = lib.interfaces.get_interfaces_by_shape(shape)
else:
interfaces = config['DEFAULT']['interfaces'].split(',')
return interfaces | 7ea4d493293d910532b514edf4ec7efee2253a34 | 5,158 |
def getColumninfo(columns):
"""
See ElementFaceToThickness.
"""
ColumnC, problematicColumns = ElementFaceToThickness(columns)
return ColumnC | 985fbdabf95932ae4a8b57169ad6e1aaaa36f146 | 5,159 |
from typing import Any
from typing import Optional
def script(
command: str, inputs: Any = [], outputs: Any = NULL, tempdir=False, **task_options
) -> Any:
"""
Execute a shell script as a redun task with file staging.
"""
if outputs == NULL:
outputs = File("-")
command_parts = []
# Prepare tempdir if requested.
temp_path: Optional[str]
if tempdir:
temp_path = mkdtemp(suffix=".tempdir")
command_parts.append('cd "{}"'.format(temp_path))
else:
temp_path = None
# Stage inputs.
command_parts.extend(input.render_stage() for input in iter_nested_value(inputs))
# User command.
command_parts.append(get_wrapped_command(prepare_command(command)))
# Unstage outputs.
file_stages = [value for value in iter_nested_value(outputs) if isinstance(value, Staging)]
command_parts.extend(file_stage.render_unstage() for file_stage in file_stages)
full_command = "\n".join(command_parts)
# Get input files for reactivity.
def get_file(value: Any) -> Any:
if isinstance(value, Staging):
# Staging files and dir turn into their remote versions.
cls = type(value.remote)
return cls(value.remote.path)
else:
return value
input_args = map_nested_value(get_file, inputs)
return _script(
full_command, input_args, outputs, task_options=task_options, temp_path=temp_path
) | fb7b404d7d46680240863778b541afa83dec4528 | 5,160 |
import requests
def get_forms(console: Console, sess: requests.Session, form_id: str = "General_Record_2020v2.0"):
"""
Method to get every form for a given FormID
"""
raw_resp = get_url(url=f"https://forms.agterra.com/api/{form_id}/GetAll/0", sess=sess)
if raw_resp.status_code != 200:
console.log(f"[red] Something went wrong, we got status [white]{raw_resp.status_code}")
json_data = raw_resp.json()
console.log(f"Message Data: {json_data}")
return json_data | 129a8789a51db7a6e043fe6c8fbb30c1af984a74 | 5,161 |
def load_dataset(input_files,
input_vocab,
mode,
batch_size=32,
min_seq_len=5,
num_buckets=4):
"""Returns an iterator over the training data."""
def _make_dataset(text_files, vocab):
dataset = tf.data.TextLineDataset(text_files)
dataset = dataset.map(lambda x: tf.string_split([x]).values)
dataset = dataset.map(vocab.lookup)
return dataset
def _key_func(x):
if mode == constants.TRAIN:
bucket_width = 6
bucket_id = x["length"] // bucket_width
bucket_id = tf.minimum(bucket_id, num_buckets)
return tf.to_int64(bucket_id)
else:
return 0
def _reduce_func(unused_key, dataset):
return dataset.padded_batch(batch_size,
padded_shapes={
"ids": [None],
"ids_in": [None],
"ids_out": [None],
"ids_in_out": [None],
"length": [], },
)
bos = tf.constant([constants.START_OF_SENTENCE_ID], dtype=tf.int64)
eos = tf.constant([constants.END_OF_SENTENCE_ID], dtype=tf.int64)
# Make a dataset from the input and translated file.
input_dataset = _make_dataset(input_files, input_vocab)
dataset = tf.data.Dataset.zip(input_dataset)
if mode == constants.TRAIN:
dataset = dataset.shuffle(200000)
# Define the input format.
dataset = dataset.map(lambda x: {
"ids": x,
"ids_in": tf.concat([bos, x], axis=0),
"ids_out": tf.concat([x, eos], axis=0),
"ids_in_out": tf.concat([bos, x, eos], axis=0),
"length": tf.shape(x)[0]})
# Filter out invalid examples.
if mode == constants.TRAIN:
dataset = dataset.filter(lambda x: tf.greater(x["length"], min_seq_len - 1))
# Batch the dataset using a bucketing strategy.
dataset = dataset.apply(tf.contrib.data.group_by_window(
_key_func,
_reduce_func,
window_size=batch_size))
return dataset.make_initializable_iterator() | b71c6c8aa1bd2143c911fdd9e7e4ec1526656a39 | 5,162 |
def _get_results(**kwargs):
"""
Generate a command with the parameters, run it, and return the
normalized results
"""
output, error, rc = testoob.run_cmd.run_command(_generate_command(**kwargs))
return tt._normalize_newlines(output), tt._normalize_newlines(error), rc | 83dc64973fe4cfafd56391186361d3dbcc485f7d | 5,163 |
import numpy as np

def infer_from_discretized_mix_logistic(params):
"""
Sample from discretized mixture of logistic distributions
Args:
params (Tensor): B x C x T, [C/3,C/3,C/3] = [logit probs, means, log scales]
Returns:
Tensor: sample in range of [-1, 1].
"""
log_scale_min = float(np.log(1e-14))
assert params.shape[1] % 3 == 0
nr_mix = params.shape[1] // 3
# B x T x C
y = params #np.transpose(params, (1, 0))
logit_probs = y[:, :nr_mix]
temp = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=logit_probs.shape)
temp = logit_probs - np.log(- np.log(temp))
argmax = np.argmax(temp, axis=-1)
one_hot = get_one_hot(argmax, nr_mix).astype(dtype=float)
means = np.sum(y[:, nr_mix:2 * nr_mix] * one_hot, axis=-1)
log_scales = np.clip(np.sum(
y[:, 2 * nr_mix:3 * nr_mix] * one_hot, axis=-1), a_min=log_scale_min, a_max=None)
u = np.random.uniform(low=1e-5, high=1.0 - 1e-5, size=means.shape)
x = means + np.exp(log_scales) * (np.log(u) - np.log(1. - u))
x = np.clip(x, a_min=-1., a_max=1.)
return x | 993e5c64abd0b623057256b868c7e94570e28574 | 5,164 |
import pandas as pd
from pathlib import Path

def _load_reft_data(reft_file, index_name="btl_fire_num"):
"""
Loads reft_file to dataframe and reindexes to match bottle data dataframe
"""
reft_data = pd.read_csv(reft_file, usecols=["btl_fire_num", "T90", "REFTMP_FLAG_W"])
reft_data = reft_data.set_index(index_name)
reft_data["SSSCC_TEMP"] = Path(reft_file).stem.split("_")[0]
reft_data["REFTMP"] = reft_data["T90"]
return reft_data | c9ae2a9d5212f5d9234fc95fb4cc008688db07b4 | 5,166 |
def commit_veto(environ, status, headers):
"""Veto a commit.
This hook is called by repoze.tm in case we want to veto a commit
for some reason. Return True to force a rollback.
By default we veto if the response's status code is an error code.
Override this method, or monkey patch the instancemethod, to fine
tune this behaviour.
"""
return not 200 <= int(status.split(None, 1)[0]) < 400 | 9fc96fe8cdbedde20cb325e189b71d9df94cf176 | 5,167 |
from functools import wraps

def rate_limited_api(view_func):
"""
Checks users last post to rate limited endpoints
(adding comments or recipes) and rejects if within timeout period
for api requests (returns JSON response)
"""
@wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
exceeded, msg = request.user.profile.rate_limit_exceeded()
if exceeded:
return JsonResponse({"error": msg})
else:
return view_func(request, *args, **kwargs)
return _wrapped_view | 3db16cd742339015efbbb9016a32a91e902453a3 | 5,168 |
from .tvdfunctions import CalculateTVD
from .backend import fetchoptions as fo
from .backend.exceptions import TVDLimiterFunctionInputError
def SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps=0.01):
"""Return the numerical solution of dependent variable in the model eq.
This function uses the
explicit second-order TVD method and their various
Limiter functions and Limiters
to obtain the solution of the 1D non-linear viscous Burgers equation.
Call signature:
SecondOrderTVD(Uo, Courant, diffX, LimiterFunc, Limiter, Eps)
Parameters
----------
Uo: ndarray[float], =1d
The dependent variable at time level, n within the entire domain.
(Non-dimensionalized quantity)
Courant: float
Courant number that appears in the convection component of the PDE.
diffX: float
Diffusion number for x-component that appears in the diffusion
component of the PDE.
LimiterFunc: str
Flux limiter function.
Limiter:
Limiter type.
Eps: float, optional
A positive constant in the entropy correction term, si in Eq. 6-127
in CFD Vol. 1 by Hoffmann. Its value must be between 0 and 0.125.
Default is 0.1.
Returns
-------
U: ndarray[float], =1d
The dependent variable at time level, n+1 within the entire domain.
(Non-dimensionalized quantity)
"""
shapeU = Uo.shape # Obtain Dimension
if len(shapeU) == 2:
raise DimensionError("2D", "viscous Bergers", "second-order TVD")
iMax, = shapeU
U = Uo.copy() # Initialize U
E = Uo*Uo/2
fetch = fo.FetchOptions()
limfunc_options = fetch.TVDLimiterFunctionOptions()
if LimiterFunc not in limfunc_options:
raise TVDLimiterFunctionInputError(LimiterFunc)
for i in range(2, iMax-2):
phiPlus, phiMinus = CalculateTVD(i, Uo, E, Eps, Courant,
Limiter, LimiterFunc)
# Equation 6-124 and 6-125 in Hoffmann Vol. 1
hPlus = 0.5 * (E[i+1]+E[i]+phiPlus)
hMinus = 0.5 * (E[i]+E[i-1]+phiMinus)
# Calculate diffusion terms in the viscous Bergers equation.
# Equation 7-58
diffusion = diffX*(Uo[i+1] - 2.0*Uo[i] + Uo[i-1])
# Equation 6-123
U[i] = Uo[i] - Courant*(hPlus-hMinus) + diffusion
return U | 3433d3af49d1972868af7e21f02249c82de1a549 | 5,169 |
def login_required(func):
""" Allow only auth users """
async def wrapped(self, *args, **kwargs):
if self.request.user is None:
add_message(self.request, "LogIn to continue.")
redirect(self.request, "sign_in")
return await func(self, *args, **kwargs)
return wrapped | 80837caa726ce46e4728141208a575b25fe5dcb6 | 5,170 |
def _hunnyb_search_func(name):
"""search function required by ``codecs.register``"""
if name in (HUNNYB_ENC_NAME,) + HB_ALIASES:
return (_encode, _decode, None, None) | c9b1a6b68da2706d7568858d7211593e0bfa4086 | 5,171 |
from hashlib import md5

def fingerprint_file(file):
"""Open, read file and calculate MD5 on its contents"""
with open(file,'rb') as fd:
# read contents of the file
_file_data = fd.read()
# pipe contents of the file through
file_fingerprint = md5(_file_data).hexdigest()
return file_fingerprint | 030412ad6a057b2cd2aae4032e6122df73817e41 | 5,172 |
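Usage sketch (illustrative), hashing a small temporary file:

import tempfile

with tempfile.NamedTemporaryFile(delete=False, suffix=".txt") as tmp:
    tmp.write(b"hello world\n")
    path = tmp.name

print(fingerprint_file(path))  # 32-character hexadecimal MD5 digest of the contents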
def toOTLookup(self, font, ff):
"""Converts a fontFeatures.Routine object to binary.
Args:
font: A ``TTFont`` object.
ff: The parent ``FontFeatures`` object containing this routine.
Returns a list of ``fontTools.otlLib.builder`` Builder objects allowing this
routine to be converted to binary layout format.
"""
lookuptypes = [x.lookup_type() for x in self.rules]
if not all([lu == lookuptypes[0] for lu in lookuptypes]):
raise ValueError("For now, a routine can only contain rules of the same type")
if not all([self.rules[0].flags == rule.flags for rule in self.rules]):
raise ValueError("For now, a routine can only contain rules of the same flags")
self.flags = self.rules[0].flags
if self.stage == "pos":
return buildPos(self, font, lookuptypes[0], ff)
if self.stage == "sub":
return buildSub(self, font, lookuptypes[0], ff) | ea08870cfec146135584bb8e85f2e861adfa3e05 | 5,173 |
def apply_to_all(func, results, datasets):
"""Apply the given function to all results
Args:
func: the function to apply
results: nested dictionary where the nested levels are: algorithm name, sensitive attribute
and split ID
datasets: nested dictionary where the nested levels are: sensitive attribute and split ID
Returns:
a nested dictionary with the same structure as `results` that contains the output of the
given function
"""
output = {}
for algo in results:
output[algo] = {}
for sensitive in results[algo]:
output[algo][sensitive] = {}
for split_id in results[algo][sensitive]:
output[algo][sensitive][split_id] = func(
results[algo][sensitive][split_id], datasets[sensitive][split_id])
return output | 6ea085b3541a84ac97f63389ba83c3a06d5e0b85 | 5,174 |
def any_value_except(mapping, excluded_keys):
"""Return a random value from a dict that is not associated with
excluded_key. Raises StopIteration if there are no other keys than
excluded_key"""
return next(mapping[key] for key in mapping if key not in excluded_keys) | 8d633713b93cfd1f0324d5c4a56a18fa7931ff06 | 5,175 |
import torch
def one_hot(y, num_dim=10):
"""
One Hot Encoding, similar to `torch.eye(num_dim).index_select(dim=0, index=y)`
:param y: N-dim tenser
:param num_dim: do one-hot labeling from `0` to `num_dim-1`
:return: shape = (batch_size, num_dim)
"""
one_hot_y = torch.zeros(y.size(0), num_dim)
if y.is_cuda:
one_hot_y = one_hot_y.cuda()
return one_hot_y.scatter_(1, y.view(-1, 1), 1.) | 694bfea18ecbb5c5737e0d38c0aa0f5f52a82a55 | 5,176 |
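Example (illustrative): each label index becomes a row with a single 1.0 at that position.

y = torch.tensor([0, 2, 1])
print(one_hot(y, num_dim=4))
# tensor([[1., 0., 0., 0.],
#         [0., 0., 1., 0.],
#         [0., 1., 0., 0.]])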
def IdentityMatrix():
"""Creates an identity rotation matrix.
Returns a rotation matrix that has no effect on orientation.
This matrix can be the starting point for other operations,
such as using a series of calls to #Pivot to
create a custom rotation matrix.
Returns
-------
RotationMatrix
The identity rotation matrix.
"""
return RotationMatrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]) | f156a67000fb36360134d3c696dc9caefebf736a | 5,178 |
import numpy as np

def compute_K_from_vanishing_points(vanishing_points):
"""Compute intrinsic matrix given vanishing points.
Args:
vanishing_points: A list of vanishing points.
Returns:
K: The intrinsic camera matrix (3x3 matrix).
"""
# vanishing points used
v1 = vanishing_points[0]
v2 = vanishing_points[1]
v3 = vanishing_points[2]
# construct constraint matrix A from each pair of vanishing points
A = np.zeros((3, 3))
# 1 + 2
vi = v1
vj = v2
A[0] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# 1 + 3
vi = v1
vj = v3
A[1] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# 2 + 3
vi = v2
vj = v3
A[2] = np.array([(vi[0]*vj[0]+vi[1]*vj[1]), (vi[0]+vj[0]), (vi[1]+vj[1])])
# add one column of ones
A_ones = np.ones((A.shape[0], 1))
A = np.hstack((A, A_ones))
# SVD
U, s, VT = np.linalg.svd(A)
w = VT[-1, :]
omega = np.array([[w[0], 0, w[1]],
[0, w[0], w[2]],
[w[1], w[2], w[3]]])
# find K matrix from omega
KT_inv = np.linalg.cholesky(omega)
K = np.linalg.inv(KT_inv.T)
# normalize
K /= K[2, 2]
return K | 972cba32caee46d9d9c7ed30a3f4ad23bfafe070 | 5,179 |
import numpy as np

def _tpd2vec(seq, dtype=float):
"""
Convert a tpd file string to a vector, return a NumPy array.
EXAMPLES:
>>> _tpd2vec('1|13|4; 20; 25|28')
array([ 1., 5., 9., 13., 20., 25., 26., 27., 28.])
>>> _tpd2vec('5.5; 1.2@3; 3|7|2')
array([ 5.5, 1.2, 1.2, 1.2, 3. , 5. , 7. ])
>>> _tpd2vec(' ')
array([], dtype=float64)
"""
finalvec = np.array([], dtype)
for s in seq.split(';'):
if s.count('|'):
values = [dtype(v) for v in s.split('|')]
values[1] += 1
vec = np.arange(*values)
elif s.count('@'):
value, num = s.split('@')
try:
vec = np.ones(int(num)) * dtype(value)
except ValueError:
raise ValueError('%s is incorrectly specified' % seq)
else:
try:
vec = [dtype(s)]
except ValueError:
vec = np.array([], dtype)
finalvec = np.append(finalvec, vec)
return finalvec | c561852d27025fc4f7db7f027fba0e18b2ca157c | 5,181 |
from typing import Dict
def get_notification_html(*, notification_type: str, options: Dict, sender: str) -> str:
"""
Returns the formatted html for the notification based on the notification_type
:return: A string representing the html markup to send in the notification
"""
validate_options(options=options)
url_base = app.config['FRONTEND_BASE']
resource_url = '{url_base}{resource_path}?source=notification'.format(resource_path=options.get('resource_path'),
url_base=url_base)
joined_chars = resource_url[len(url_base) - 1:len(url_base) + 1]
if joined_chars.count('/') != 1:
raise Exception('Configured "FRONTEND_BASE" and "resource_path" do not form a valid url')
notification_strings = NOTIFICATION_STRINGS.get(notification_type)
if notification_strings is None:
raise Exception('Unsupported notification_type')
greeting = 'Hello,<br/>'
notification = notification_strings.get('notification', '').format(resource_url=resource_url,
resource_name=options.get('resource_name'),
sender=sender)
comment = notification_strings.get('comment', '')
end_note = notification_strings.get('end_note', '')
salutation = '<br/>Thanks,<br/>Amundsen Team'
if notification_type == NotificationType.METADATA_REQUESTED:
options_comment = options.get('comment')
need_resource_description = options.get('description_requested')
need_fields_descriptions = options.get('fields_requested')
if need_resource_description and need_fields_descriptions:
notification = notification + 'and requests improved table and column descriptions.<br/>'
elif need_resource_description:
notification = notification + 'and requests an improved table description.<br/>'
elif need_fields_descriptions:
notification = notification + 'and requests improved column descriptions.<br/>'
else:
notification = notification + 'and requests more information about that resource.<br/>'
if options_comment:
comment = ('<br/>{sender} has included the following information with their request:'
'<br/>{comment}<br/>').format(sender=sender, comment=options_comment)
if notification_type == NotificationType.DATA_ISSUE_REPORTED:
greeting = 'Hello data owner,<br>'
data_issue_url = options.get('data_issue_url')
comment = comment.format(data_issue_url=data_issue_url)
return '{greeting}{notification}{comment}{end_note}{salutation}'.format(greeting=greeting,
notification=notification,
comment=comment,
end_note=end_note,
salutation=salutation) | 7996c8f472de89498b04ed6563b893381f680209 | 5,182 |
def step(parents: be.Population, fitness: be.Fitness) -> tuple:
"""
The step function defines how an algorithm generation will be conducted. This function must receive a population and
a fitness object and return another population. In this case we will define the parameters of the algorithm within
the function itself and use report objects to monitor the evolution of the population.
In this algorithm the main steps consists of:
1. Get elite -> Elite
2. Apply tournament selection -> Best individuals
3. Apply one point cross over to best individuals -> Offspring
4. Mutate offspring
5. Evaluate offspring
6. Annihilate worst individuals in offspring and replace them with the best.
7. Merge elite and offspring -> Population for next generation
"""
# Put parameters
recombination_schema = 'one_point_i' # Alternatives: 'n_point_i' or 'uniform_i'
mutation_schema = 'random_resetting' # Alternatives: 'creep'
mutation_probability = 0.1
max_mutation_events = 2
ranking_selection_schema = 'tournament' # Alternatives: 'roulette' or 'sus'
tournament_k = 2
tournament_w = 1
tournament_replacement = False
elitism_percentage = 0.1
# Get elite
elite = be.survivor_selection(population=parents, schema='elitism', select=elitism_percentage)
# Apply selection to get the mating pool
mating_pool = be.ranking_selection(
population=parents, n=len(parents) - len(elite), schema=ranking_selection_schema,
w=tournament_w, k=tournament_k, replacement=tournament_replacement)
# Generate offspring
offspring = be.recombination(population=mating_pool, n=len(mating_pool), schema=recombination_schema)
# Mutate offspring
be.mutation(population=offspring, probability=mutation_probability, schema=mutation_schema,
max_mutation_events=max_mutation_events)
# Evaluate offspring
be.evaluate_parallel(population=offspring, fitness_function=fitness)
# Merge elite and offspring
next_generation = be.merge_populations(offspring, elite)
report.create_report(population=next_generation, population_name='Population', increment_generation=True)
# With this indicator we keep the best solution of each generation
return next_generation, be.SaveBestSolution(next_generation) | 700c5a9a28145b9454fc68356eab328a84418461 | 5,183 |
def asset_dividend_record(self, **kwargs):
"""Asset Dividend Record (USER_DATA)
Query asset dividend record.
GET /sapi/v1/asset/assetDividend
https://binance-docs.github.io/apidocs/spot/en/#asset-dividend-record-user_data
Keyword Args:
asset (str, optional)
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 20, max 500
recvWindow (int, optional): The value cannot be greater than 60000
"""
return self.sign_request("GET", "/sapi/v1/asset/assetDividend", kwargs) | 80ecbf4f03bb4431829130f3da546b937cf53d13 | 5,184 |
def heuristic(node_1, node_2):
""" Heuristic when only 4 directions are posible (Manhattan) """
(x_node_1, y_node_1) = node_1
(x_node_2, y_node_2) = node_2
return abs(x_node_1 - x_node_2) + abs(y_node_1 - y_node_2) | e431ed9d8a7acb34604b3e83c3f3d7774cd27d51 | 5,185 |
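Example values (illustrative):

print(heuristic((0, 0), (3, 4)))  # 7
print(heuristic((2, 5), (2, 5)))  # 0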
def exercise_2(inputs): # DO NOT CHANGE THIS LINE
"""
Output should be the name of the class.
"""
output = Party
return output # DO NOT CHANGE THIS LINE | 6bf574921760aa2569d0a44ced8b9a3712d67faa | 5,186 |
def undistort(img, mtx, dist):
"""Undistort an image using camera matrix and distortion coefficients"""
h, w = img.shape[:2]
# return undistorted image with minimum unwanted pixels. It's okay to remove some pixesl at image corners.
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 0, (w,h))
undist = cv2.undistort(img, mtx, dist, None, newcameramtx)
return undist | e8d32a8662a998c90f856116b97e555f2bdfeee4 | 5,187 |
def get_order(order_id, sandbox=False):
"""Get a single order using the Sell Fulfillment API."""
return single_api_call('sell_fulfillment_get_order', order_id=order_id,
field_groups='TAX_BREAKDOWN', sandbox=sandbox) | 74054dc63e6d57f162f6099389fa9c1870d8e08d | 5,188 |
def extend_dict(x, *y):
"""Similar to Object.assign() / _.extend() in Javascript, using
'dict.update()'
Args:
x (dict): the base dict to merge into with 'update()'
*y (dict, iter): any number of dictionary or iterable key/value
pairs to be sequentially merged into 'x'. Skipped if None.
"""
z = x.copy()
for d in [d for d in y if d is not None]:
z.update(d)
return z | f10a5bc7d5ed3646e6a9f8f9535a16bd800c7fcd | 5,189 |
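Usage sketch (illustrative): later arguments win for duplicate keys, None entries are skipped, and the base dict is not mutated.

base = {"a": 1, "b": 2}
override = {"b": 20}
extra = [("c", 3)]  # iterables of key/value pairs also work, via dict.update()

print(extend_dict(base, override, None, extra))  # {'a': 1, 'b': 20, 'c': 3}
print(base)                                      # {'a': 1, 'b': 2}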
def ErrorCriteria(errors):
"""Monitor the number of unexpected errors logged in the cluster. If more than five
errors have occurred on the cluster during this time period, post an alert. Posts a
warning if between one and four errors have occurred.
"""
ERROR_ALERT_THRESHOLD = 5
alerts = []
warnings = []
if errors['cluster_total'] > ERROR_ALERT_THRESHOLD:
alerts.append(CLUSTER_TOKEN)
elif errors['cluster_total'] > 0:
warnings.append(CLUSTER_TOKEN)
return alerts, warnings | 0b388ca55009bb5219bd30ead91ce67521c0e743 | 5,190 |
def bdnyc_skyplot():
"""
Create a sky plot of the database objects
"""
# Load the database
db = astrodb.Database('./database.db')
t = db.query('SELECT id, ra, dec, shortname FROM sources', fmt='table')
# Convert to Pandas data frame
data = t.to_pandas()
data.index = data['id']
# Remove objects without RA/Dec
num_missing = np.sum(pd.isnull(data['ra']))
if num_missing > 0:
warning_message = 'Note: {} objects had missing coordinate information and were removed.'.format(num_missing)
data = data[pd.notnull(data['ra'])]
else:
warning_message = ''
# Coerce to numeric
data['ra'] = pd.to_numeric(data['ra'])
data['dec'] = pd.to_numeric(data['dec'])
# Coordinate conversion
c = SkyCoord(ra=data['ra'] * u.degree, dec=data['dec'] * u.degree)
pi = np.pi
proj = 'hammer'
data['x'], data['y'] = projection(c.ra.radian - pi, c.dec.radian, use=proj)
data['l'], data['b'] = c.galactic.l, c.galactic.b
# Make the plots
p1 = make_sky_plot(data, proj)
data['x'], data['y'] = projection(c.galactic.l.radian - pi, c.galactic.b.radian, use=proj)
p2 = make_sky_plot(data, proj)
tab1 = Panel(child=p1, title="Equatorial")
tab2 = Panel(child=p2, title="Galactic")
tabs = Tabs(tabs=[tab1, tab2])
script, div = components(tabs)
return render_template('skyplot.html', script=script, plot=div, warning=warning_message) | 7ceba0d0b5cf151e5629fefa943fa1a48f62d430 | 5,191 |
def get_model_config(model_name, dataset, params):
"""Map model name to model network configuration."""
model_map = _get_model_map(dataset.name)
if model_name not in model_map:
raise ValueError('Invalid model name \'%s\' for dataset \'%s\'' %
(model_name, dataset.name))
else:
return model_map[model_name](params=params) | 88ce2fbb3415b0d5fa2348b9f9ba5dd029e49a73 | 5,192 |
def post_search(request):
"""Allow text matching search. """
form = SearchForm()
query = None
results = []
if 'query' in request.GET: # check if result is submitted by looking for query
form = SearchForm(request.GET)
if form.is_valid():
query = form.cleaned_data['query']
# results = Post.objects.annotate(search=SearchVector('title','body'),).filter(search=query)
# a search is more relevant if the search term is in the title
"""
Search weights are D,C,B and A corresponding to 0.1,0.2,0.4 and 1.0
"""
search_vector = SearchVector('title', weight='A') + SearchVector('body',weight='B')
search_query = SearchQuery(query)
# filter results to display only the ones ranking higher than 0.3
results = Post.objects.annotate(search=search_vector,rank=SearchRank(search_vector,search_query)
).filter(rank__gte=0.3).order_by('-rank')
return render(request,'blog/post/search.html', {'form':form, 'query':query, 'results':results}) | ff6f36f28a0dbaaba8957049eb2fc64ff76470dc | 5,193 |
import numba
def PrimacyCodingNumeric_receptor_activity_monte_carlo_numba_generator(conc_gen):
""" generates a function that calculates the receptor activity for a given
concentration generator """
func_code = receptor_activity_monte_carlo_numba_template.format(
CONCENTRATION_GENERATOR=conc_gen)
# make sure all necessary objects are in the scope
scope = {'np': np, 'nlargest_indices_numba': nlargest_indices_numba}
exec(func_code, scope)
func = scope['function']
return numba.jit(nopython=NUMBA_NOPYTHON, nogil=NUMBA_NOGIL)(func) | 8ca9758227fe6b7e57269e929a6a7dc4a7d6b549 | 5,194 |
def subtoken_counts(proposed, ground_truth):
"""
Compute the number of precise tokens, proposed tokens and ground truth tokens
from two strings representing tokens.
"""
gt_subtokens = set(compute_subtokens(ground_truth))
proposed_subtokens = set(compute_subtokens(proposed))
precise_subtokens = proposed_subtokens.intersection(gt_subtokens)
return len(precise_subtokens), len(proposed_subtokens), len(gt_subtokens) | 496abf452a09c521b71acfe2951232b5a4c7b40d | 5,195 |
import random
def welcome():
""" Define welcome reply """
hello = random.choice(_HELLO_)
nick = random.choice(_NICK_NAME_)
welcome = random.choice(_WELCOME_)
proposal = random.choice(_PROPOSAL_)
return hello + " " + nick + ", " + welcome + " ! " + proposal + " ?" | 87da460bde7bae59e54c108a68291d8c3b4258de | 5,196 |
def EDCN(linear_feature_columns,
dnn_feature_columns,
bridge_type='attention_pooling',
tau=0.1,
use_dense_features=True,
cross_num=2,
cross_parameterization='vector',
l2_reg_linear=1e-5,
l2_reg_embedding=1e-5,
l2_reg_cross=1e-5,
l2_reg_dnn=0,
seed=10000,
dnn_dropout=0,
dnn_use_bn=False,
dnn_activation='relu',
task='binary'):
"""Instantiates the Enhanced Deep&Cross Network architecture.
:param linear_feature_columns: An iterable containing all the features used by linear part of the model.
:param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
:param bridge_type: The type of bridge interaction, one of 'pointwise_addition', 'hadamard_product', 'concatenation', 'attention_pooling'
:param tau: Positive float, the temperature coefficient controlling the distribution of the field-wise gating unit
:param use_dense_features: Whether to use dense features; if True, dense features are projected into the sparse embedding space
:param cross_num: positive integer, number of cross layers
:param cross_parameterization: str, ``"vector"`` or ``"matrix"``, how to parameterize the cross network.
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_cross: float. L2 regularizer strength applied to cross net
:param l2_reg_dnn: float. L2 regularizer strength applied to DNN
:param seed: integer, to use as random seed.
:param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
:param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN
:param dnn_activation: Activation function to use in DNN
:param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
:return: A Keras model instance.
"""
if cross_num == 0:
raise ValueError("Cross layer num must > 0")
if bridge_type == 'pointwise_addition':
BridgeLayer = tf.keras.layers.Add
elif bridge_type == 'hadamard_product':
BridgeLayer = tf.keras.layers.Multiply
elif bridge_type == 'concatenation':
BridgeLayer = ConcatenationBridge
elif bridge_type == 'attention_pooling':
BridgeLayer = AttentionPoolingLayer
else:
raise NotImplementedError
print('EDCN bridge type: ', bridge_type)
features = build_input_features(dnn_feature_columns)
inputs_list = list(features.values())
linear_logit = get_linear_logit(features,
linear_feature_columns,
seed=seed,
prefix='linear',
l2_reg=l2_reg_linear)
sparse_embedding_list, dense_value_list = input_from_feature_columns(
features, dnn_feature_columns, l2_reg_embedding, seed)
# project dense value to sparse embedding space, generate a new field feature
if use_dense_features:
sparse_embedding_dim = sparse_embedding_list[0].shape[-1]
dense_value_feild = concat_func(dense_value_list)
dense_value_feild = DNN([sparse_embedding_dim], dnn_activation,
l2_reg_dnn, dnn_dropout,
dnn_use_bn)(dense_value_feild)
dense_value_feild = tf.expand_dims(dense_value_feild, axis=1)
sparse_embedding_list.append(dense_value_feild)
deep_in = sparse_embedding_list
cross_in = sparse_embedding_list
field_size = len(sparse_embedding_list)
cross_dim = field_size * cross_in[0].shape[-1]
for i in range(cross_num):
deep_in = RegulationLayer(tau)(deep_in)
cross_in = RegulationLayer(tau)(cross_in)
cross_out = CrossNet(1, parameterization=cross_parameterization,
l2_reg=l2_reg_cross)(deep_in)
deep_out = DNN([cross_dim], dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed=seed)(cross_in)
bridge_out = BridgeLayer()([cross_out, deep_out])
bridge_out_list = tf.split(tf.expand_dims(bridge_out, axis=1), field_size, axis=-1)
deep_in = bridge_out_list
cross_in = bridge_out_list
stack_out = tf.keras.layers.Concatenate()(
[cross_out, deep_out, bridge_out])
final_logit = tf.keras.layers.Dense(1, use_bias=False,
kernel_initializer=tf.keras.initializers.glorot_normal(seed))(stack_out)
final_logit = add_func([final_logit, linear_logit])
output = PredictionLayer(task)(final_logit)
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model | 61e3f6868613111001420d88b8c9b99f91361653 | 5,197 |
def cart_to_polar(arr_c):
"""Return cartesian vectors in their polar representation.
Parameters
----------
arr_c: array, shape (a1, a2, ..., d)
Cartesian vectors, with last axis indexing the dimension.
Returns
-------
arr_p: array, shape of arr_c
Polar vectors, using (radius, inclination, azimuth) convention.
"""
if arr_c.shape[-1] == 1:
arr_p = arr_c.copy()
elif arr_c.shape[-1] == 2:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
elif arr_c.shape[-1] == 3:
arr_p = np.empty_like(arr_c)
arr_p[..., 0] = vector_mag(arr_c)
arr_p[..., 1] = np.arccos(arr_c[..., 2] / arr_p[..., 0])
arr_p[..., 2] = np.arctan2(arr_c[..., 1], arr_c[..., 0])
else:
raise Exception('Invalid vector for polar representation')
return arr_p | c4c2256fcc9b01849dc4012ceac017273dcc4ddb | 5,198 |
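A usage sketch for cart_to_polar, assuming the helper vector_mag (not shown) is the Euclidean norm along the last axis and lives in the same module.
import numpy as np
def vector_mag(v):
    # assumed behaviour of the missing helper
    return np.sqrt(np.sum(v ** 2, axis=-1))
v = np.array([[1.0, 1.0]])
p = cart_to_polar(v)
assert np.allclose(p, [[np.sqrt(2.0), np.pi / 4.0]])  # (radius, azimuth)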
def createList(listSize):
"""
Creates list block that creates input instances for each element and an output instance for connecting to
the resulting list. List size is limited to 300 elements. Larger lists will be truncated.
:param listSize: The size of the list of point inputs that will be created
:return: A dict with the list of input instances ("inputs") and the output of the list block ("output")
"""
listInst = psc.createInstance("ListBlock", "ListInstance")
inputInstances = [None] * listSize
psc.connect(psc.Constant((listSize, 0, 0)), listInst.size)
for i in range(listSize):
inputInstances[i] = getattr(listInst, "element" + str(i))
return {"inputs":inputInstances, "output":listInst.out} | 91b508674ad6f26e9e7dd43cb372fb0804db7ccd | 5,199 |
def trash_description(spl, garbage, keyword, description="description_1"):
"""description_1 OR description_2"""
relocate = spl[spl[description].str.contains(keyword, na=False, regex=True)]
spl = spl[~spl[description].str.contains(keyword, na=False, regex=True)]
garbage = pd.concat([garbage, relocate], ignore_index=True, sort=False)
return (spl, garbage, relocate) | 16a1512ddaf914bd5ebcd00f2dcdfa11d59ec73c | 5,201 |
import random
def prepositionalPhrase():
"""Builds and returns a prepositional phrase."""
return random.choice(prepositions) + " " + nounPhrase() | 33a6f1111f752c160ef90eedde4bf56b79b1100a | 5,202 |
def check_possible_dtype(df):
"""Guess dtypes for each column in a dataframe, where dataframe must contains only string values.
Raise an exception if dataframe contains non-string values.
:param df: a DataFrame whose all values must be strings.
"""
column = []
int_cnt = []
dec_cnt = []
str_cnt = []
d = {"column": column, "int_cnt": int_cnt, "dec_cnt": dec_cnt, "str_cnt": str_cnt}
for i in df.columns:
ser = df[i].drop_duplicates()
column.append(i)
int_cnt.append(ser.apply(lambda x: is_int_str(x)).sum())
dec_cnt.append(ser.apply(lambda x: is_dec_str(x)).sum())
str_cnt.append(ser.apply(lambda x: not is_number_str(x)).sum())
dtype_options_df = pd.DataFrame(d, columns=["column", "int_cnt", "dec_cnt", "str_cnt"])
# Best-effort guess on dtype
guessed_dtype = dtype_options_df.apply(guess_dtype, axis=1).rename("guessed_type_for_non_nan")
return pd.concat([dtype_options_df, guessed_dtype], axis=1) | 0e9759959af04fbf1bb9db3672f6a188afe7f6ab | 5,203 |
from typing import List
def filter_objects_avoiding_duplicated(objects: List[Object],
max_distance: int = 20) -> List[Object]:
"""Filtra los objetos evitando aquellas posibles que sean detecciones múltiples.
El fundamento del algoritmo es que si se detectan dos objetos con un centroide muy cercano, a
una distancia máxima indicada por ``max_distance``, entonces es una detección múltiple.
El conflicto se resuelve eliminando las detecciones múltiple y escogiendo la que mejor
puntuación ha obtenido en la detección.
:param objects: lista de objetos.
:param max_distance: máxima distancia entre centros para considerar que ese objeto puede ser
un duplicado.
:return: lista de objetos filtrados.
"""
# Positions in 'objects' of the detections that have been removed.
removed_objects_id = list()
# Look for possible duplicate candidates for each object.
for obj_id, obj_detection in enumerate(objects):
for candidate_id, candidate_detection in enumerate(objects):
# Skip the object itself as a candidate.
if obj_id == candidate_id:
continue
# Skip the pair if either detection has already been removed.
if obj_id in removed_objects_id or candidate_id in removed_objects_id:
continue
# Compute the Euclidean distance between the two detections.
p = np.array(obj_detection.center)
q = np.array(candidate_detection.center)
distance = np.linalg.norm(p - q)
# If the distance is small, it may be the same object.
if distance <= max_distance:
# Remove the detection with the lower score.
if obj_detection.score > candidate_detection.score:
removed_objects_id.append(candidate_id)
else:
removed_objects_id.append(obj_id)
# Keep the objects that passed the filter.
objects_filtered: List[Object] = list()
for obj_id, obj_detection in enumerate(objects):
if obj_id not in removed_objects_id:
objects_filtered.append(obj_detection)
return objects_filtered | 042fee5df94dc1c72fb53635577c8006c57f73f9 | 5,204 |
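A usage sketch; any object exposing .center and .score can stand in for Object here.
from types import SimpleNamespace
a = SimpleNamespace(center=(100, 100), score=0.9)
b = SimpleNamespace(center=(105, 102), score=0.4)  # ~5 px from a -> treated as a duplicate
c = SimpleNamespace(center=(300, 300), score=0.7)
kept = filter_objects_avoiding_duplicated([a, b, c], max_distance=20)
assert kept == [a, c]  # the lower-scoring duplicate b is dropped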
def print_hdr(soup, hdr, file = None):
"""
:param soup: [bs4.BeautifulSoup] document context
:param hdr: [dict] header node to process
:param file: [stream] I/O stream to print to
:return: [stream] pass on the I/O stream so descent continues
"""
tag = hdr['tag']
tag_id = tag['id']
indent = (hdr['level'] - 1) * ' '
# do this replacement for (relative) readability
content_tags = ["<%s>" % (h.name) if h.name else h.string for h in hdr['content']]
print("%s%s - %s %s" % (indent, tag.name, tag_id, content_tags), file=file)
return file | 2c6fd613a5c6ddb5ec842fb7cee845d1a8771ccd | 5,207 |
from unittest.mock import Mock
def __empty_2():
""" Empty used as parent of cube_2 """
obj = Mock()
obj.name = 'empty_2'
obj.mode = 'OBJECT'
obj.to_mesh.return_value = None
obj.matrix_world = Matrix.Identity(4)
obj.visible_get.return_value = False
obj.hide_viewport = True
obj.hide_render = True
return obj | 024614d7967da5da6d6629167a20eda4188e812f | 5,208 |
def get_gradient(bf_data: np.ndarray, smooth=10):
"""
Removes the first dimension,
computes the 2D gradient of the image,
and applies a Gaussian filter.
Returns the smoothed gradient reshaped to the shape of ``bf_data``.
"""
data = strip_dimensions(bf_data)
gradient = get_2d_gradient(data)
smoothed_gradient = gaussian_filter(gradient, smooth)
# sm = multiwell.gaussian_filter(well, smooth)
return smoothed_gradient.reshape(bf_data.shape) | 864b3bc118d08099c56657b2f2883e20de5c663e | 5,210 |
def sum_seq(seq):
""" Lambda wrapper for sum. """
return K.sum(seq, axis=1, keepdims=False) | e2bf342f6cda9bda50dc15814c7808a42e8a9925 | 5,211 |
def split_by_time(files_rad):
"""Separate a list of files by their timestamp"""
out = {}
if type(files_rad) == dict:
for k in files_rad.keys():
out[k] = _split_by_time(files_rad[k])
else:
out = _split_by_time(files_rad)
return out | 9a77b3db2e21c27198337b1a1852494bca5acefb | 5,212 |
def make_general_csv_rows(general_csv_dict):
"""
Build a list of metric rows from the general metrics dict.
The rows are used by the general metrics writer.
:param general_csv_dict: dict with all metrics
:type general_csv_dict: dict
:return: all metrics as rows
:rtype: list
"""
rows = []
for key, value in general_csv_dict.items():
row = [key[0], key[1]]
row.extend(value)
rows.append(row)
return rows | 45ca165d312b39cd0b7088e0bcbfb402a92e7e2b | 5,213 |
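A usage sketch for make_general_csv_rows; the metric names and values are illustrative.
general_metrics = {
    ("train", "loss"): [0.41, 0.32, 0.27],
    ("valid", "accuracy"): [0.88, 0.91],
}
rows = make_general_csv_rows(general_metrics)
assert rows == [
    ["train", "loss", 0.41, 0.32, 0.27],
    ["valid", "accuracy", 0.88, 0.91],
]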
def build_hstwcs(crval1, crval2, crpix1, crpix2, naxis1, naxis2, pscale, orientat):
""" Create an HSTWCS object for a default instrument without distortion
based on user provided parameter values.
"""
wcsout = wcsutil.HSTWCS()
wcsout.wcs.crval = np.array([crval1,crval2])
wcsout.wcs.crpix = np.array([crpix1,crpix2])
wcsout.naxis1 = naxis1
wcsout.naxis2 = naxis2
wcsout.wcs.cd = fileutil.buildRotMatrix(orientat)*[-1,1]*pscale/3600.0
# Synchronize updates with PyWCS/WCSLIB objects
wcsout.wcs.set()
wcsout.setPscale()
wcsout.setOrient()
wcsout.wcs.ctype = ['RA---TAN','DEC--TAN']
return wcsout | 0247a8dc7e6aa083db50f21d82676216583be206 | 5,214 |
def build_regressor_for_ranking_positive_class(dataset, features, regression_target=TARGET_COLUMN):
"""This function builds a regressor based exclusively on positive class'
examples present in the dataset
"""
if regression_target in features:
print('The target for the regression task cannot be one of the features')
return
positive_examples = dataset.loc[dataset[TARGET_COLUMN] > ALPHA]
X = positive_examples[features]
y = positive_examples[regression_target]
regressor = RandomForestRegressor(random_state=20)
regressor.fit(X, y)
return regressor | 1312751425f79c1e4fec09f705f0ea551e2a60b3 | 5,215 |
def get_speakable_timestamp(timestamp):
"""Return a 'speakable' timestamp, e.g. 8am, noon, 9pm, etc."""
speakable = f"{timestamp.strftime('%I').lstrip('0')} {timestamp.strftime('%p')}"
if speakable == '12 PM':
return 'noon'
elif speakable == '12 AM':
return 'midnight'
return speakable | 0b724686ebd5d3152d9017dc456d2945c78be0ee | 5,216 |
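A usage sketch; note that strftime('%p') assumes an English locale for the 'AM'/'PM' text.
from datetime import datetime
assert get_speakable_timestamp(datetime(2021, 6, 1, 8, 0)) == "8 AM"
assert get_speakable_timestamp(datetime(2021, 6, 1, 12, 0)) == "noon"
assert get_speakable_timestamp(datetime(2021, 6, 1, 0, 30)) == "midnight"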
def createColor(red: int, green: int, blue: int) -> tuple:
"""
Create color
Parameters:
red -> 0-255
green -> 0-255
blue -> 0-255
"""
return (
max(min(red, 255), 0),
max(min(green, 255), 0),
max(min(blue, 255), 0)
) | 3e8ee43e9d458668f4312f9fd75050b5875036d7 | 5,217 |
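A usage sketch for createColor; out-of-range channels are clamped to the 0-255 range.
assert createColor(300, -20, 128) == (255, 0, 128)
assert createColor(10, 20, 30) == (10, 20, 30)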
from typing import List
def export_nodeclass_list(node_classes: List[NodeClass]) -> str:
"""Writes the Node data as a XML string. Does not write
to a file -- use ``with open(output_file) as out_stream:`` etc.
"""
# This is the data string, the rest is formalities
node_classes_string = '\n'.join([str(c) for c in node_classes])
lines = list()
lines.append('<?xml version="1.0" encoding="utf-8"?>')
lines.append('<NodeClasses noNamespaceSchema="mff-muscima-mlclasses.xsd">')
lines.append(node_classes_string)
lines.append('</NodeClasses>')
return '\n'.join(lines) | f50638e9b3a7ab2f1df6e49703b9ed3e39916f9d | 5,218 |
import time
def recognition(request):
"""
Image recognition service: run the model on an uploaded image and return its predictions.
"""
if request.method == 'POST':
name = ''
predicitons = ''
try:
# load image
now = time.localtime()
img = request.FILES['image']
image_name = '{}{}{}{}{}object.jpg'.format(now[1], now[2], now[3], now[4], now[5])
# get prediction
predicitons = predict_app(img)
# save to database
Image = ContentImage()
Image.name = 'static/images/predict/' + image_name
Image.save()
# save to disk
addr = BASE_DIR + 'predict/' + image_name
save_to_disk(addr, img)
image_url = 'images/predict/' + image_name
except Exception as e:
print(e)
return render(request, 'recognition/basic.html', {})
return render(request, 'recognition/basic.html', {'image_url':image_url, 'predictions': predicitons})
if request.method == 'GET':
return render(request, 'recognition/basic.html', {}) | d8de5ab5c33e6ca0c2ac5afbec81c402f7151187 | 5,219 |
def url(s):
"""Validate url input"""
u = urlparse(s)
if u.scheme not in ["http", "https"]:
raise ValueError(s)
return u.geturl() | 82683af4ad6fb35b6d74409a9a429c4dfd81a723 | 5,220 |
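A usage sketch for the url validator; the URLs are illustrative.
assert url("https://example.com/path?q=1") == "https://example.com/path?q=1"
try:
    url("ftp://example.com/file")
except ValueError:
    pass  # schemes other than http/https are rejected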
import pickle
def getGPLCs(df, savepath='./',plotpath='./', bands='ugrizY', ts='0000000', fn='GPSet'):
"""Short summary.
Parameters
----------
df : type
Description of parameter `df`.
savepath : type
Description of parameter `savepath`.
plotpath : type
Description of parameter `plotpath`.
bands : type
Description of parameter `bands`.
ts : type
Description of parameter `ts`.
fn : type
Description of parameter `fn`.
Returns
-------
type
Description of returned object.
"""
#num_bands = len(np.unique(band_idx))
Npt = 100
tmin = -30
tmax = 150
num_bands = len(bands)
GP_dict = {}
# make our plots look nice
stylePlots()
for idx, row in df.iterrows():
t = np.array(row["T"])
f = np.array(row["Flux"])
f[f<0.] = 0. #getting rid of negative flux
#the magnitude-like array for the sake of the conversion
y = np.log(f + 1)
yerr = np.array(row["Flux_Err"]) / np.array(row["Flux"])
t_test = np.linspace(tmin, tmax, Npt)
band = row["Filter"]
band_idx = pd.Series(row['Filter']).astype('category').cat.codes.values
matrix = [t_test]
def build_gp(params):
time_kernel = tinygp.kernels.Matern32(jnp.exp(params["log_scale"]))
kernel = Multiband(time_kernel, jnp.exp(params["log_diagonal"]), params["off_diagonal"])
diag = yerr ** 2 + jnp.exp(2 * params["log_jitter"][X[1]])
return tinygp.GaussianProcess(kernel, X, diag=diag, mean=lambda x: params["mean"][x[1]])
#the GP parameters
@jax.jit
def loss(params):
return -build_gp(params).condition(y)
X = (t, band_idx)
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(params)
gp = build_gp(soln.params)
df_t = []
df_flux = []
df_flux_err = []
df_filt = []
if idx%50 == 0:
plt.figure(figsize=(10,7))
for n in np.unique(band_idx):
m = band_idx == n
plt.errorbar(t[m], np.exp(y[m])-1,yerr=row['Flux_Err'][m], fmt="o", color=f"C{n}")
mu, var = gp.predict(y, X_test=(t_test, np.full_like(t_test, n, dtype=int)), return_var=True)
std = np.sqrt(var)
if idx%50 == 0:
plt.plot(t_test, np.exp(mu)-1, '.-', ms=2, color=f"C{n}")
plt.fill_between(t_test,np.exp(mu - std)-1, np.exp(mu + std)+1, color=f"C{n}", alpha=0.3, label=bands[n])
#going in order of band here--don't forget it!
matrix.append(np.exp(mu)-1)
matrix.append(std)
if idx%50 == 0:
plt.xlim((t_test[0], t_test[-1]))
plt.xlabel("Phase from Trigger (Days)")
plt.ylabel("Flux")
plt.legend()
plt.savefig(plotpath + "/GP_%i.png"%row.CID,dpi=200, bbox_inches='tight')
stacked = np.vstack(matrix)
GP_dict[row.CID] = stacked
with open(savepath + '/%s_%i.pkl'%(fn, ts), 'wb') as f:
pickle.dump(GP_dict, f)
return GP_dict | 755dec48771ae17c058565ef88087d6ec6a78aec | 5,221 |
import torch
def _featurize(inputs,model):
"""
Helper function used to featurize exemplars before feeding into
buffer.
"""
with torch.no_grad():
# Forward pass
outputs = model(*inputs).detach()  # featurize raw exemplars
return outputs | 191fd1b362f38309a35618284fcf3f1910a06bd6 | 5,222 |
def ligth_condition(img, args):
"""
Change ligthning condition in the image
Inputs:
img: Image to change ligthning
args: Dictionary with "gamma" argument
Return:
Image with ligthning values changed
"""
invGamma = 1.0 / args["gamma"]
table = np.array([((i / 255.0) ** invGamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
return cv2.LUT(img, table) | dc5273a1df8e13292147b00be45452a7ccf4a197 | 5,223 |
import numpy as np
from sklearn.metrics import mean_squared_error
def calc_RMSE(varx,vary,lats,lons,weight):
"""
Calculates the root-mean-square error, optionally weighted by latitude
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array of latitudes
lons : 1d array of longitudes
weight : string ('yes' or 'no')
Returns
-------
rmse : float
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons,weight)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
if weight == 'yes': # Compute latitude-weighted RMSE
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 30)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted RMSE!')
else:
raise ValueError('Wrong weight argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse | 150d08e0790f3a8ce59a2054cdc042ff6cdc2969 | 5,224 |
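A sketch of the unweighted branch; the arrays and coordinates are illustrative (lats/lons are only used when weight='yes').
import numpy as np
lats = np.linspace(-90, 90, 4)
lons = np.linspace(0, 360, 5, endpoint=False)
varx = np.zeros((4, 5))
vary = np.full((4, 5), 2.0)
rmse = calc_RMSE(varx, vary, lats, lons, weight='no')
assert np.isclose(rmse, 2.0)  # constant error of 2 everywhere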
def sample(internal_nodes, alpha=0.5, beta=0.5, only_tree=False):
""" Generates a junction tree with order internal nodes with the junction tree expander.
Args:
internal_nodes (int): number of nodes in the underlying graph
alpha (float): parameter for the subtree kernel
beta (float): parameter for the subtree kernel
directory (string): path to
Returns:
NetworkX graph: a junction tree
"""
nodes = None
if type(internal_nodes) is int:
nodes = range(internal_nodes)
else:
nodes = internal_nodes
tree = JunctionTree()
#from trilearn.graph.junction_tree_gt import JunctionTreeGT
#tree = JunctionTreeGT()
tree.add_node(frozenset([nodes[0]]))
# print tree.nodes()
# for n in tree.nodes():
# lab = tuple(n)
# if len(n) == 1:
# lab = "("+str(list(n)[0])+")"
# tree.node[n] = {"color": "black", "label": lab}
for j in nodes[1:]:
if only_tree:
jte.sample(tree, j, alpha, beta, only_tree=only_tree)
else:
(tree, _, _, _, _, _) = jte.sample(tree, j, alpha, beta, only_tree=only_tree)
#print("vert dict: " + str(tree.gp.vert_dict))
#print("nodes: " + str(list(tree.vp.nodes)))
return tree | d0cc00e7ad96491147149aa4be396af970a9f68f | 5,225 |
def _get_version_tuple():
"""
version as a tuple
"""
return major, minor, revision | 1d82390224de07964dce7c4e7fd3e32595b189a0 | 5,226 |