content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import math
def floatToJson(x):
"""Custom rule for converting non-finite numbers to JSON as quoted strings: ``"inf"``, ``"-inf"``, and ``"nan"``. This avoids Python's bad habit of putting literal ``Infinity``, ``-Infinity``, and ``NaN`` in the JSON (without quotes)."""
if x in ("nan", "inf", "-inf"):
return x
elif math.isnan(x):
return "nan"
elif math.isinf(x) and x > 0.0:
return "inf"
elif math.isinf(x):
return "-inf"
else:
return x | 938700c100d9176f6d950aee9ddf8f90109bedcc | 8,387 |
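# Usage sketch (added here for illustration; not part of the original snippet):
# mapping floatToJson over values before serialization keeps non-finite floats
# JSON-compliant as quoted strings.
import json
values = [1.5, float("nan"), float("inf"), float("-inf")]
print(json.dumps([floatToJson(v) for v in values]))  # [1.5, "nan", "inf", "-inf"]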
import math
def systematic_uncertainties():
"""tabulates sources of uncertainty and sums them in quadrature"""
result_m = [
0.066, # [0.07-0.12] 0.066 ± 0.019
0.019, # [0.12-0.20] 0.019 ± 0.009
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.006, # [0.30-0.45] -0.006 ± 0.014
0.007, # [0.45-0.65] 0.007 ± 0.023
0.012 # [0.65-1.00] 0.012 ± 0.040
]
result_p = [
0.026, # [0.07-0.12] 0.026 ± 0.019
0.021, # [0.12-0.20] 0.021 ± 0.008
0.002, # [0.20-0.30] 0.002 ± 0.009
-0.014, # [0.30-0.45] -0.014 ± 0.013
0.024, # [0.45-0.65] 0.024 ± 0.022
0.046 # [0.65-1.00] 0.046 ± 0.037
]
pid_contamination = 0.10
pid_asym_m = [
( 0.051 , 0.038), # [0.07-0.12] 0.051 ± 0.038
(-0.017 , 0.016), # [0.12-0.20] -0.017 ± 0.016
(-0.032 , 0.016), # [0.20-0.30] -0.032 ± 0.016
(-0.006 , 0.023), # [0.30-0.45] -0.006 ± 0.023
(-0.031 , 0.042), # [0.45-0.65] -0.031 ± 0.042
( 0.089 , 0.085) # [0.65-1.00] 0.089 ± 0.085
]
pid_asym_p = [
( 0.005 , 0.036), # [0.07-0.12] 0.005 ± 0.036
( 0.006 , 0.015), # [0.12-0.20] 0.006 ± 0.015
(-0.006 , 0.015), # [0.20-0.30] -0.006 ± 0.015
( 0.018 , 0.020), # [0.30-0.45] 0.018 ± 0.020
(-0.038 , 0.032), # [0.45-0.65] -0.038 ± 0.032
( 0.142 , 0.059) # [0.65-1.00] 0.142 ± 0.059
]
for i in range(len(pid_asym_m)):
val, err = pid_asym_m[i]
pid_asym_m[i] = max( val-result_m[i], err)
for i in range(len(pid_asym_p)):
val, err = pid_asym_p[i]
pid_asym_p[i] = max( val-result_p[i], err)
beam_vector = 0.0102
asigma_m = [
0.035, # [0.07-0.12] 0.005 ± 0.035
0.015, # [0.12-0.20] -0.012 ± 0.015
0.016, # [0.20-0.30] -0.014 ± 0.016
0.027, # [0.30-0.45] -0.027 ± 0.023
0.066, # [0.45-0.65] -0.066 ± 0.040
0.073 # [0.65-1.00] -0.072 ± 0.073
]
asigma_p = [
0.034, # [0.07-0.12] -0.001 ± 0.034
0.014, # [0.12-0.20] -0.007 ± 0.014
0.015, # [0.20-0.30] 0.007 ± 0.015
0.025, # [0.30-0.45] -0.025 ± 0.022
0.039, # [0.45-0.65] -0.039 ± 0.037
0.061 # [0.65-1.00] 0.033 ± 0.061
]
mcasym_m = [
0.0066, # [0.07-0.12] 0.0012 ± 0.0066
0.0057, # [0.12-0.20] 0.0057 ± 0.0025
0.0089, # [0.20-0.30] 0.0089 ± 0.0020
0.0077, # [0.30-0.45] 0.0077 ± 0.0026
0.0042, # [0.45-0.65] 0.0038 ± 0.0042
0.0070 # [0.65-1.00] 0.0053 ± 0.0070
]
mcasym_p = [
0.0047, # [0.07-0.12] -0.0014 ± 0.0047
0.0077, # [0.12-0.20] 0.0077 ± 0.0024
0.0147, # [0.20-0.30] 0.0147 ± 0.0023
0.0105, # [0.30-0.45] 0.0105 ± 0.0024
0.0057, # [0.45-0.65] 0.0057 ± 0.0044
0.0112 # [0.65-1.00] 0.0112 ± 0.0081
]
pt_shift_m = [ 0, 0,
0.003, # [0.20-0.30] 0.006 low, 0.001 high, 0.003 avg
0.005, # [0.30-0.45] 0.007 low, 0.003 high, 0.005 avg
0.016, # [0.45-0.65] 0.020 low, 0.012 high, 0.016 avg
0.010 # [0.65-1.00] 0.011 low, 0.008 high, 0.010 avg
]
pt_shift_p = [ 0, 0,
0.004, # [0.20-0.30] 0.005 low, 0.003 high, 0.004 avg
0.007, # [0.30-0.45] 0.008 low, 0.006 high, 0.007 avg
0.016, # [0.45-0.65] 0.023 low, 0.008 high, 0.016 avg
0.016 # [0.65-1.00] 0.012 low, 0.020 high, 0.016 avg
]
relative_luminosity = 9.4e-4
# note: 'zbins' (z-bin edges) is assumed to be defined at module level
minus = [0.0 for bin in zbins[:-1]]
plus = [0.0 for bin in zbins[:-1]]
start = 2 if len(zbins) == 5 else 0
for i in range(start, start+len(zbins)-1):
minus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_m[i], 2) +
pow(beam_vector*asigma_m[i], 2) +
pow(mcasym_m[i], 2) +
pow(pt_shift_m[i], 2)
)
plus[i-start] = math.sqrt(
pow(relative_luminosity, 2) +
pow(pid_contamination*pid_asym_p[i], 2) +
pow(beam_vector*asigma_p[i], 2) +
pow(mcasym_p[i], 2) +
pow(pt_shift_p[i], 2)
)
return {'minus':minus, 'plus':plus} | 71941441b09a593ebc2a3e396d2b86684bc75cfe | 8,390 |
def extract_metamap(json_, key):
"""
Task function to parse and extract concepts from a json-style dict, using
the MetaMap binary.
Input:
- json_ : dict,
json-style dictionary generated from the Parse object related
to the specific type of input
- key : str,
string denoting the type of medical text to read from. Used to
find the correct paragraph in the settings.yaml file.
Output:
- json_ : dict,
the previous json-style dictionary enriched with medical concepts
"""
# outerfield for the documents in json
docfield = settings['out']['json']['itemfield']
# textfield to read text from
textfield = settings['out']['json']['json_text_field']
N = len(json_[docfield])
for i, doc in enumerate(json_[docfield]):
text = clean_text(doc[textfield])
if len(text) > 5000:
chunks = create_text_batches(text)
results = {'text': text, 'sents': []}
sent_id = 0
for chunk in chunks:
tmp = metamap_wrapper(chunk)
for sent in tmp['sents']:
sent['sent_id'] = sent_id
sent_id += 1
results['sents'].append(sent)
else:
results = metamap_wrapper(text)
json_[docfield][i].update(results)
proc = int(i/float(N)*100)
if proc % 10 == 0 and proc > 0:
time_log('We are at %d/%d documents -- %0.2f %%' % (i, N, proc))
return json_ | 543b1470f36ee85dde2c2447ecc204544ef8fd52 | 8,391 |
def get_testinfo_by_reference(ref_name, ref_type):
""" get test content by reference name
@params:
ref_name: reference name, e.g. api_v1_Account_Login_POST($UserName, $Password)
ref_type: "api" or "suite"
"""
function_meta = parse_function(ref_name)
func_name = function_meta["func_name"]
call_args = function_meta["args"]
test_info = get_test_definition(func_name, ref_type)
def_args = test_info.get("function_meta").get("args", [])
if len(call_args) != len(def_args):
raise exception.ParamsError("call args mismatch defined args!")
args_mapping = {}
for index, item in enumerate(def_args):
if call_args[index] == item:
continue
args_mapping[item] = call_args[index]
if args_mapping:
test_info = substitute_variables_with_mapping(test_info, args_mapping)
return test_info | 967b32149ae094d2ef86cc88b2131047b98b1f09 | 8,392 |
import logging
from uuid import UUID
def game_info(uuid: str) -> dict:
"""
return info about game by uuid
:param uuid:
:return: message
"""
logging.info(uuid)
logging.info(games.keys())
if UUID(uuid) in games.keys():
select_game: Game = games.get(UUID(uuid))
return {
"uuid": uuid,
"start_time": select_game.start_time,
"field": select_game.field,
}
else:
return {"Error": f"{uuid} game not found!"} | 15faaab2f256830a0f2537ada4c742f703a74783 | 8,393 |
def miller_rabin(n, a):
"""
Miller-Rabin Primality Test
Returns true if n is a (probable) prime
Returns false if n is a composite number
"""
s = 0
d = n - 1
while d % 2 == 0:
s = s + 1
d = d >> 1
x = square_and_multiply(a, d, n)
if x != 1 and x + 1 != n:
for r in range(1, s):
x = square_and_multiply(x, 2, n)
if x == 1:
return False
elif x == n - 1:
a = 0
break
if a:
return False
return True | 2de6b54c05d4052e5d2c8fd915a0a2814a7da28f | 8,394 |
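# Usage sketch (added; assumes square_and_multiply(a, d, n) behaves like the
# built-in pow(a, d, n)): repeat the test with random bases, as is standard for
# the probabilistic Miller-Rabin test.
import random
def is_probable_prime(n, rounds=20):
    if n < 4:
        return n in (2, 3)
    if n % 2 == 0:
        return False
    return all(miller_rabin(n, random.randrange(2, n - 1)) for _ in range(rounds))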
def luhn_sum_v1(num):
"""
First version of luhn_sum; uses a list which it modifies in-place.
"""
nums = [int(i) for i in reversed(str(num))]
for i in xrange(1, len(nums), 2):
nums[i] *= 2
return sum(sum(divmod(i, 10)) for i in nums) | c2ff96069710a2321d9871608d0d9bbaddc18d30 | 8,395 |
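# Quick check (added; runs under Python 2 because the helper uses xrange): a
# number passes the Luhn test when its Luhn sum is divisible by 10; 79927398713
# is the commonly cited valid test number.
assert luhn_sum_v1(79927398713) % 10 == 0
assert luhn_sum_v1(79927398714) % 10 != 0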
def get_rate_discounted_rate(item_code, customer, company, so_number = None):
""" This function is use to get discounted rate and rate """
item_group = frappe.get_value("Item", item_code, 'item_group')
# parent_item_group = frappe.get_value("Item Group", item_group, 'parent_item_group')
count = frappe.db.sql(f"""
SELECT
COUNT(*)
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON so.`name` = soi.`parent`
WHERE
soi.`item_group` = '{item_group}' AND
soi.`docstatus` = 1 AND
so.customer = '{customer}' AND
so.`company` = '{company}'
LIMIT 1
""")
where_clause = ''
if count[0][0]:
where_clause = f"soi.item_group = '{item_group}' AND"
data = None
if so_number:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi
JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2 AND
so.`name` = '{so_number}'
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
if not data:
data = frappe.db.sql(f"""
SELECT
soi.`rate` as `rate`
FROM
`tabDelivery Note Item` as soi JOIN
`tabDelivery Note` as so ON soi.parent = so.name
WHERE
{where_clause}
so.`customer` = '{customer}' AND
so.`company` = '{company}' AND
so.`docstatus` != 2
ORDER BY
soi.`creation` DESC
LIMIT
1
""", as_dict = True)
return data[0] if data else {'rate': 0} | 8560bf5846a0500941840d59fb79cd721196a7ca | 8,396 |
import numpy as np
def sum_obs_np(A):
"""summation over axis 0 (obs) equivalent to np.sum(A, 0)"""
return np.einsum("ij -> j", A) if A.ndim > 1 else np.sum(A) | c14abc00b2ea6fa64c32fe7ac10f0007cb4705e8 | 8,398 |
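# Small check (added): the einsum reduction matches an explicit sum over axis 0,
# and 1-D input falls back to a scalar total.
A = np.arange(6, dtype=float).reshape(2, 3)
assert np.allclose(sum_obs_np(A), np.sum(A, 0))
assert np.isclose(sum_obs_np(np.array([1.0, 2.0, 3.0])), 6.0)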
def _macos_command_line_infoplist_impl(ctx):
"""Implementation of the internal `macos_command_line_infoplist` rule.
This rule is an internal implementation detail of
`macos_command_line_application` and should not be used directly by clients.
It merges Info.plists as would occur for a bundle but then propagates an
`objc` provider with the necessary linkopts to embed the plist in a binary.
Args:
ctx: The rule context.
Returns:
A `struct` containing the `objc` provider that should be propagated to a
binary that should have this plist embedded.
"""
bundle_id = ctx.attr.bundle_id
infoplists = ctx.files.infoplists
if ctx.attr.version and AppleBundleVersionInfo in ctx.attr.version:
version = ctx.attr.version[AppleBundleVersionInfo]
else:
version = None
if not bundle_id and not infoplists and not version:
fail("Internal error: at least one of bundle_id, infoplists, or version " +
"should have been provided")
plist_results = plist_actions.merge_infoplists(
ctx,
None,
infoplists,
bundle_id = bundle_id,
exclude_executable_name = True,
extract_from_ctxt = True,
include_xcode_env = True,
)
merged_infoplist = plist_results.output_plist
return [
linker_support.sectcreate_objc_provider(
"__TEXT",
"__info_plist",
merged_infoplist,
),
] | d70a47def85cff62fc21e907a9f0a246a9b1c192 | 8,399 |
def get_custom_data_format(*args):
"""
get_custom_data_format(dfid) -> data_format_t
Get definition of a registered custom data format.
@param dfid: data format id (C++: int)
@return: data format definition or NULL
"""
return _ida_bytes.get_custom_data_format(*args) | 8b8751f94a409dc656efbe8131de66a0916be9ea | 8,400 |
def memory(info, func, expr):
"""
checks if the function has been called with the same argument previously and
if so, returns the same results instead of running the function again
args:
- info: cache object with an ``evaluated`` dict of previous results (or None)
- func: name/key of the function whose result may be cached
- expr: the expression/argument the function was evaluated with
"""
rows=None
if info:
if func in info.evaluated:
if expr in info.evaluated[func]:
rows = info.evaluated[func][expr]
else:
info.evaluated[func] = {}
else:
info = Info()
info.evaluated[func] = {}
return info, rows | 693ad671b21efbc872508b7a5a4c4aa31852d10a | 8,401 |
def friend_invitation_by_email_verify_for_api( # friendInvitationByEmailVerify
voter_device_id, invitation_secret_key, web_app_root_url=''):
"""
:param voter_device_id:
:param invitation_secret_key:
:param web_app_root_url:
:return:
"""
status = ""
success = False
# If a voter_device_id is passed in that isn't valid, we want to throw an error
device_id_results = is_voter_device_id_valid(voter_device_id)
if not device_id_results['success']:
status += device_id_results['status']
json_data = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': False,
'invitation_found': False,
'attempted_to_approve_own_invitation': False,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': False,
}
return json_data
if not positive_value_exists(invitation_secret_key):
status += "VOTER_EMAIL_ADDRESS_VERIFY_MISSING_SECRET_KEY "
error_results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': False,
'invitation_found': False,
'attempted_to_approve_own_invitation': False,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': False,
}
return error_results
voter_manager = VoterManager()
voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id)
voter_id = voter_results['voter_id']
if not positive_value_exists(voter_id):
status += "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID "
error_results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': False,
'invitation_found': False,
'attempted_to_approve_own_invitation': False,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': False,
}
return error_results
voter = voter_results['voter']
voter_we_vote_id = voter.we_vote_id
voter_has_data_to_preserve = voter.has_data_to_preserve()
friend_manager = FriendManager()
friend_invitation_results = friend_manager.retrieve_friend_invitation_from_secret_key(
invitation_secret_key, for_accepting_friendship=True, read_only=False)
if not friend_invitation_results['friend_invitation_found']:
status += "INVITATION_NOT_FOUND_FROM_SECRET_KEY "
error_results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': voter_has_data_to_preserve,
'invitation_found': False,
'attempted_to_approve_own_invitation': False,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': False,
}
return error_results
# Now that we have the friend_invitation data, look more closely at it
invitation_found = True
voter_we_vote_id_accepting_invitation = ""
email_manager = EmailManager()
if friend_invitation_results['friend_invitation_voter_link_found']:
friend_invitation_voter_link = friend_invitation_results['friend_invitation_voter_link']
if friend_invitation_voter_link.sender_voter_we_vote_id == voter_we_vote_id:
status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED "
error_results = {
'status': status,
'success': True,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': voter_has_data_to_preserve,
'invitation_found': True,
'attempted_to_approve_own_invitation': True,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': True,
}
return error_results
voter_we_vote_id_accepting_invitation = friend_invitation_voter_link.recipient_voter_we_vote_id
# Now we want to make sure we have a current_friend entry
recipient_organization_we_vote_id = ''
voter_results = voter_manager.retrieve_voter_by_we_vote_id(
friend_invitation_voter_link.recipient_voter_we_vote_id)
if voter_results['voter_found']:
recipient_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id
friend_results = friend_manager.create_or_update_current_friend(
friend_invitation_voter_link.sender_voter_we_vote_id,
friend_invitation_voter_link.recipient_voter_we_vote_id,
voter.linked_organization_we_vote_id,
recipient_organization_we_vote_id
)
friend_manager.update_suggested_friends_starting_with_one_voter(
friend_invitation_voter_link.sender_voter_we_vote_id)
friend_manager.update_suggested_friends_starting_with_one_voter(
friend_invitation_voter_link.recipient_voter_we_vote_id)
accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation
original_sender_we_vote_id = friend_invitation_voter_link.sender_voter_we_vote_id
results = friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id,
web_app_root_url=web_app_root_url)
# Update the PositionNetworkCount entries for both friends
add_position_network_count_entries_for_one_friend(
0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id)
add_position_network_count_entries_for_one_friend(
0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id)
# Now that a CurrentFriend entry exists, update the FriendInvitation...
if friend_results['success']:
try:
friend_invitation_voter_link.invitation_status = ACCEPTED
friend_invitation_voter_link.save()
except Exception as e:
success = False
status += 'FAILED_TO_UPDATE_INVITATION_STATUS1 ' + str(e) + ' '
else:
success = False
status += "friend_invitation_voter_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED "
# We don't need to do anything with the email because this was an invitation to a known voter
elif friend_invitation_results['friend_invitation_email_link_found']:
friend_invitation_email_link = friend_invitation_results['friend_invitation_email_link']
if friend_invitation_email_link.sender_voter_we_vote_id == voter_we_vote_id:
status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED "
error_results = {
'status': status,
'success': False,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': voter_has_data_to_preserve,
'invitation_found': True,
'attempted_to_approve_own_invitation': True,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': False,
}
return error_results
this_voter_has_first_or_last_name_saved = voter_manager.this_voter_has_first_or_last_name_saved(voter)
if positive_value_exists(friend_invitation_email_link.recipient_first_name) or \
positive_value_exists(friend_invitation_email_link.recipient_last_name):
we_have_first_or_last_name_from_friend_invitation_email_link = True
else:
we_have_first_or_last_name_from_friend_invitation_email_link = False
# Check to see if the email used has been claimed by a voter account yet
temp_voter_we_vote_id = ""
update_voter_name = False
email_results = email_manager.retrieve_primary_email_with_ownership_verified(
temp_voter_we_vote_id, friend_invitation_email_link.recipient_voter_email)
if email_results['email_address_object_found']:
# The email belongs to this or another voter
email_address_object = email_results['email_address_object']
voter_we_vote_id_accepting_invitation = email_address_object.voter_we_vote_id
# We might need to heal the data in the voter record
if voter_we_vote_id_accepting_invitation != voter_we_vote_id:
email_owner_results = voter_manager.retrieve_voter_by_we_vote_id(email_address_object.voter_we_vote_id)
if email_owner_results['voter_found']:
email_owner_voter = email_owner_results['voter']
voter_manager.update_voter_email_ownership_verified(email_owner_voter, email_address_object)
else:
# If we are here, then the email_address_object doesn't belong to another voter and can be
# claimed by this current voter.
voter_manager.update_voter_email_ownership_verified(voter, email_address_object)
if we_have_first_or_last_name_from_friend_invitation_email_link and \
not this_voter_has_first_or_last_name_saved:
# The current voter does not have first or last name, and we have incoming names to apply
update_voter_name = True
else:
voter_we_vote_id_accepting_invitation = voter_we_vote_id
# If we are here, we know the email is unclaimed. We can assign it to the current voter.
# Is there an email address entry for this voter/email?
email_we_vote_id = ''
email_results = email_manager.retrieve_email_address_object(
friend_invitation_email_link.recipient_voter_email, email_we_vote_id,
voter_we_vote_id)
if email_results['email_address_object_found']:
email_address_object = email_results['email_address_object']
try:
email_address_object.email_ownership_is_verified = True
email_address_object.secret_key = generate_random_string(12) # Reset the secret_key
email_address_object.save()
voter_manager.update_voter_email_ownership_verified(voter, email_address_object)
if we_have_first_or_last_name_from_friend_invitation_email_link and \
not this_voter_has_first_or_last_name_saved:
# The current voter does not have first or last name, and we have incoming names to apply
update_voter_name = True
except Exception as e:
success = False
status += 'FAILED_TO_UPDATE_UNVERIFIED_EMAIL ' + str(e) + ' '
else:
email_ownership_is_verified = True
email_create_results = email_manager.create_email_address_for_voter(
friend_invitation_email_link.recipient_voter_email, voter, email_ownership_is_verified)
if email_create_results['email_address_object_saved']:
email_address_object = email_create_results['email_address_object']
voter_manager.update_voter_email_ownership_verified(voter, email_address_object)
if we_have_first_or_last_name_from_friend_invitation_email_link and \
not this_voter_has_first_or_last_name_saved:
# The current voter does not have first or last name, and we have incoming names to apply
update_voter_name = True
# The current voter does not have first or last name, and we have incoming names that can be used
if update_voter_name:
results = voter_manager.update_voter_name_by_object(
voter, friend_invitation_email_link.recipient_first_name,
friend_invitation_email_link.recipient_last_name)
if results['voter_updated']:
voter = results['voter']
# Now that we know who owns the recipient_email_address, update invitation status
sender_organization_we_vote_id = ''
voter_results = voter_manager.retrieve_voter_by_we_vote_id(
friend_invitation_email_link.sender_voter_we_vote_id)
if voter_results['voter_found']:
sender_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id
friend_results = friend_manager.create_or_update_current_friend(
friend_invitation_email_link.sender_voter_we_vote_id,
voter_we_vote_id_accepting_invitation,
sender_organization_we_vote_id,
voter.linked_organization_we_vote_id
)
friend_manager.update_suggested_friends_starting_with_one_voter(
friend_invitation_email_link.sender_voter_we_vote_id)
friend_manager.update_suggested_friends_starting_with_one_voter(voter_we_vote_id_accepting_invitation)
accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation
original_sender_we_vote_id = friend_invitation_email_link.sender_voter_we_vote_id
friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id,
web_app_root_url=web_app_root_url)
# Update the PositionNetworkCount entries for both friends
add_position_network_count_entries_for_one_friend(
0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id)
add_position_network_count_entries_for_one_friend(
0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id)
if friend_results['success']:
try:
friend_invitation_email_link.invitation_status = ACCEPTED
friend_invitation_email_link.save()
success = True
status += ' friend_invitation_email_link_found FRIENDSHIP_CREATED '
except Exception as e:
success = False
status += 'FAILED_TO_UPDATE_INVITATION_STATUS2 ' + str(e) + ' '
else:
success = False
status += "friend_invitation_email_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED "
# And finally, create an organization for this brand new signed-in voter so they can create public opinions
organization_name = voter.get_full_name()
organization_website = ""
organization_twitter_handle = ""
organization_twitter_id = ""
organization_email = ""
organization_facebook = ""
organization_image = voter.voter_photo_url()
organization_type = INDIVIDUAL
organization_manager = OrganizationManager()
create_results = organization_manager.create_organization(
organization_name, organization_website, organization_twitter_handle,
organization_email, organization_facebook, organization_image, organization_twitter_id, organization_type)
if create_results['organization_created']:
# Add value to twitter_owner_voter.linked_organization_we_vote_id when done.
organization = create_results['organization']
try:
voter.linked_organization_we_vote_id = organization.we_vote_id
voter.save()
status += "VOTER_AND_ORGANIZATION_CREATED_FROM_FRIEND_INVITATION "
except Exception as e:
status += "UNABLE_CREATE_AND_LINK_VOTER_FROM_FRIEND_INVITATION " + str(e) + ' '
invitation_secret_key_belongs_to_this_voter = \
voter_we_vote_id == voter_we_vote_id_accepting_invitation
json_data = {
'status': status,
'success': success,
'voter_device_id': voter_device_id,
'voter_has_data_to_preserve': voter_has_data_to_preserve,
'invitation_found': invitation_found,
'attempted_to_approve_own_invitation': False,
'invitation_secret_key': invitation_secret_key,
'invitation_secret_key_belongs_to_this_voter': invitation_secret_key_belongs_to_this_voter,
}
return json_data | 5765f5e06a40de81b36a2dc594f6f67ec236ddcb | 8,402 |
import torch
def _load_image(fnames, dim=None, device=None, label=False):
"""Load a N-D image from disk"""
dat, affine = _map_image(fnames, dim)
if label:
dtype = dat.dtype
if isinstance(dtype, (list, tuple)):
dtype = dtype[0]
dtype = dtypes.as_torch(dtype, upcast=True)
dat0 = dat.data(device=device, dtype=dtype)[0] # assume single channel
if label is True:
label = dat0.unique(sorted=True)
label = label[label != 0].tolist()
dat = torch.zeros([len(label), *dat0.shape], device=device)
for i, l in enumerate(label):
dat[i] = dat0 == l
else:
dat = dat.fdata(device=device, rand=True)
affine = affine.to(dat.device, dat.dtype)
return dat, affine | 8f5dae0666d0173e57a8f0005a4f6d491d2bd58f | 8,404 |
from typing import Tuple
import ctypes
def dtpool(name: str) -> Tuple[int, str, bool]:
"""
Return the data about a kernel pool variable.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html
:param name: Name of the variable whose value is to be returned.
:return:
Number of values returned for name,
Type of the variable "C", "N", or "X",
Whether the variable was found in the kernel pool.
"""
name = stypes.string_to_char_p(name)
found = ctypes.c_int()
n = ctypes.c_int()
typeout = ctypes.c_char()
libspice.dtpool_c(name, ctypes.byref(found), ctypes.byref(n), ctypes.byref(typeout))
return n.value, stypes.to_python_string(typeout.value), bool(found.value) | 96584733f7f2ad93ed50e0e48ccd1040eb08dd17 | 8,406 |
def add_similar_tracks(position_or_range = ":", howmany=5, relative_positions=True):
"""
Adds Up to the value of howmany tracks similar to
each track on the current playlist.
parameters:
===========
# position_or_range: The position of the track to add similar tracks
to. Can also be a range string as "START:STOP" with the STOP
position not included, acceptable ranges include empty START and/or empty
STOP values for the first and last values in the playlist. The character
"c" can be used to indicate the current playing posision. Example:
":" for all tracks
":-3" for all but the last three tracks
"c:" for the current playing song and the rest following it
# howmany: Maximum number of added similar tracks
per existing track
# relative_positions: Whether to add similar tracks after
their respective original track (True) or at the end
of the playlist (False)
Returns the number of added tracks
"""
# Handle the range case
if type(position_or_range) == str and ":" in position_or_range:
(start, stop) = position_or_range.split(":")
if start == "": start = 0
if stop == "": stop = get_playlist_length()
if start == "c": start = _mpd_current_playlist_position()
if stop == "c": stop = _mpd_current_playlist_position()
(start, stop) = (int(start), int(stop))
if stop < 0:
stop = get_playlist_length() + stop
added = 0
for i in xrange(start, stop):
if relative_positions:
added += add_similar_tracks(i+added, howmany, True)
else:
added += add_similar_tracks(i, howmany, False)
return added
# Handle the single position case
added = 0
relative_buffer = {}
normal_buffer = []
if position_or_range == "c":
position = int(_mpd_current_playlist_position())
else:
position = int(position_or_range)
# Get similar tracks' URIs
for track in _get_similar_tracks(_mpd_get_playlist(position)[0]):
if added >= howmany: break
# look up track
uris = _mpd_lookup_track(track)
if not uris: continue
# check to see if it's already added
uri = uris[0]
if _is_track_added(uri): continue
# add it to the buffer
if relative_positions:
relative_buffer[position+added+1] = uri
else:
normal_buffer += [uri]
added += 1
if added < howmany:
print added
artist = _mpd_get_playlist(position)[0]["artist"]
artists = [artist]
artists.extend(_get_similar_artists(artist))
songs = []
for a in artists:
uris = _mpd_lookup_artist_tracks(a)
songs.extend(uris)
random.shuffle(songs)
for song in songs:
if added >= howmany: break
# check to see if it's already added
if _is_track_added(song): continue
# add it to the buffer
if relative_positions:
relative_buffer[position+added+1] = song
else:
normal_buffer += [song]
added += 1
print added
# add tracks from buffer
_mpd_client.command_list_ok_begin()
if relative_positions:
keys = relative_buffer.keys()
keys.sort()
for key in keys:
_mpd_add_track(relative_buffer[key], key)
else:
for uri in normal_buffer:
_mpd_add_track(uri)
_mpd_client.command_list_end()
return added | 778eadc4d00de1aa2f293d963cdd643499f234dd | 8,407 |
def get_hot_article_tags():
"""
Get all hot article tags.
:return: all tags flagged as hot
"""
return Tag.objects.filter(is_hot=True) | d4158308ef3ea3cbce646548dc47def9e35fcef7 | 8,408 |
from unittest.mock import patch
def patch_user_interface_null() -> MockedUserInterfaceNull:
"""Patch player interface with no players."""
return patch("steam.api.interface", return_value=MockedUserInterfaceNull()) | 8491d7beef4dfbf949cfb5a27106a948e30487c2 | 8,409 |
from scipy import signal
import numpy
def first_localmax_index(data):
"""Return index of first local maxima.
If there is no local maxima (e.g. if all the values are zero),
it will simply return zero.
"""
localmax_indexes = signal.argrelextrema(data, numpy.greater, mode='wrap')
if localmax_indexes[0].size > 0:
return localmax_indexes[0][0]
else:
return 0 | 7cd7baa8d564dd2175e6fd8877a733fca41e63ef | 8,411 |
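# Example (added): with mode='wrap' the array is treated as circular, so the
# first local maximum of [0, 1, 0, 2, 0] is at index 1; an all-zero array has
# no local maximum and yields 0.
assert first_localmax_index(numpy.array([0, 1, 0, 2, 0])) == 1
assert first_localmax_index(numpy.zeros(5)) == 0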
def _rds_clone_ ( dataset , name = '' ) :
"""Clone dataset
>>> dataset = ...
>>> cloned = dataset.clone ( 'new_name')
"""
name = name if name else dsID ()
return ROOT.RooDataSet ( dataset , name ) | f3e3156987eafb99f05fb99a2d8371c65959faf9 | 8,412 |
def abbink_onset_detector(signal=None, rest=None, sampling_rate=1000.,
size=None, alarm_size=None, threshold=None,
transition_threshold=None):
"""Determine onsets of EMG pulses.
Follows the approach by Abbink et al.. [Abb98]_.
Parameters
----------
signal : array
Input filtered EMG signal.
rest : array, list, dict
One of the following 3 options:
* N-dimensional array with filtered samples corresponding to a
rest period;
* 2D array or list with the beginning and end indices of a segment of
the signal corresponding to a rest period;
* Dictionary with {'mean': mean value, 'std_dev': standard variation}.
sampling_rate : int, float, optional
Sampling frequency (Hz).
size : int
Detection window size (seconds).
alarm_size : int
Number of amplitudes searched in the calculation of the transition
index.
threshold : int, float
Detection threshold.
transition_threshold: int, float
Threshold used in the calculation of the transition index.
Returns
-------
onsets : array
Indices of EMG pulse onsets.
processed : array
Processed EMG signal.
References
----------
.. [Abb98] Abbink JH, van der Bilt A, van der Glas HW, "Detection of onset
and termination of muscle activity in surface electromyograms",
Journal of Oral Rehabilitation, vol. 25, pp. 365–369, 1998
"""
# check inputs
if signal is None:
raise TypeError("Please specify an input signal.")
if rest is None:
raise TypeError("Please specidy rest parameters.")
if size is None:
raise TypeError("Please specify the detection window size.")
if alarm_size is None:
raise TypeError("Please specify the number of amplitudes searched in "
"the calculation of the transition index.")
if threshold is None:
raise TypeError("Please specify the detection threshold.")
if transition_threshold is None:
raise TypeError("Please specify the second threshold.")
# gather statistics on rest signal
if isinstance(rest, np.ndarray) or isinstance(rest, list):
# if the input parameter is a numpy array or a list
if len(rest) >= 2:
# first ensure numpy
rest = np.array(rest)
if len(rest) == 2:
# the rest signal is a segment of the signal
rest_signal = signal[rest[0]:rest[1]]
else:
# the rest signal is provided as is
rest_signal = rest
rest_zero_mean = rest_signal - np.mean(rest_signal)
statistics = st.signal_stats(signal=rest_zero_mean)
mean_rest = statistics['mean']
std_dev_rest = statistics['std_dev']
else:
raise TypeError("Please specify the rest analysis.")
elif isinstance(rest, dict):
# if the input is a dictionary
mean_rest = rest['mean']
std_dev_rest = rest['std_dev']
else:
raise TypeError("Please specify the rest analysis.")
# subtract baseline offset
signal_zero_mean = signal - np.mean(signal)
# full-wave rectification
fwlo = np.abs(signal_zero_mean)
# moving average
mvgav = np.convolve(fwlo, np.ones((size,)) / size, mode='valid')
# calculate the test function
tf = (1 / std_dev_rest) * (mvgav - mean_rest)
# additional filter
filtered_tf, _, _ = st.filter_signal(signal=tf,
ftype='butter',
band='lowpass',
order=10,
frequency=30,
sampling_rate=sampling_rate)
# convert from numpy array to list to use list comprehensions
filtered_tf = filtered_tf.tolist()
onset_time_list = []
offset_time_list = []
alarm_time = 0
onset = False
alarm = False
for k in range(0, len(tf)):
if onset is True:
# an onset was previously detected and we are looking for the offset time, applying the same criteria
if alarm is False:
if filtered_tf[k] < threshold:
# the first index of the sliding window is used as an estimate for the onset time (simple post-processor)
alarm_time = k
alarm = True
else:
# if alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1):
if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1):
transition_indices = []
for j in range(alarm_size, alarm_time):
low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)]
low = sum(i < transition_threshold for i in low_list)
high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)]
high = sum(i > transition_threshold for i in high_list)
transition_indices.append(low + high)
offset_time_list = np.where(transition_indices == np.amin(transition_indices))[0].tolist()
onset = False
alarm = False
else: # we only look for another onset if a previous offset was detected
if alarm is False:
if filtered_tf[k] >= threshold:
# the first index of the sliding window is used as an estimate for the onset time (simple post-processor)
alarm_time = k
alarm = True
else:
# if alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1):
if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1):
transition_indices = []
for j in range(alarm_size, alarm_time):
low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)]
low = sum(i < transition_threshold for i in low_list)
high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)]
high = sum(i > transition_threshold for i in high_list)
transition_indices.append(low + high)
onset_time_list = np.where(transition_indices == np.amax(transition_indices))[0].tolist()
onset = True
alarm = False
onsets = np.union1d(onset_time_list,
offset_time_list)
# adjust indices because of moving average
onsets += int(size / 2)
return utils.ReturnTuple((onsets, filtered_tf), ('onsets', 'processed')) | 0ae968c46ff4e5dd5496f09b637a1d5c039ab9fa | 8,414 |
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.resample_jpeg(image_data)
return image, RESIZE_HEIGHT, RESIZE_WIDTH | 6d978af3360692159300b4450cdd851aae842098 | 8,416 |
import tensorflow as tf
def float_feature(value):
"""Wrapper for inserting float features into Example proto.
"""
if not isinstance(value,list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value)) | 9333af60465a251883b3efe70de26ce9ce483657 | 8,419 |
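# Usage sketch (added): wrapping a list of floats into a tf.train.Example, the
# typical pattern when writing TFRecord files; the feature name 'embedding' is
# just a placeholder.
example = tf.train.Example(features=tf.train.Features(
    feature={'embedding': float_feature([0.1, 0.2, 0.3])}))
serialized = example.SerializeToString()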
def my_charts(request):
"""
define personal graphics page behavior
"""
data = [0, 0, 0, 0]
if request.method == 'POST':
month = request.POST.get('month')
if month is not None:
current_user_id = request.user.id_user
if month == 'all':
all_classifications = list(ClinicalState_28d.
objects.all()) + \
list(ClinicalState_29d_2m.
objects.all()) + \
list(ClinicalState_2m_3y.
objects.all()) + \
list(ClinicalState_3y_10y.
objects.all()) + \
list(ClinicalState_10yMore.
objects.all())
else:
all_classifications = list(ClinicalState_28d.objects.
filter(date__month=month)) + \
list(ClinicalState_29d_2m.objects.
filter(date__month=month)) + \
list(ClinicalState_2m_3y.objects.
filter(date__month=month)) + \
list(ClinicalState_3y_10y.objects.
filter(date__month=month)) + \
list(ClinicalState_10yMore.objects.
filter(date__month=month))
for classification in all_classifications:
if classification.classifier_id == current_user_id:
patient_classification = \
classification.patient.classification
if patient_classification == 1:
data[0] += 1
elif patient_classification == 2:
data[1] += 1
elif patient_classification == 3:
data[2] += 1
elif patient_classification == 4:
data[3] += 1
return render(request, 'users/myCharts.html', {
'data': data
}) | adc11d0748246c753581675eee19b2780b16b832 | 8,420 |
import numpy as np
def matrix2yzy_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
"""
Ry(k3) @ Rz(k2) @ Ry(k1) = [[c1c2c3-s1s3, -s2c3, s1c2c3+c1c3],
[c1s2, c2, s1s2],
[-c1c2s3, s2s3, -s1c2s3+c1c3]]
"""
rotation_matrices = rotation_matrices.reshape((-1, 3, 3))
angles_radians = np.zeros((rotation_matrices.shape[0], 3))
# Angle 2 can be taken directly from matrices
angles_radians[:, 1] = np.arccos(rotation_matrices[:, 1, 1])
# Gimbal lock case (s2 = 0)
tolerance = 1e-4
# Find indices where this is the case
gimbal_idx = np.abs(rotation_matrices[:, 1, 0]) < tolerance
# Calculate angle 1 and set angle 3 = 0 for those indices
r31 = rotation_matrices[gimbal_idx, 2, 0]
r33 = rotation_matrices[gimbal_idx, 2, 2]
angles_radians[gimbal_idx, 0] = np.arctan2(-r31, r33)
angles_radians[gimbal_idx, 2] = 0
# Normal case (s2 > 0)
idx = np.invert(gimbal_idx)
r23 = rotation_matrices[idx, 1, 2]
r21 = rotation_matrices[idx, 1, 0]
r32 = rotation_matrices[idx, 2, 1]
r12 = rotation_matrices[idx, 0, 1]
angles_radians[idx, 0] = np.arctan2(r23, r21)
angles_radians[idx, 2] = np.arctan2(r32, -r12)
# convert to degrees
euler_angles = np.rad2deg(angles_radians)
return euler_angles | 8a37e65751a26d3fd5c360ce9068626bfee5c594 | 8,421 |
def smallest_subarray_with_given_sum(arr, s):
"""Find the length of the smallest subarray whose sum is >= s.
Time: O(n)
Space: O(1)
>>> smallest_subarray_with_given_sum([2, 1, 5, 2, 3, 2], 7)
2
>>> smallest_subarray_with_given_sum([2, 1, 5, 2, 8], 7)
1
>>> smallest_subarray_with_given_sum([3, 4, 1, 1, 6], 8)
3
"""
win_sum = 0
win_start = 0
min_len = 0
for win_end in range(len(arr)):
win_sum += arr[win_end]
while win_sum >= s:
cur_len = win_end - win_start + 1
if min_len == 0 or cur_len < min_len:
min_len = cur_len
win_sum -= arr[win_start]
win_start += 1
return min_len | 4a1d63619fc200c32ffae80dc7d404f486efcdd1 | 8,422 |
from typing import OrderedDict
def create_lit_model(
model: str,
input_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
output_types: "OrderedDict[str, lit_types.LitType]", # noqa: F821
attribution_method: str = "sampled_shapley",
) -> lit_model.Model:
"""Creates a LIT Model object.
Args:
model:
Required. A string reference to a local TensorFlow saved model directory.
The model must have at most one input and one output tensor.
input_types:
Required. An OrderedDict of string names matching the features of the model
as the key, and the associated LitType of the feature.
output_types:
Required. An OrderedDict of string names matching the labels of the model
as the key, and the associated LitType of the label.
attribution_method:
Optional. A string to choose what attribution configuration to
set up the explainer with. Valid options are 'sampled_shapley'
or 'integrated_gradients'.
Returns:
A LIT Model object that has the same functionality as the model provided.
"""
return _TensorFlowLitModel(model, input_types, output_types, attribution_method) | 355eaebe6e59733d1831f993d56462ee36e4ff9a | 8,423 |
from typing import List
from typing import Dict
from typing import OrderedDict
def show_lightning_round_zero_correct(database_connection: mysql.connector.connect
) -> List[Dict]:
"""Return list of shows in which a panelist answers zero Lightning
Fill-in-the-Blank round questions correct"""
cursor = database_connection.cursor(dictionary=True)
query = ("SELECT s.showid, s.showdate, p.panelistid, p.panelist, "
"pm.panelistlrndstart, pm.panelistlrndcorrect, "
"pm.panelistscore, pm.showpnlrank "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"JOIN ww_panelists p ON p.panelistid = pm.panelistid "
"WHERE s.bestof = 0 AND s.repeatshowid IS null "
"AND pm.panelistlrndcorrect = 0 "
"ORDER BY s.showdate ASC;")
cursor.execute(query)
result = cursor.fetchall()
cursor.close()
if not result:
return None
shows = []
for row in result:
show = OrderedDict()
show["id"] = row["showid"]
show["date"] = row["showdate"].isoformat()
panelist = OrderedDict()
panelist["id"] = row["panelistid"]
panelist["name"] = row["panelist"]
panelist["start"] = row["panelistlrndstart"]
panelist["correct"] = row["panelistlrndcorrect"]
panelist["score"] = row["panelistscore"]
panelist["rank"] = row["showpnlrank"]
show["panelist"] = panelist
shows.append(show)
return shows | 5c218639fd2321239d9f791221f2ad30f17ead02 | 8,424 |
import requests
def get_webpage(page_url):
"""Get the OOTS index webpage and return the content."""
result = requests.get(page_url)
if result.status_code == 200:
return result.text
else:
_logger.error(
colored(
"Unable to read the OOTS data,please check your connection.",
"red",
attrs=["bold"],
)
)
_logger.error(colored(f"URL : {page_url}", "red"))
quit(1) | e9b88f69b9dca0d5cf525a26e7e43fd118698225 | 8,425 |
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
name = properties.get('name', context.env['name'])
bastion_props = {
'zone': properties['zone'],
'network': properties['network'],
'machineType': properties['machineType'],
'diskImage': IMAGE
}
bastion = {'name': name, 'type': 'instance.py', 'properties': bastion_props}
optional_props = ['diskSizeGb', 'metadata', 'tags']
for prop in optional_props:
set_optional_property(bastion_props, properties, prop)
if properties.get('disableSudo'):
disable_sudo(bastion_props)
firewall_settings = properties.get('createFirewallRules')
if firewall_settings:
extra_resources, extra_outputs = create_firewall_rules(
bastion,
firewall_settings
)
else:
extra_resources = []
extra_outputs = []
outputs = [
{
'name': 'name',
'value': name
},
{
'name': 'selfLink',
'value': '$(ref.{}.selfLink)'.format(name)
},
{
'name': 'internalIp',
'value': '$(ref.{}.internalIp)'.format(name)
},
{
'name': 'externalIp',
'value': '$(ref.{}.externalIp)'.format(name)
}
]
return {
'resources': [bastion] + extra_resources,
'outputs': outputs + extra_outputs
} | f22dbb3cb3500766badb6c28eb3de35b6ba5ba3c | 8,426 |
import inspect
def dump_args(func):
"""Decorator to print function call details - parameters names and effective values.
"""
def wrapper(*args, **kwargs):
func_args = inspect.signature(func).bind(*args, **kwargs).arguments
func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())
print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')
return func(*args, **kwargs)
return wrapper | 673158019aa3a8343718b9648b61ef4a3699f050 | 8,427 |
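# Usage sketch (added): decorating a function prints its bound arguments on
# each call before delegating to the original function.
@dump_args
def add(a, b=2):
    return a + b
add(1)  # prints something like: __main__.add ( a = 1 )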
from typing import Tuple
import numpy as np
def bigaussian(
n_particles: int,
mean: Tuple[float, float, float, float, float],
geometric_emittance_h: float,
geometric_emittance_v: float,
sigma_p: float,
) -> np.array:
"""Generate a bigaussian distributed distribution.
Args:
n_particles: Number of particles.
mean: Distribution centers.
geometric_emittance_h, geometric_emittance_v: Horizontal and vertical geometric emittances.
sigma_p: Absolute momentum spread.
Returns:
Array of position and angle phase space coordinates of the distribution.
"""
cov = np.diag(
(
geometric_emittance_h,
geometric_emittance_h,
geometric_emittance_v,
geometric_emittance_v,
sigma_p ** 2,
)
)
return np.random.multivariate_normal(mean, cov, n_particles).T | a8c7b9cf7500fde899cdcc163a31450b59d0d7d0 | 8,428 |
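# Usage sketch (added): draw a 5-D Gaussian bunch; the emittance and momentum
# spread values below are placeholders, not values taken from the original code.
coords = bigaussian(
    n_particles=1000,
    mean=(0.0, 0.0, 0.0, 0.0, 0.0),
    geometric_emittance_h=1e-6,
    geometric_emittance_v=1e-6,
    sigma_p=1e-3,
)
assert coords.shape == (5, 1000)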
import numpy as np
def horizontal_tile(silhouette, reps = 2):
"""Places two silhouettes side-by-side with an empty line in the middle."""
silhouette = np.append(silhouette,np.zeros((silhouette.shape[0],1)),axis=1)
return np.tile(silhouette,(1,reps))[:,:] | ddccc0ff9cb7f1fba56dfc52de723f5253729952 | 8,429 |
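# Example (added): a 3x4 silhouette gains one zero column (3x5) and is tiled
# twice horizontally, giving a 3x10 result.
sil = np.ones((3, 4))
assert horizontal_tile(sil).shape == (3, 10)
assert horizontal_tile(sil, reps=3).shape == (3, 15)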
def grads_norm(parameters):
"""get grad norms of the parameters, useful for model inspection"""
t = [p.grad for p in parameters if p.grad is not None]
return many_l2_norm(*t) | 9904f5313f63387ba0c4f139029759118f2ecae8 | 8,430 |
def django_admin_add_object(request, context):
"""show add object"""
if request and request.user.is_staff and (context.get('object', None) or context.get('model', None)):
object_class = context.get('model', None)
if not object_class:
object_class = context['object'].__class__
view_name = 'admin:{0}_{1}_add'.format(get_model_app(object_class), get_model_name(object_class))
try:
return make_link(
reverse(view_name),
_('Add {0}').format(get_model_label(object_class)), 'table',
classes=['icon', 'alert_on_click']
)
except NoReverseMatch:
pass | 909c79cc75913afff341eed84286edd79352fc0c | 8,431 |
def get_config():
"""Returns an instance of the configured config class.
:return: Project's defined Adyen configuration.
:rtype: :class:`AbstractAdyenConfig`
By default, this function will return an instance of
:class:`adyen.settings_config.FromSettingsConfig`. If
:data:`ADYEN_CONFIG_CLASS` is defined, it will try to load this class and
return an instance of this class instead.
.. note::
This function expects :data:`ADYEN_CONFIG_CLASS` to be a string that
represent the python import path of the Adyen config class, such as
``adyen.settings_config.FromSettingsConfig``.
"""
try:
config_class_string = settings.ADYEN_CONFIG_CLASS
except AttributeError:
config_class_string = 'adyen.settings_config.FromSettingsConfig'
return import_string(config_class_string)() | 16a26bd31752211d2aa7a22858f0317ba90b5bad | 8,432 |
import math
import numpy as np
def aperiodic(amp, samples):
"""an aperiodic oscillating signal
Parameters
----------
amp : float
values range over +-amp
samples : int
number of samples to generate
Returns
-------
ndarray
"""
periods = np.abs(sine(samples, samples, 1)) + samples / 10
seq = [amp * math.sin(i * 2 * math.pi / periods[i]) for i in range(samples)]
return np.array(seq) | 42428a16fbfee7cf2a9d8b566fc122e9c56b7e6a | 8,433 |
import base64
def removeHs(ctab):
"""
Removes any hydrogens from the graph of a molecule. CTAB is urlsafe_base64 encoded string containing single molfile or
concatenation of multiple molfiles.
cURL examples:
curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")
curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")?implicitOnly=1
"""
data = base64.urlsafe_b64decode(ctab)
return removeHsView(data, request.params) | f3064e2ce3a1db1bb260da49da2df3fb1eaf1310 | 8,434 |
def judge_key(key: str, up: any):
"""判断key是否存在"""
if dict == type(up):
if key in up:
return True
else:
for dict_key, dict_value in up.items():
if dict == type(dict_value) or list == type(dict_value):
result = judge_key(key, dict_value)
if result:
return result
return False
elif list == type(up):
for dict_value in up:
if dict == type(dict_value) or list == type(dict_value):
result = judge_key(key, dict_value)
if result:
return result
return False
else:
return False | ee0086259343df30cfc7b72951a165b557a843f9 | 8,435 |
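# Usage sketch (added): the lookup recurses through nested dicts and lists.
data = {"a": 1, "b": [{"c": 2}, {"d": {"e": 3}}]}
assert judge_key("e", data) is True
assert judge_key("x", data) is False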
def apply_changes(patch_obj_dic, file_dic):
"""
If all checks are passed, write the changes to the patch file. Note that the original file is overwritten
:return:
"""
success = False
error_title = None
error_msg = None
# Checks that mutually exclusive options have not been set together. If they have, alert the user,
# and abort before writing to file(s)
for (fn, patch_obj_list) in iterDic(patch_obj_dic):
mut_exl_dic = {}
for obj in patch_obj_list:
if obj.group and 'yes' in obj.status:
if obj.group not in mut_exl_dic:
mut_exl_dic[obj.group] = []
mut_exl_dic[obj.group].append(obj.name)
else:
mut_exl_dic[obj.group].append(obj.name)
for (group, names) in iterDic(mut_exl_dic):
if len(names) > 1:
name_str = '\n'
for name in names:
name_str += ' ' + name + '\n'
error_title = 'Mutually Exclusive Options Detected!'
error_msg = 'The following options cannot be enabled together: \n' + name_str + \
fn + ' was not written.'
success = False
return success, error_title, error_msg
# If checks passed, prepare and then write data to file(s)
for (fn, patch_obj_list) in iterDic(patch_obj_dic):
for obj in patch_obj_list:
file_dic = prep_for_writing(fn, obj, file_dic)
r_p_f_success, error_title, error_msg = write_patch_files(fn, file_dic)
if not r_p_f_success:
success = False
return success, error_title, error_msg
success = True
return success, error_title, error_msg | ad52721ab338b0124869c32c2e08f202deeb981f | 8,436 |
def construct_1D_scan_fast(gate, swing, n_pt, t_step, biasT_corr, pulse_lib, digitizer, channels,
dig_samplerate, dig_vmax=2.0, iq_mode=None, acquisition_delay_ns=None,
enabled_markers=[], channel_map=None, pulse_gates={}, line_margin=0):
"""
1D fast scan parameter constructor.
Args:
gate (str) : gate/gates that you want to sweep.
swing (double) : swing to apply on the AWG gates. [mV]
n_pt (int) : number of points to measure (current firmware limits to 1000)
t_step (double) : time in ns to measure per point. [ns]
biasT_corr (bool) : correct for biasT by taking data in different order.
pulse_lib : pulse library object, needed to make the sweep.
digitizer : digitizer object
channels : digitizer channels to read
dig_samplerate : digitizer sample rate [Sa/s]
iq_mode (str or dict): when digitizer is in MODE.IQ_DEMODULATION then this parameter specifies how the
complex I/Q value should be plotted: 'I', 'Q', 'abs', 'angle', 'angle_deg'. A string applies to
all channels. A dict can be used to specify selection per channel, e.g. {1:'abs', 2:'angle'}.
Note: channel_map is a more generic replacement for iq_mode.
acquisition_delay_ns (float):
Time in ns between AWG output change and digitizer acquisition start.
This also increases the gap between acquisitions.
enabled_markers (List[str]): marker channels to enable during scan
channel_map (Dict[str, Tuple(int, Callable[[np.ndarray], np.ndarray])]):
defines new list of derived channels to display. Dictionary entries name: (channel_number, func).
E.g. {'ch1-I':(1, np.real), 'ch1-Q':(1, np.imag), 'ch3-Amp':(3, np.abs), 'ch3-Phase':(3, np.angle)}
The default channel_map is:
{'ch1':(1, np.real), 'ch2':(2, np.real), 'ch3':(3, np.real), 'ch4':(4, np.real)}
pulse_gates (Dict[str, float]):
Gates to pulse during scan with pulse voltage in mV.
E.g. {'vP1': 10.0, 'vB2': -29.1}
line_margin (int): number of points to add to sweep 1 to mask transition effects due to voltage step.
The points are added to begin and end for symmetry (bias-T).
Returns:
Parameter (QCODES multiparameter) : parameter that can be used as input in a conventional scan function.
"""
vp = swing/2
# set up sweep voltages (get the right order, to compensate for the biasT).
voltages_sp = np.linspace(-vp,vp,n_pt)
if biasT_corr:
m = (n_pt+1)//2
voltages = np.zeros(n_pt)
voltages[::2] = voltages_sp[:m]
voltages[1::2] = voltages_sp[m:][::-1]
else:
voltages = voltages_sp
return dummy_digitzer_scan_parameter(digitizer, None, pulse_lib, t_step, (n_pt, ), (gate, ),
( tuple(voltages_sp), ), biasT_corr, 500e6) | cbbffe77187cfd923b1e9b5982fb1e2b6319a854 | 8,437 |
def assemble_f_local(ck, f_func, p1, p2, p3):
"""
Assemble the local contribution to the f_load_lv for the element
Parameters
----------
ck : np.array
basis function coef. matrix.
f_func : function
load function.
p1 : np.array
first vertex of the triangle element.
p2 : np.array
second vertex of the triangle element.
p3 : np.array
third vertex of the triangle element.
Returns
-------
np.array
local contribution to f_load_lv.
"""
f_local = np.zeros(6, dtype=float)
for ki in range(6):
i, di = inv_index_map(ki)
def f_phi(x, y):
return f_func(x, y)[:, di] * phi(x, y, ck, i)
f_local[ki] = quadrature2D(p1, p2, p3, 4, f_phi)
return f_local | 9912026fbde63b0cf6780a8b3fc8131dbc99c809 | 8,438 |
def take_turn(num_rolls, opponent_score, dice=six_sided):
"""Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon).
Return the points scored for the turn by the current player. Also
implements the Hogtimus Prime rule.
num_rolls: The number of dice rolls that will be made.
opponent_score: The total score of the opponent.
dice: A function that simulates a single dice roll outcome.
"""
# Leave these assert statements here; they help check for errors.
assert type(num_rolls) == int, 'num_rolls must be an integer.'
assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.'
assert num_rolls <= 10, 'Cannot roll more than 10 dice.'
assert opponent_score < 100, 'The game should be over.'
# BEGIN PROBLEM 2
"*** REPLACE THIS LINE ***"
roll_score=0
if num_rolls==0:
roll_score=free_bacon(opponent_score)
else:
roll_score=roll_dice(num_rolls,dice)
if is_prime(roll_score):
return next_prime(roll_score)
else:
return roll_score
# END PROBLEM 2 | f072341dde309a7b612da896d2db348c92a7f0c4 | 8,439 |
def __sort_vertices(points):
"""Return vertices that are sorted by average center of all points."""
points = list(set(points))
if len(points) < 3:
return None
start_point = __find_average_center(points)
start_vector = Vector3D.by_points(start_point, points[0])
return sorted(points, key=lambda point:
GeometryUtils.angle_between(
start_vector,
Vector3D.by_points(start_point, point))) | b89374b1b8e06c3bcc87b074239c1cc13ecd7de4 | 8,440 |
from typing import List
from typing import Tuple
def create_feature(
tokens: List[str],
label_ids: List[int],
words_map: List[Tuple[int, int, bool]],
max_seq_length: int,
tokenizer: PreTrainedTokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
words_map_pad=(-1, -1, True)
) -> Tuple[InputFeatures, List[Tuple[int, int, str]]]:
"""
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = tokenizer.num_special_tokens_to_add()
# if len(tokens) > max_seq_length - special_tokens_count:
# tokens = tokens[: (max_seq_length - special_tokens_count)]
# label_ids = label_ids[: (max_seq_length - special_tokens_count)]
assert (len(tokens) <= max_seq_length - special_tokens_count)
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
words_map += [words_map_pad]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
words_map += [words_map_pad]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
words_map += [words_map_pad]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
words_map = [words_map_pad] + words_map
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
words_map = ([words_map_pad] * padding_length) + words_map
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
words_map += [words_map_pad] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(words_map) == max_seq_length
if "token_type_ids" not in tokenizer.model_input_names:
segment_ids = None
return InputFeatures(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=segment_ids,
label_ids=label_ids,
), words_map | 7dccdf98a07ec254abc14c15bba4c5ea307e8f2b | 8,441 |
import numpy as np
from scipy.stats import norm, uniform
def calculate_log_likelihood_and_derivative_at_parameter_point_with_mRNA(protein_at_observations,model_parameters,mean_protein,measurement_variance,mRNA_parameters):
"""
Calculates the log of the likelihood, and the derivative of the negative log likelihood wrt each parameter, of our data given the
    parameters, using the Kalman filter. It uses the predicted_observation_distributions, predicted_observation_mean_derivatives, and
predicted_observation_variance_derivatives from the kalman_filter function. It returns the log likelihood as in the
calculate_log_likelihood_at_parameter_point function, and also returns an array of the derivative wrt each parameter.
Parameters
----------
protein_at_observations : numpy array.
Observed protein. The dimension is n x 2, where n is the number of observation time points.
The first column is the time, and the second column is the observed protein copy number at
that time.
model_parameters : numpy array.
        An array containing the model parameters in the following order:
repression_threshold, hill_coefficient, mRNA_degradation_rate,
protein_degradation_rate, basal_transcription_rate, translation_rate,
transcription_delay.
mean_protein : float.
The mean protein value, used to set prior bounds for the repression threshold
measurement_variance : float.
The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018).
mRNA_parameters : numpy array.
two element array, mean and standard deviation of the mRNA distribution
Returns
-------
log_likelihood : float.
The log of the likelihood of the data.
log_likelihood_derivative : numpy array.
The derivative of the log likelihood of the data, wrt each model parameter
"""
number_of_parameters = model_parameters.shape[0]
if ((uniform(50,2*mean_protein-50).pdf(model_parameters[0]) == 0) or
(uniform(2,6-2).pdf(model_parameters[1]) == 0) or
(uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[2]) == 0) or
(uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[3]) == 0) or
(uniform(0.01,120-0.01).pdf(model_parameters[4]) == 0) or
(uniform(0.01,40-0.01).pdf(model_parameters[5]) == 0) or
(uniform(1,40-1).pdf(model_parameters[6]) == 0) ):
return -np.inf, np.zeros(number_of_parameters)
state_space_mean, _, _, _, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives = kalman_filter(protein_at_observations,
model_parameters,
measurement_variance,
derivative=True)
mean_mRNA = np.mean(state_space_mean[:,1])
# calculate log likelihood as before
if protein_at_observations.reshape(-1,2).shape[0] == 1:
number_of_observations = 1
observations = [protein_at_observations[1]]
else:
number_of_observations = protein_at_observations.shape[0]
observations = protein_at_observations[:,1]
mean = predicted_observation_distributions[:,1]
sd = np.sqrt(predicted_observation_distributions[:,2])
# add mRNA penalty
log_likelihood = np.sum(norm.logpdf(observations,mean,sd)) + norm.logpdf(mean_mRNA,
mRNA_parameters[0],
mRNA_parameters[1])
# now for the computation of the derivative of the negative log likelihood. An expression of this can be found
# at equation (28) in Mbalawata, Särkkä, Haario (2013)
observation_transform = np.array([[0.0,1.0]])
helper_inverse = 1.0/predicted_observation_distributions[:,2]
log_likelihood_derivative = np.zeros(number_of_parameters)
for parameter_index in range(number_of_parameters):
for time_index in range(number_of_observations):
log_likelihood_derivative[parameter_index] -= 0.5*(helper_inverse[time_index]*np.trace(observation_transform.dot(
predicted_observation_variance_derivatives[time_index,parameter_index].dot(
np.transpose(observation_transform))))
-
helper_inverse[time_index]*np.transpose(observation_transform.dot(
predicted_observation_mean_derivatives[time_index,parameter_index]))[0]*
(observations[time_index] - mean[time_index])
-
np.power(helper_inverse[time_index],2)*np.power(observations[time_index] - mean[time_index],2)*
observation_transform.dot(
predicted_observation_variance_derivatives[time_index,parameter_index].dot(
np.transpose(observation_transform)))
-
helper_inverse[time_index]*(observations[time_index] - mean[time_index])*
observation_transform.dot(predicted_observation_mean_derivatives[time_index,parameter_index])[0])
return log_likelihood, log_likelihood_derivative | 9d187b23c5a56e15c2bd900242823e5780991f7c | 8,444 |
def SEORedirectMiddleware(get_response):
"""
Intercepts 404 errors and checks the database for any defined
    redirects that match the current request path.
"""
def middleware(request):
response = get_response(request)
if response.status_code != 404:
return response
try:
r = Redirect.objects.get(url=normalize_url(request.path))
except Redirect.DoesNotExist:
return response
to = r.target_url
kwargs = dict(permanent=r.is_permanent)
if r.with_query_string:
to = modify_url_query_string(to, replace=request.GET.dict())
return redirect(to, **kwargs)
return middleware | 750e8f3603114c8a6474f2fdfde76cefea1eacf7 | 8,445 |
def b2s(src):
"""
Convert from bytes to string
:param src: bytes
:return: string
"""
return src.decode(encoding=UTF_ENCODING) | 3a71fe7684ce57833db6861a250b0ba1d5fbfd47 | 8,446 |
def get_finetune_lfo_type(header: bytes) -> AutomationLfoType:
"""Return finetune LFO type."""
assert isinstance(value := _unpack(header, "FINETUNE_LFO_TYPE"), int), type(value)
return AutomationLfoType(value) | 2dcaf71353d0641dd1f0cac1bf83ecc720cd666b | 8,447 |
def locate(client: Client, structure: Structure) -> str:
"""Locates the respective structure."""
return client.run('locate', structure) | 93e2d83c6be7cc5d4a628233ccdadda2f9a914a5 | 8,448 |
def teraflops_for_accelerator(accel):
"""
Stores the number of TFLOPs available to a few accelerators, including driver handicaps.
Args:
accel (str): A string descriptor of which accelerator to use. Must be either "3090" or "V100".
Returns:
accel_flops (int): an integer of how many TFLOPs are in the accelerator.
"""
accel_flops = {"3090": 71, "V100": 125}
return accel_flops[accel] | a491beb06baf73325e2e7b5f0876e98ea312e2aa | 8,449 |
def reduced_supercell_vectors(ab, n):
"""
Returns all possible reduced in-plane lattice vectors and
transition matrices for the given starting unit cell lattice
vectors(ab) and the supercell size n
Args:
ab: a, b lattice vectors
n: (int) supercell size
"""
uv_list = []
tm_list = []
for r_tm in get_trans_matrices(n):
uv = get_uv(ab, r_tm)
uv_r, tm0 = get_reduced_uv(uv, r_tm)
uv_list.append(uv_r)
tm_list.append(tm0)
return uv_list, tm_list | fba60388b42beb170bfba96a9aeeccc4e1d74dbf | 8,450 |
import json
def jsonify(*args, **kwargs):
"""Creates a `Response` with the JSON representation of
    the given arguments with an `application/json` mimetype. The
arguments to this function are the same as to the `dict`
constructor.
Example usage:
from cocopot import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser:
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
"""
indent = None
separators = (',', ':')
rv = Response(json.dumps(dict(*args, **kwargs), indent=indent, separators=separators),
content_type='application/json')
return rv | 04fe7d2808081f9a9f9b7eb610e168c8329298cb | 8,451 |
def logged_in_profile(client):
"""Add a Profile and logged-in User"""
user = UserFactory.create(username="george")
client.force_login(user)
return user.profile | b4ed5872cf8da789f3e6ab001b8afc556c0faa50 | 8,452 |
def get_storage_backend():
"""
Return the singleton instance of the storage backend in use.
"""
global _STORAGE_BACKEND
if _STORAGE_BACKEND is None:
module, klass = ClassLoader.split(str(config.STORAGE_BACKEND_CLASS))
cl = ClassLoader(module, klass, config.STORAGE_BACKEND_ARGS)
_STORAGE_BACKEND = cl.get_instance()
return _STORAGE_BACKEND | d23315854bf736637f483f1d802b868d4c45ff8a | 8,453 |
def _pr_compile(regex, cleanup=None):
"""Prepare a 2-tuple of compiled regex and callable."""
return (_re_compile(regex), cleanup) | 832e794bef679272231e336aa2128cf1457abb8d | 8,454 |
def config():
"""
Configuration via config.json (introduced in Anki 2.1)
"""
try:
getConfig = mw.addonManager.getConfig
except AttributeError:
return LEGACY_CONFIG
return getConfig(__name__) | cc099497d55ccef8195b47dd4d080695e9af370c | 8,455 |
def ping(request):
"""
This view returns a dummy json. It is meant to be used to check whether
the server is alive or not
"""
return Json(None) | 54f8d05454913b4119f1580f5d8a19a878e76c13 | 8,456 |
import numpy as np
import copy
def copy_ffn(model):
"""Copy feed forward network model.
Args:
model: A previously created ffn model
Returns:
A copy of the model
"""
#init model as list holding data for each layer start with input layer
newmodel = []
newmodel.append({
"layer":0,
"n": copy.copy(model[0]['n']),
"activation": copy.copy(model[0]["activation"]),
"lreg": copy.copy(model[0]["lreg"]),
"regval": copy.copy(model[0]["regval"]),
"desc": copy.copy(model[0]["desc"])
})
# init weights and biases for hidden layers and declare activation function
for layer in range(1, len(model)):
newmodel.append({
"layer":layer,
"n": copy.copy(model[layer]['n']),
"activation": copy.copy(model[layer]["activation"]),
"lreg": copy.copy(model[layer]["lreg"]),
"regval": copy.copy(model[layer]["regval"]),
"desc": copy.copy(model[layer]["desc"]),
"weight": np.copy(model[layer]["weight"]),
"bias": np.copy(model[layer]["bias"]),
"weightdot": np.copy(model[layer]["weightdot"]),
"biasdot": np.copy(model[layer]["biasdot"])
})
return newmodel | 5bde1163d5d53a75839b15aaa38a28ecc54b195c | 8,457 |
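# Usage sketch for copy_ffn above, built around a hypothetical two-layer model description;
# the dict keys are the ones copy_ffn reads, the values are made up for illustration.
import numpy as np
_demo_model = [
    {"layer": 0, "n": 2, "activation": "linear", "lreg": 0, "regval": 0.0, "desc": "input"},
    {"layer": 1, "n": 3, "activation": "relu", "lreg": 2, "regval": 0.01, "desc": "hidden",
     "weight": np.zeros((3, 2)), "bias": np.zeros(3),
     "weightdot": np.zeros((3, 2)), "biasdot": np.zeros(3)},
]
_demo_clone = copy_ffn(_demo_model)
_demo_clone[1]["weight"][0, 0] = 1.0
print(_demo_model[1]["weight"][0, 0])  # 0.0 -- the copy shares no arrays with the original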
def is_big(label: str) -> bool:
"""Returns whether or not a cave is large based on its label"""
return label.isupper() | 7abdb0c5687e7870c96b767dc498e1f3c4ed21fe | 8,459 |
def fast_mult_polynoms(a, b):
"""Fast multiply of two polynoms in GF(2^8) using the log table
NB. This is NOT constant-time and leaks secret values in timing differences.
DO NOT USE THIS CODE TO IMPLEMENT SECURE APPLICATIONS
"""
if a == 0 or b == 0:
return 0
return POWER_X1_TABLE[(LOG_X1_TABLE[a] + LOG_X1_TABLE[b]) % 255] | 3d670b2380963c50f74eb2f671ccdf7378ce58aa | 8,460 |
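# Sketch of how the log/antilog tables used by fast_mult_polynoms are commonly built,
# assuming GF(2^8) with the AES reduction polynomial 0x11B and generator x + 1 (0x03);
# the original module may have generated its tables differently.
LOG_X1_TABLE = [0] * 256
POWER_X1_TABLE = [0] * 255
_x = 1
for _i in range(255):
    POWER_X1_TABLE[_i] = _x
    LOG_X1_TABLE[_x] = _i
    # multiply _x by 0x03 in GF(2^8): x*3 = x XOR xtime(x)
    _x ^= (_x << 1) ^ (0x11B if _x & 0x80 else 0)
print(fast_mult_polynoms(0x02, 0x03))  # 6
print(fast_mult_polynoms(0x57, 0x83))  # 193 (0xC1), the worked example from FIPS-197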
def get_page_for_group(user_groups, slug):
"""
Returns a page associated with user_groups given a slug.
"""
try:
page = get_pages_for_group(user_groups).get( slug = slug)
except Page.DoesNotExist:
page = None
return page | 6ea742688f07ca2ee0dd1ee4665598e074759229 | 8,461 |
import pandas as pd
import pymove as pm
def read_gps(gps_path):
"""Read GPS feed in CSV.
Expects GPS structured as:
vehicle_id: str
Internal system identification of the vehicle.
Should be unique per vehicle, and is used for tracking the
vehicle as it proceeds through the system.
route_id: str
The route_id from the GTFS feed that this selector refers to
datetime: datetime
Moment at which the vehicle's position was measured
latitude: float
Degrees North, in the WGS-84 coordinate system.
longitude: float
Degrees East, in the WGS-84 coordinate system.
Parameters
----------
    gps_path : str
        Path to the CSV file containing the GPS feed
Returns
-------
pm.MoveDataFrame
GPS data as a MoveDataFrame
"""
return pm.MoveDataFrame(
data=pd.read_csv(gps_path),
latitude="latitude",
longitude="longitude",
datetime="datetime",
traj_id="vehicle_id",
) | 2ec09b69646b31e38b07e25bf2ffa0f0c002f52b | 8,462 |
def _ureduce(a, func, **kwargs):
"""
Internal Function.
Call `func` with `a` as first argument swapping the axes to use extended
axis on functions that don't support it natively.
Returns result and a.shape with axis dims set to 1.
Parameters
----------
a : array_like
Input tensor or object that can be converted to a tensor.
func : callable
Reduction function capable of receiving a single axis argument.
It is called with `a` as first argument followed by `kwargs`.
kwargs : keyword arguments
additional keyword arguments to pass to `func`.
Returns
-------
result : tuple
Result of func(a, **kwargs) and a.shape with axis dims set to 1
which can be used to reshape the result to the same shape a ufunc with
keepdims=True would produce.
"""
axis = kwargs.get('axis', None)
if axis is not None:
keepdim = list(a.shape)
nd = a.ndim
axis = normalize_axis_tuple(axis, nd)
for ax in axis:
keepdim[ax] = 1
if len(axis) == 1:
kwargs['axis'] = axis[0]
else:
keep = set(range(nd)) - set(axis)
nkeep = len(keep)
# swap axis that should not be reduced to front
for i, s in enumerate(sorted(keep)):
a = a.swapaxes(i, s)
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
keepdim = tuple(keepdim)
else:
keepdim = (1,) * a.ndim
r = func(a, **kwargs)
return r, keepdim | 93be4e1a26dec25b74e6a9f330863ea7677cd614 | 8,463 |
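# Usage sketch for _ureduce above, assuming normalize_axis_tuple is NumPy's internal
# helper of the same name (it is not defined in the snippet itself).
import numpy as np
from numpy.core.numeric import normalize_axis_tuple
_a = np.arange(24, dtype=float).reshape(2, 3, 4)
_r, _keepdim = _ureduce(_a, np.median, axis=(0, 2))
print(_r.shape, _keepdim)            # (3,) (1, 3, 1)
print(_r.reshape(_keepdim).shape)    # (1, 3, 1) -- mimics a keepdims=True reduction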
import numpy as np
def compute_autocorrelation_local(x, Fs, N, H, norm_sum=True):
"""Compute local autocorrelation [FMP, Section 6.2.3]
Notebook: C6/C6S2_TempogramAutocorrelation.ipynb
Args:
x: Input signal
Fs: Sampling rate
N: Window length
H: Hop size
norm_sum: Normalizes by the number of summands in local autocorrelation
Returns:
A: Time-lag representation
T_coef: Time axis (seconds)
F_coef_lag: Lag axis
"""
L = len(x)
L_left = round(N / 2)
L_right = L_left
x_pad = np.concatenate((np.zeros(L_left), x, np.zeros(L_right)))
L_pad = len(x_pad)
M = int(np.floor(L_pad - N) / H) + 1
A = np.zeros((N, M))
win = np.ones(N)
if norm_sum is True:
lag_summand_num = np.arange(N, 0, -1)
for n in range(M):
t_0 = n * H
t_1 = t_0 + N
x_local = win * x_pad[t_0:t_1]
r_xx = np.correlate(x_local, x_local, mode='full')
r_xx = r_xx[N-1:]
if norm_sum is True:
r_xx = r_xx / lag_summand_num
A[:, n] = r_xx
Fs_A = Fs / H
T_coef = np.arange(A.shape[1]) / Fs_A
F_coef_lag = np.arange(N) / Fs
return A, T_coef, F_coef_lag | 8e67de8279e0daae90ae3391064ea92b023dfafc | 8,464 |
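# Usage sketch for compute_autocorrelation_local above on a synthetic click track
# (made-up signal: one impulse every 0.5 s at a 100 Hz sampling rate).
import numpy as np
_Fs = 100
_x = np.zeros(400)
_x[::50] = 1.0
_A, _T_coef, _F_coef_lag = compute_autocorrelation_local(_x, _Fs, N=200, H=100)
print(_A.shape)                      # (N, number of frames)
print(_T_coef[-1], _F_coef_lag[-1])  # last frame time and largest lag, both in seconds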
import cupy as cp
def euclid_dist(vector_p1, vector_p2):
    """ Calculates the element-wise Euclidean distance between 2 arrays of points """
distances = vector_p1 - vector_p2
return cp.hypot(distances[:, :, 0], distances[:, :, 1]) | 6f9d366cddb62f9ad1a8e26c9c2179fb73238a32 | 8,465 |
def _name_cleaner(agent_name):
"""Renames agent_name to prettier string for plots."""
rename_dict = {'correct_ts': 'Correct TS',
'kl_ucb': 'KL UCB',
'misspecified_ts': 'Misspecified TS',
'ucb1': 'UCB1',
'ucb-best': 'UCB-best',
'nonstationary_ts': 'Nonstationary TS',
'stationary_ts': 'Stationary TS',
'greedy': 'greedy',
'ts': 'TS',
'action_0': 'Action 0',
'action_1': 'Action 1',
'action_2': 'Action 2',
'bootstrap': 'bootstrap TS',
'laplace': 'Laplace TS',
'thoughtful': 'Thoughtful TS',
'gibbs': 'Gibbs TS'}
if agent_name in rename_dict:
return rename_dict[agent_name]
else:
return agent_name | e874745e804e07e385b377ec0ecd4247640ef6ce | 8,466 |
def add_training_args(parser):
"""Training arguments."""
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--experiment-name', type=str, default="gpt-345M",
help="The experiment name for summary and checkpoint")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--gradient-accumulation-steps', type=int, default=1,
help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--epochs', type=int, default=None,
help='Number of finetunning epochs. Zero results in evaluation only.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--train-iters', type=int, default=0,
help='total number of iterations to train over all training runs')
group.add_argument('--label-smoothing', type=float, default=0.0)
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--summary-dir', type=str, default="", help="The directory to store the summary")
group.add_argument('--seed', type=int, default=1234, help='random seed')
# Batch producer arguments
group.add_argument('--reset-position-ids', action='store_true',
help='Reset posistion ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention maske after '
'end-of-document token.')
# Learning rate.
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr-decay-ratio', type=float, default=0.1)
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
help='percentage of data to warmup on (.01 = 1% of all '
'training iters). Default 0.01')
group.add_argument('--switch-linear', action='store_true', help="Switch to linear decay for cosine decay")
# model checkpointing
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--new-save-directory', action='store_true')
group.add_argument('--save-epoch', type=int, default=1,
help='number of epochs between saves')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--no-load-lr-scheduler', action='store_true',
help='Do not load lr scheduler when loading checkpoint.')
group.add_argument('--no-deepspeed-load', action='store_true', help='Not use deepspeed when loading checkpoint')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
group.add_argument('--resume-dataloader', action='store_true',
help='Resume the dataloader when resuming training. '
'Does not apply to tfrecords dataloader, try resuming'
'with a different seed in this case.')
# distributed training args
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed training. One of [gloo, nccl]',
choices=['nccl', 'gloo'])
group.add_argument('--DDP-impl', default='torch', choices=['local', 'torch', 'none'],
help='which DistributedDataParallel implementation to use.')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher')
# BlockLM training args
group.add_argument('--block-lm', action='store_true', help="whether use the BlockLM pre-training")
group.add_argument('--masked-lm', action='store_true', help='whether to use the mlm objective')
group.add_argument('--bert-prob', type=float, default=0.5)
group.add_argument('--gpt-infill-prob', type=float, default=0.5)
group.add_argument('--gpt-min-ratio', type=float, default=0.5)
group.add_argument('--gap-sentence-prob', type=float, default=0.0)
group.add_argument('--gap-sentence-ratio', type=float, default=0.15)
group.add_argument('--avg-block-length', type=int, default=3)
group.add_argument('--short-seq-prob', type=float, default=0.0)
group.add_argument('--single-span-prob', type=float, default=0.0)
group.add_argument('--task-mask', action='store_true', help="Use different mask for generation and blank filling")
group.add_argument('--no-shuffle-block', action='store_true', help="not shuffle the blocks when filling the blank")
group.add_argument('--no-block-position', action='store_true',
help='Use (rough) absolute positions instead of block positions')
group.add_argument('--sentinel-token', action='store_true',
help="Use sentinel (mask) tokens to replace 2d position encoding")
group.add_argument('--block-mask-prob', type=float, default=0.0)
group.add_argument('--context-mask-ratio', type=float, default=0.0)
group.add_argument('--random-position', action='store_true',
help="Use random start position to cover all the position embeddings")
return parser | 05c71d77320644fdaf00ef1638e76dbbce60ffb5 | 8,467 |
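# Usage sketch for add_training_args above: attach the argument group to a fresh
# ArgumentParser and parse a few hypothetical flags.
import argparse
_parser = add_training_args(argparse.ArgumentParser())
_args = _parser.parse_args(["--experiment-name", "demo", "--batch-size", "8", "--lr", "5e-5"])
print(_args.experiment_name, _args.batch_size, _args.lr, _args.lr_decay_style)  # demo 8 5e-05 linear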
from typing import Optional
from typing import List
from typing import Dict
from typing import Text
def _multi_class_confusion_matrix_plot(
thresholds: Optional[List[float]] = None,
num_thresholds: Optional[int] = None,
name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME,
eval_config: Optional[config.EvalConfig] = None,
model_name: Text = '',
output_name: Text = '',
) -> metric_types.MetricComputations:
"""Returns computations for multi-class confusion matrix plot."""
if num_thresholds is None and thresholds is None:
thresholds = [0.0]
key = metric_types.PlotKey(
name=name, model_name=model_name, output_name=output_name)
# Make sure matrices are calculated.
matrices_computations = (
multi_class_confusion_matrix_metrics.multi_class_confusion_matrices(
thresholds=thresholds,
num_thresholds=num_thresholds,
eval_config=eval_config,
model_name=model_name,
output_name=output_name))
matrices_key = matrices_computations[-1].keys[-1]
def result(
metrics: Dict[metric_types.MetricKey,
multi_class_confusion_matrix_metrics.Matrices]
) -> Dict[metric_types.PlotKey,
metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]:
return {
key:
metrics[matrices_key].to_proto()
.multi_class_confusion_matrix_at_thresholds
}
derived_computation = metric_types.DerivedMetricComputation(
keys=[key], result=result)
computations = matrices_computations
computations.append(derived_computation)
return computations | 9f670ad80ea10c05460815fd9af250d60b035d9e | 8,468 |
def innerL(i, os):
"""
Parameters
----------
i
os:OptStruct
Returns
-------
"""
ei = cal_ek(os, i)
if (os.labels[i] * ei < -os.tol and os.alphas[i] < os.C) or (
os.labels[i] * ei > os.tol and os.alphas[i] > 0
):
j, ej = select_j(i, os, ei)
alphaIold = os.alphas[i].copy()
alphaJold = os.alphas[j].copy()
if os.labels[i] != os.labels[j]:
L = max(0, os.alphas[j] - os.alphas[i])
H = min(os.C, os.C + os.alphas[j] - os.alphas[i])
else:
L = max(0, os.alphas[j] + os.alphas[i] - os.C)
H = min(os.C, os.alphas[j] + os.alphas[i])
if L == H:
print("L==H")
return 0
eta = (
2.0 * os.X[i, :] * os.X[j, :].T
- os.X[i, :] * os.X[i, :].T
- os.X[j, :] * os.X[j, :].T
)
if eta >= 0:
print("eta>=0")
return 0
os.alphas[j] -= os.labels[j] * (ei - ej) / eta
os.alphas[j] = clip_alpha(os.alphas[j], H, L)
update_ek(os, j)
if abs(os.alphas[j] - alphaJold) < 0.00001:
print("j 移动不足")
return 0
os.alphas[i] += os.labels[j] * os.labels[i] * (alphaJold - alphaIold)
update_ek(os, i)
        b1 = (
            os.b
            - ei
            - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[i, :].T
            - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[i, :] * os.X[j, :].T
        )
        b2 = (
            os.b
            - ej
            - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[j, :].T
            - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[j, :] * os.X[j, :].T
        )
        if 0 < os.alphas[i] < os.C:
            os.b = b1
        elif 0 < os.alphas[j] < os.C:
            os.b = b2
        else:
            os.b = (b1 + b2) / 2.0
return 1
else:
return 0 | 1fc83191dbb16d863aef7c947408126d79c40099 | 8,469 |
import click
import logging
def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient:
"""Create Cumulocity client and prompt for missing credentials
if necessary.
Args:
ctx (click.Context): Click context
opts (ProxyContext): Proxy options
Returns:
CumulocityClient: Configured Cumulocity client
"""
if not opts.disable_prompts and not opts.host:
opts.host = click.prompt(
text="Enter the Cumulocity Host/URL",
)
client = CumulocityClient(
hostname=opts.host,
tenant=opts.tenant,
user=opts.user,
password=opts.password,
tfacode=opts.tfa_code,
token=opts.token,
ignore_ssl_validate=opts.ignore_ssl_validate,
)
if not client.url:
opts.show_error(
"No Cumulocity host was provided. The host can be set via"
"environment variables, arguments or the env-file"
)
ctx.exit(ExitCodes.NO_SESSION)
logging.info("Checking tenant id")
client.validate_tenant_id()
# Retry logging so the user can be prompted for
# their credentials/TFA code etc. without having to run c8ylp again
retries = 3
success = False
while retries:
try:
if client.token:
client.validate_credentials()
else:
client.login()
if opts.env_file and opts.store_token:
store_credentials(opts, client)
success = True
break
except CumulocityMissingTFAToken as ex:
client.tfacode = click.prompt(
text="Enter your Cumulocity TFA-Token", hide_input=False
)
except Exception as ex:
logging.info("unknown exception: %s", ex)
if not opts.disable_prompts:
if not client.user:
client.user = click.prompt(
text="Enter your Cumulocity Username",
)
if not client.password:
client.password = click.prompt(
text="Enter your Cumulocity Password [input hidden]",
hide_input=True,
)
retries -= 1
if not success:
logging.info("Could not create client")
ctx.exit(ExitCodes.NO_SESSION)
return client | cad28ef10409352fe25ae7310fbaae4a095b8a21 | 8,470 |
import struct
def get_float(data, index):
"""get a float value from data array"""
return struct.unpack('f', "".join(map(chr, data[4*index:(index+1)*4])))[0] | b78a5472bef42312bd765b6f9c58bfe9cddbf311 | 8,471 |
import numpy as np
import tensorflow as tf
def gelu(tensor):
""" Gaussian Error Linear Unit - https://arxiv.org/abs/1606.08415 """
return 0.5 * tensor * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (tensor + 0.044715 * tf.pow(tensor, 3)))) | acb5101815bb3cd0c30d602fefb0734707bf4acf | 8,472 |
def _uniqueElements(an_iterable):
"""
:param iterable an_iterable:
:param int idx:
:return list: has only one occurrence of each element
"""
used = []
unique = [x for x in an_iterable if x not in used and (used.append(x) or True)]
return unique | 8290d30e48c3ade4a547d7c3a8cf0c57b8d45b19 | 8,473 |
def guestbook_key(guestbook_name=None):
"""Constructs a Datastore key for a Guestbook entity with name."""
return ndb.Key('Guestbook', guestbook_name or 'default_guestbook') | fcff509ad5e48b58ffa823801af134c20e974b56 | 8,474 |
def _bias_scale(x, b, data_format):
"""The multiplication counter part of tf.nn.bias_add."""
if data_format == 'NHWC':
return x * b
elif data_format == 'NCHW':
return x * b
else:
raise ValueError('invalid data_format: %s' % data_format) | 19e5bb9419827f6e6976b1c5ed3cd40cdd676ad0 | 8,475 |
import re
def checkTableName(tables):
""" Check if table name has an underscore or not."""
bad = set()
output = []
for i in tables:
if re.search('.*_.*', i):
bad.add(i)
if bad:
output.append("These tables have underscores in the name")
for i in bad:
output.append(i)
output.append("")
else:
output.append("No malformed table names")
output.append("")
return (output, bad) | 2847c20712e6ce92367772678d058a05b5d10dc3 | 8,476 |
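# Quick check of checkTableName above with hypothetical table names.
_output, _bad = checkTableName(["users", "user_logs", "orders"])
print(_bad)                 # {'user_logs'}
print("\n".join(_output))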
def load_split_from_tfds_builder(builder,
batch_size,
split,
preprocess_example=None,
augment_train_example=None,
shuffle_buffer_size=None,
shuffle_seed=0,
cache=True):
"""Loads a split from a dataset using TensorFlow Datasets compatible builder.
Args:
builder: tfds.core.DatasetBuilder; A TFDS compatible dataset builder.
batch_size: int; The batch size returned by the data pipeline.
split: str; Name of the split to be loaded.
preprocess_example: function; A function that given an example, returns the
preprocessed example. Note that the preprocessing is done BEFORE caching
to re-use them.
augment_train_example: A function that given a train example returns the
augmented example. Note that this function is applied AFTER caching and
repeat to get true randomness.
shuffle_buffer_size: int; Size of the tf.data.dataset shuffle buffer.
shuffle_seed: int; Seed for shuffling the training data.
cache: bool; Whether to cache dataset in memory.
Returns:
A `tf.data.Dataset`, and dataset information.
"""
# Prepare map functions.
preprocess_example = preprocess_example or (lambda ex: ex)
augment_train_example = augment_train_example or (lambda ex: ex)
shuffle_buffer_size = shuffle_buffer_size or (8 * batch_size)
# Download dataset:
builder.download_and_prepare()
# Each host is responsible for a fixed subset of data.
base_split_name, host_start, host_end = get_data_range(
builder, split, jax.process_index(), jax.process_count())
data_range = tfds.core.ReadInstruction(
base_split_name, unit='abs', from_=host_start, to=host_end)
ds = builder.as_dataset(split=data_range, shuffle_files=False)
options = tf.data.Options()
options.threading.private_threadpool_size = 48
ds = ds.with_options(options)
# Applying preprocessing before `ds.cache()` to re-use it.
ds = ds.map(
preprocess_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Caching.
if cache:
ds = ds.cache()
if 'train' in split:
# First repeat then batch.
ds = ds.repeat()
# Augmentation should be done after repeat for true randomness.
ds = ds.map(
augment_train_example, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Shuffle after augmentation to avoid loading uncropped images into buffer:
ds = ds.shuffle(shuffle_buffer_size, seed=shuffle_seed)
ds = ds.batch(batch_size, drop_remainder=True)
else:
# First batch then repeat.
ds = ds.batch(batch_size, drop_remainder=False)
ds = ds.repeat()
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds, builder.info | 63c73f65cedc1fff92ce9a02ea23822c8e411439 | 8,477 |
def analyze(tokens):
"""
    Combine expression elements into an operation tree.
"""
assert_non_empty(tokens)
    # Either a number or an operator
token = analyze_token(tokens.pop(0))
    # If it is a number, just return it and move on: numbers are self-evaluating, the value is the number itself
if type(token) in (int, float):
return token
    # If it is an operator, it must be combined into an Exp expression
if token in known_operators:
        # The current token is an operator, so check that operands follow
        # A calculator operator is always followed by its operands
        # The operands themselves are combined recursively
if len(tokens) == 0 or tokens.pop(0) != '(':
raise SyntaxError('expected ( after ' + token)
return Exp(token, analyze_operands(tokens))
else:
raise SyntaxError('unexpected ' + token) | 369b0b3df423dd3a38e0756379442e428efb7ef3 | 8,479 |
from typing import Mapping
from typing import Any
def copy_dict(dic: Mapping[str, Any], depth: int = 1) -> Mapping[str, Any]:
"""Deep copy a dict
Args:
dic: The dict to be copied
depth: The depth to be deep copied
Returns:
The deep-copied dict
"""
if depth <= 1:
return dic.copy()
return {
key: copy_dict(val, depth - 1) if isinstance(val, dict) else val
for key, val in dic.items()
} | a75f9ef7c8dc797ccdf47cdc3029c403b09e75cf | 8,480 |
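# Usage sketch for copy_dict above on a hypothetical config dict, showing the
# difference between depth 1 (nested dicts shared) and depth 2 (nested dicts copied).
_cfg = {"db": {"host": "localhost", "port": 5432}, "debug": True}
_shallow = copy_dict(_cfg)
_deep = copy_dict(_cfg, depth=2)
_cfg["db"]["port"] = 6543
print(_shallow["db"]["port"], _deep["db"]["port"])  # 6543 5432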
def get_wrf_config(wrf_config, start_date=None, **kwargs):
"""
precedence = kwargs > wrf_config.json > constants
"""
if start_date is not None:
wrf_config['start_date'] = start_date
for key in kwargs:
wrf_config[key] = kwargs[key]
return wrf_config | c9e070b91ab93a7cb81a576aa799537361b7a26f | 8,481 |
import numpy as np
def pdb_to_structure(filename):
    """Import a structure object from a PDB file.
    """
    try:
        from Bio import PDB
    except ImportError:
print("I can't import Biopython which is needed to handle PDB files.")
raise
p = PDB.PDBParser()
structure = p.get_structure("S", filename)
for _ in structure.get_chains():
atoms = [np.array(atom.get_coord()) for atom in structure.get_atoms()]
return atoms | 1b77b6bc5af75d03af847032827c07656addf4f3 | 8,482 |
def allocation_ncsist():
"""
Real Name: Allocation NCSIST
Original Eqn: IF THEN ELSE( ShiMen Reservoir Depth>=ShiMenReservoir Operation Rule Lower Limit , 6048, IF THEN ELSE( ShiMen Reservoir Depth >=ShiMenReservoir Operation Rule Lower Severe Limit, 6048*0.9 , 6048*0.8 ) )
Units: m3
Limits: (None, None)
Type: component
Subs: None
water right 6048(m^3 per day), the same for each Ten-days; 0.07 CMSD, classified as Domestic.
"""
return if_then_else(
shimen_reservoir_depth() >= shimenreservoir_operation_rule_lower_limit(),
lambda: 6048,
lambda: if_then_else(
shimen_reservoir_depth()
>= shimenreservoir_operation_rule_lower_severe_limit(),
lambda: 6048 * 0.9,
lambda: 6048 * 0.8,
),
) | f2b781869957d78dc59e6837a253bc0df29250bd | 8,483 |
def hamming(s1, s2):
"""Return the hamming distance between 2 DNA sequences"""
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2)) | e3e1f3e9cc883f27d26f00c1b3c9495d29c1a139 | 8,484 |
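# Quick check of hamming above with made-up DNA strings; the length difference also counts.
print(hamming("GATTACA", "GACTATA"))  # 2
print(hamming("GATTACA", "GATTA"))    # 2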
import torch
import cudf
def get_org_df(pr_label_f, metadata_df, seq_len):
"""
Returns the org_df given pr_label_f,metadata_df,
"""
org_r, org_c = torch.nonzero(pr_label_f, as_tuple=True)
org_df = cudf.DataFrame()
org_df["seq_row"] = cudf.Series(org_r)
org_df["org_seq_col"] = cudf.Series(org_c)
org_df = org_df.merge(metadata_df)
org_df = org_df.rename(columns={"seq_row": "org_seq_row"})
org_df["flat_loc_org"] = org_df["org_seq_row"] * seq_len + org_df["org_seq_col"]
### Trim overlapping and invalid predictions
flag = (org_df["org_seq_col"] >= org_df["start_index"]) & (
org_df["org_seq_col"] <= org_df["stop_index"]
)
org_df = org_df[flag]
return org_df[["org_seq_row", "org_seq_col", "input_text_index", "flat_loc_org"]] | 543bfe8f95409eefeb792ee2f94d8518fa4a3fe3 | 8,486 |
from scipy.stats import norm
def binomial_proportion(nsel, ntot, coverage=0.68):
"""
Calculate a binomial proportion (e.g. efficiency of a selection) and its confidence interval.
Parameters
----------
nsel: array-like
Number of selected events.
ntot: array-like
Total number of events.
coverage: float (optional)
Requested fractional coverage of interval (default: 0.68).
Returns
-------
p: array of dtype float
Binomial fraction.
dpl: array of dtype float
Lower uncertainty delta (p - pLow).
dpu: array of dtype float
Upper uncertainty delta (pUp - p).
Examples
--------
>>> p, dpl, dpu = binomial_proportion(50,100,0.68)
>>> round(p, 3)
0.5
>>> round(dpl, 3)
0.049
>>> round(dpu, 3)
0.049
>>> abs(np.sqrt(0.5*(1.0-0.5)/100.0)-0.5*(dpl+dpu)) < 1e-3
True
Notes
-----
The confidence interval is approximate and uses the score method
of Wilson. It is based on the log-likelihood profile and can
undercover the true interval, but the coverage is on average
closer to the nominal coverage than the exact Clopper-Pearson
interval. It is impossible to achieve perfect nominal coverage
as a consequence of the discreteness of the data.
"""
z = norm().ppf(0.5 + 0.5 * coverage)
z2 = z * z
p = np.asarray(nsel, dtype=np.float) / ntot
div = 1.0 + z2 / ntot
pm = (p + z2 / (2 * ntot))
dp = z * np.sqrt(p * (1.0 - p) / ntot + z2 / (4 * ntot * ntot))
pl = (pm - dp) / div
pu = (pm + dp) / div
return p, p - pl, pu - p | 94b9d3cf766ca2f35f677a4421aabc1840097729 | 8,487 |
def nasnet_dual_path_scheme_ordinal(module,
x,
_):
"""
NASNet specific scheme of dual path response for an ordinal module with dual inputs/outputs in a DualPathSequential
module.
Parameters:
----------
module : nn.Module
A module.
x : Tensor
Current processed tensor.
Returns
-------
x_next : Tensor
Next processed tensor.
x : Tensor
Current processed tensor.
"""
return module(x), x | aef487a25bc3349f14a112826ee4f8e8912dd324 | 8,488 |
import json
import traceback
def ifttt_budget_options():
""" Option values for the budget field """
if "IFTTT-Service-Key" not in request.headers or \
request.headers["IFTTT-Service-Key"] != get_ifttt_key():
return json.dumps({"errors": [{"message": "Invalid key"}]}), 401
try:
data = get_ynab_budgets()
return json.dumps({"data": data})
except:
traceback.print_exc()
return json.dumps({"data": [{"label": "ERROR retrieving YNAB data",
"value": ""}]}) | c987ca533fc0568e759e4e6c6affbdb7efeb4781 | 8,489 |
import sqlite3
def get_exp_date_stats(db_file_name, Table):
"""Caculate exp date stats of collection"""
conn = sqlite3.connect(db_file_name)
c = conn.cursor()
c.execute('''SELECT exp, count(exp) FROM {} GROUP BY exp'''.format(Table))
exp_dict = {}
results = c.fetchall()
for result in results:
exp_dict[str(result[0])] = result[1]
conn.commit()
conn.close()
return exp_dict | 7641d6309939359c1d790b66a1310b5b78be99a4 | 8,490 |
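# Usage sketch for get_exp_date_stats above with a hypothetical "inventory" table
# written to a throwaway SQLite file.
import os
import sqlite3
import tempfile
_path = os.path.join(tempfile.mkdtemp(), "cards.db")
_conn = sqlite3.connect(_path)
_conn.execute("CREATE TABLE inventory (name TEXT, exp TEXT)")
_conn.executemany("INSERT INTO inventory VALUES (?, ?)",
                  [("a", "2024-01"), ("b", "2024-01"), ("c", "2025-06")])
_conn.commit()
_conn.close()
print(get_exp_date_stats(_path, "inventory"))  # {'2024-01': 2, '2025-06': 1} (key order may vary)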
import random
def create_default_identifier(node_address, token_address, target):
"""
The default message identifier value is the first 8 bytes of the sha3 of:
- Our Address
- Our target address
- The token address
- A random 8 byte number for uniqueness
"""
hash_ = sha3('{}{}{}{}'.format(
node_address,
target,
token_address,
random.randint(0, UINT64_MAX)
))
return int(hash_[0:8].encode('hex'), 16) | ae63898d0130eda2cbc1a6e3861b288e9b1a4d10 | 8,491 |
def print_scientific_16(value: float) -> str:
"""
Prints a value in 16-character scientific notation.
This is a sub-method and shouldnt typically be called
.. seealso:: print_float_16 for a better method
"""
if value == 0.0:
return '%16s' % '0.'
python_value = '%16.14e' % value # -1.e-2
svalue, sexponent = python_value.strip().split('e')
exponent = int(sexponent) # removes 0s
if abs(value) < 1.:
sign = '-'
else:
sign = '+'
# the exponent will be added later...
sexp2 = str(exponent).strip('-+')
value2 = float(svalue)
# the plus 1 is for the sign
len_sexp = len(sexp2) + 1
leftover = 16 - len_sexp
if value < 0:
fmt = "%%1.%sf" % (leftover - 3)
else:
fmt = "%%1.%sf" % (leftover - 2)
svalue3 = fmt % value2
svalue4 = svalue3.strip('0')
field = "%16s" % (svalue4 + sign + sexp2)
return field | 18072bfb5cc51e83f1c26086558abc4019e4737e | 8,493 |
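# Usage sketch for print_scientific_16 above: show the 16-character fields produced
# for a few sample values.
for _value in (0.0, 1234.5678, -0.01, 3.0e-8):
    print(repr(print_scientific_16(_value)))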
def _interpolate_target(bin_edges, y_vals, idx, target):
"""Helper to identify when a function y that has been discretized hits value target.
idx is the first index where y is greater than the target
"""
if idx == 0:
y_1 = 0.
else:
y_1 = y_vals[idx - 1]
y_2 = y_vals[idx]
edge_1 = bin_edges[idx]
edge_2 = bin_edges[idx + 1]
frac = (target - y_1) / (y_2 - y_1)
x = edge_1 + frac * (edge_2 - edge_1)
return x | 7a84bc846c8446aa7449732fdb60171d6f144863 | 8,494 |
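# Worked example for _interpolate_target above with made-up bin edges and cumulative values:
# 0.9 at index 2 is the first value above the target 0.7, so the crossing is interpolated
# halfway into the bin [2.0, 3.0).
_bin_edges = [0.0, 1.0, 2.0, 3.0]
_y_vals = [0.2, 0.5, 0.9]
print(_interpolate_target(_bin_edges, _y_vals, 2, 0.7))  # 2.5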
import numpy as np
from pyproj import Geod
def azimuth_range_to_lat_lon(azimuths, ranges, center_lon, center_lat, geod=None):
"""Convert azimuth and range locations in a polar coordinate system to lat/lon coordinates.
Pole refers to the origin of the coordinate system.
Parameters
----------
azimuths : array_like
array of azimuths defining the grid. If not a `pint.Quantity`,
assumed to be in degrees.
ranges : array_like
array of range distances from the pole. Typically in meters.
center_lat : float
The latitude of the pole in decimal degrees
center_lon : float
The longitude of the pole in decimal degrees
geod : `pyproj.Geod` or ``None``
PyProj Geod to use for forward azimuth and distance calculations. If ``None``, use a
default spherical ellipsoid.
Returns
-------
lon, lat : 2D arrays of longitudes and latitudes corresponding to original locations
Notes
-----
Credit to Brian Blaylock for the original implementation.
"""
if geod is None:
g = Geod(ellps='sphere')
else:
g = geod
rng2d, az2d = np.meshgrid(ranges, azimuths)
lats = np.full(az2d.shape, center_lat)
lons = np.full(az2d.shape, center_lon)
lon, lat, _ = g.fwd(lons, lats, az2d, rng2d)
return lon, lat | a68e9e6731393f454d5725267b5a7c56e2afaedd | 8,495 |
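# Usage sketch for azimuth_range_to_lat_lon above, with a hypothetical radar site at
# 35.25 N, 97.5 W and made-up azimuths/ranges; requires pyproj.
import numpy as np
_azimuths = np.array([0.0, 90.0, 180.0, 270.0])   # degrees clockwise from north
_ranges = np.array([10_000.0, 50_000.0])           # metres from the site
_lon, _lat = azimuth_range_to_lat_lon(_azimuths, _ranges, -97.5, 35.25)
print(_lon.shape, _lat.shape)  # (4, 2) (4, 2)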
def count_path_recursive(m, n):
"""Count number of paths with the recursive method."""
def traverse(m, n, location=[1, 1]):
# return 0 if past edge
if location[0] > m or location[1] > n:
return 0
# return 1 if at end position
if location == [m, n]:
return 1
return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1])
return traverse(m, n) | ad31718d179bf46966117ecfa414807e6d356634 | 8,496 |
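# Quick check of count_path_recursive above; the count equals the binomial
# coefficient C(m + n - 2, m - 1) for an m x n grid.
print(count_path_recursive(3, 3))  # 6
print(count_path_recursive(2, 5))  # 5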
def markdown(caller):
"""Renders the argument to markdown. Useful in `{% filter markdown() %} `
blocks
Args:
caller (str): Markdown source
Returns:
str: rendered HTML
"""
return render_markdown(caller) | fd3fcea8ae9cbac660c1f8971e89baa1c61467ac | 8,497 |
from typing import Dict
from typing import Any
def color_menu(colno: int, colname: str, entry: Dict[str, Any]) -> int:
# pylint: disable=unused-argument
"""color the menu"""
if entry.get("__shadowed") is True:
return 8
if entry.get("__deprecated") is True:
return 9
return 2 | 090dc76475fbe7507c9687127306c34b0652e16a | 8,499 |
def likelihood(sent, ai, domain, temperature):
"""Computes likelihood of a given sentence according the giving model."""
enc = ai._encode(sent, ai.model.word_dict)
score, _, _= ai.model.score_sent(enc, ai.lang_h, ai.ctx_h, temperature)
return score | 8332dfc8c2dba18a117768043dff67e632cc22ff | 8,500 |
def simulator(
theta,
model="angle",
n_samples=1000,
delta_t=0.001, # n_trials
max_t=20,
no_noise=False,
bin_dim=None,
bin_pointwise=False,
):
"""Basic data simulator for the models included in HDDM.
:Arguments:
theta : list or numpy.array or panda.DataFrame
Parameters of the simulator. If 2d array, each row is treated as a 'trial'
and the function runs n_sample * n_trials simulations.
model: str <default='angle'>
Determines the model that will be simulated.
n_samples: int <default=1000>
Number of simulation runs (for each trial if supplied n_trials > 1)
n_trials: int <default=1>
Number of trials in a simulations run (this specifically addresses trial by trial parameterizations)
delta_t: float
            Size of timesteps in simulator (conceptually measured in seconds)
max_t: float
            Maximum reaction time the simulator can reach
no_noise: bool <default=False>
            Turn noise off (useful for plotting purposes mostly)
bin_dim: int <default=None>
Number of bins to use (in case the simulator output is supposed to come out as a count histogram)
bin_pointwise: bool <default=False>
            Whether or not to bin the output data pointwise. If true, the 'RT' part of the data now specifies the
            'bin-number' of a given trial instead of the 'RT' directly. You need to specify bin_dim as some number for this to work.
:Return: tuple
can be (rts, responses, metadata)
or (rt-response histogram, metadata)
or (rts binned pointwise, responses, metadata)
"""
# Useful for sbi
if type(theta) == list:
print("theta is supplied as list --> simulator assumes n_trials = 1")
theta = np.asarray(theta).astype(np.float32)
elif type(theta) == np.ndarray:
theta = theta.astype(np.float32)
elif type(theta) == pd.core.frame.DataFrame:
theta = theta[model_config[model]["params"]].values.astype(np.float32)
else:
        theta = theta.numpy().astype(np.float32)
if len(theta.shape) < 2:
theta = np.expand_dims(theta, axis=0)
if theta.ndim > 1:
n_trials = theta.shape[0]
else:
n_trials = 1
# 2 choice models
if no_noise:
s = 0.0
else:
s = 1.0
if model == "test":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
boundary_params={},
boundary_fun=bf.constant,
boundary_multiplicative=True,
max_t=max_t,
)
if model == "ddm" or model == "ddm_elife" or model == "ddm_analytic":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
boundary_params={},
boundary_fun=bf.constant,
boundary_multiplicative=True,
max_t=max_t,
)
if model == "ddm_legacy" or model == "ddm_vanilla":
x = ddm(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
)
if model == "full_ddm_legacy" or model == "full_ddm_vanilla":
x = full_ddm_vanilla(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sz=theta[:, 4],
sv=theta[:, 5],
st=theta[:, 6],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
)
if model == "angle" or model == "angle2":
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 4]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if (
model == "weibull_cdf"
or model == "weibull_cdf2"
or model == "weibull_cdf_ext"
or model == "weibull_cdf_concave"
or model == "weibull"
):
x = ddm_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
s=s,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 4], "beta": theta[:, 5]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "levy":
x = levy_flexbound(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
alpha_diff=theta[:, 3],
t=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "full_ddm" or model == "full_ddm2":
x = full_ddm(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sz=theta[:, 4],
sv=theta[:, 5],
st=theta[:, 6],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "ddm_sdv":
x = ddm_sdv(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
t=theta[:, 3],
sv=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "ornstein" or model == "ornstein_uhlenbeck":
x = ornstein_uhlenbeck(
v=theta[:, 0],
a=theta[:, 1],
z=theta[:, 2],
g=theta[:, 3],
t=theta[:, 4],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# 3 Choice models
if no_noise:
s = np.tile(np.array([0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 1))
else:
s = np.tile(np.array([1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1))
if model == "race_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=theta[:, 4:7],
t=theta[:, [7]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
t=theta[:, [5]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_angle_3":
x = race_model(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
t=theta[:, [5]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 6]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=theta[:, 4:7],
g=theta[:, [7]],
b=theta[:, [8]],
t=theta[:, [9]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
g=theta[:, [5]],
b=theta[:, [6]],
t=theta[:, [7]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_angle_3":
x = lca(
v=theta[:, :3],
a=theta[:, [3]],
z=np.column_stack([theta[:, [4]], theta[:, [4]], theta[:, [4]]]),
g=theta[:, [5]],
b=theta[:, [6]],
t=theta[:, [7]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 8]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# 4 Choice models
if no_noise:
s = np.tile(np.array([0.0, 0.0, 0.0, 0.0], dtype=np.float32), (n_trials, 1))
else:
s = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32), (n_trials, 1))
if model == "race_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=theta[:, 5:9],
t=theta[:, [9]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
t=theta[:, [6]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "race_no_bias_angle_4":
x = race_model(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
t=theta[:, [6]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 7]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=theta[:, 5:9],
g=theta[:, [9]],
b=theta[:, [10]],
t=theta[:, [11]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
g=theta[:, [6]],
b=theta[:, [7]],
t=theta[:, [8]],
s=s,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
if model == "lca_no_bias_angle_4":
x = lca(
v=theta[:, :4],
a=theta[:, [4]],
z=np.column_stack(
[theta[:, [5]], theta[:, [5]], theta[:, [5]], theta[:, [5]]]
),
g=theta[:, [6]],
b=theta[:, [7]],
t=theta[:, [8]],
s=s,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 9]},
delta_t=delta_t,
n_samples=n_samples,
n_trials=n_trials,
max_t=max_t,
)
# Seq / Parallel models (4 choice)
if no_noise:
s = 0.0
else:
s = 1.0
# Precompute z_vector for no_bias models
z_vec = np.tile(np.array([0.5], dtype=np.float32), reps=n_trials)
if model == "ddm_seq2":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4],
z_l_1=theta[:, 5],
z_l_2=theta[:, 6],
t=theta[:, 7],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_seq2_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_seq2_angle_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 5]},
)
if model == "ddm_seq2_weibull_no_bias":
x = ddm_flexbound_seq2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]},
)
if model == "ddm_par2":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4],
z_l_1=theta[:, 5],
z_l_2=theta[:, 6],
t=theta[:, 7],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_par2_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_par2_angle_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 5]},
)
if model == "ddm_par2_weibull_no_bias":
x = ddm_flexbound_par2(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
t=theta[:, 4],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 5], "beta": theta[:, 6]},
)
if model == "ddm_mic2_adj":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=theta[:, 4], # np.array([0.5], dtype = np.float32),
z_l_1=theta[:, 5], # np.array([0.5], dtype = np.float32),
z_l_2=theta[:, 6], # np.array([0.5], dtype = np.float32),
d=theta[:, 7],
t=theta[:, 8],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_mic2_adj_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec[:],
z_l_1=z_vec[:],
z_l_2=z_vec[:],
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.constant,
boundary_multiplicative=True,
boundary_params={},
)
if model == "ddm_mic2_adj_angle_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.angle,
boundary_multiplicative=False,
boundary_params={"theta": theta[:, 6]},
)
if model == "ddm_mic2_adj_weibull_no_bias":
x = ddm_flexbound_mic2_adj(
v_h=theta[:, 0],
v_l_1=theta[:, 1],
v_l_2=theta[:, 2],
a=theta[:, 3],
z_h=z_vec,
z_l_1=z_vec,
z_l_2=z_vec,
d=theta[:, 4],
t=theta[:, 5],
s=s,
n_samples=n_samples,
n_trials=n_trials,
delta_t=delta_t,
max_t=max_t,
boundary_fun=bf.weibull_cdf,
boundary_multiplicative=True,
boundary_params={"alpha": theta[:, 6], "beta": theta[:, 7]},
)
# Output compatibility
if n_trials == 1:
x = (np.squeeze(x[0], axis=1), np.squeeze(x[1], axis=1), x[2])
if n_trials > 1 and n_samples == 1:
x = (np.squeeze(x[0], axis=0), np.squeeze(x[1], axis=0), x[2])
x[2]["model"] = model
    if bin_dim == 0 or bin_dim is None:
return x
elif bin_dim > 0 and n_trials == 1 and not bin_pointwise:
binned_out = bin_simulator_output(x, nbins=bin_dim)
return (binned_out, x[2])
elif bin_dim > 0 and n_trials == 1 and bin_pointwise:
binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim)
return (
np.expand_dims(binned_out[:, 0], axis=1),
np.expand_dims(binned_out[:, 1], axis=1),
x[2],
)
elif bin_dim > 0 and n_trials > 1 and n_samples == 1 and bin_pointwise:
binned_out = bin_simulator_output_pointwise(x, nbins=bin_dim)
return (
np.expand_dims(binned_out[:, 0], axis=1),
np.expand_dims(binned_out[:, 1], axis=1),
x[2],
)
elif bin_dim > 0 and n_trials > 1 and n_samples > 1 and bin_pointwise:
return "currently n_trials > 1 and n_samples > 1 will not work together with bin_pointwise"
elif bin_dim > 0 and n_trials > 1 and not bin_pointwise:
return "currently binned outputs not implemented for multi-trial simulators"
elif bin_dim == -1:
return "invalid bin_dim" | 370e45499f85bd406a2f80230389dd6aa9866cf0 | 8,501 |
from pytato.utils import with_indices_for_broadcasted_shape
from typing import Union
def logical_not(x: ArrayOrScalar) -> Union[Array, bool]:
"""
Returns the element-wise logical NOT of *x*.
"""
if isinstance(x, SCALAR_CLASSES):
# https://github.com/python/mypy/issues/3186
return np.logical_not(x) # type: ignore
assert isinstance(x, Array)
return IndexLambda(with_indices_for_broadcasted_shape(prim.Variable("_in0"),
x.shape,
x.shape),
shape=x.shape,
dtype=np.dtype(np.bool8),
bindings={"_in0": x}) | 922f7a0688590fad9492b7e654f97b2f34717ca8 | 8,502 |
def _build_xyz_pow(name, pref, l, m, n, shift=2):
"""
Builds an individual row contraction line.
    name = pref * xc_pow[l] * yc_pow[m] * zc_pow[n]
"""
l = l - shift
m = m - shift
n = n - shift
if (pref <= 0) or (l < 0) or (n < 0) or (m < 0):
return None
mul = " "
if pref == 1:
ret = name + " ="
else:
# Basically always an int
ret = name + " = %2.1f" % float(pref)
mul = " * "
if l > 0:
ret += mul + "xc_pow[%d]" % (l - 1)
mul = " * "
if m > 0:
ret += mul + "yc_pow[%d]" % (m - 1)
mul = " * "
if n > 0:
ret += mul + "zc_pow[%d]" % (n - 1)
mul = " * "
if mul == " ":
ret += " 1"
return ret | 0dbae02252b27845e795a586e2e28b58c948fa1d | 8,503 |
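# Quick check of _build_xyz_pow above with hypothetical inputs: pref=2 and
# (l, m, n) = (3, 2, 4) with the default shift of 2 keeps x^1 * z^2, while a
# negative exponent after the shift yields None.
print(_build_xyz_pow("out", 2, 3, 2, 4))  # out = 2.0 * xc_pow[0] * zc_pow[1]
print(_build_xyz_pow("out", 1, 1, 2, 2))  # None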