Dataset columns: content (string, lengths 35 – 762k), sha1 (string, length 40), id (int64, 0 – 3.66M)
def get_custom_data_format(*args):
    """
    get_custom_data_format(dfid) -> data_format_t

    Get definition of a registered custom data format.

    @param dfid: data format id (C++: int)
    @return: data format definition or NULL
    """
    return _ida_bytes.get_custom_data_format(*args)
8b8751f94a409dc656efbe8131de66a0916be9ea
8,400
def memory(info, func, expr):
    """
    Checks if the function has been called with the same argument previously
    and, if so, returns the cached results instead of running the function again.

    args:
        info: cache object holding previously evaluated results (or None)
        func: the function whose results are cached
        expr: the argument expression used as the cache key
    """
    rows = None
    if info:
        if func in info.evaluated:
            if expr in info.evaluated[func]:
                rows = info.evaluated[func][expr]
        else:
            info.evaluated[func] = {}
    else:
        info = Info()
        info.evaluated[func] = {}
    return info, rows
693ad671b21efbc872508b7a5a4c4aa31852d10a
8,401
def friend_invitation_by_email_verify_for_api( # friendInvitationByEmailVerify voter_device_id, invitation_secret_key, web_app_root_url=''): """ :param voter_device_id: :param invitation_secret_key: :param web_app_root_url: :return: """ status = "" success = False # If a voter_device_id is passed in that isn't valid, we want to throw an error device_id_results = is_voter_device_id_valid(voter_device_id) if not device_id_results['success']: status += device_id_results['status'] json_data = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return json_data if not positive_value_exists(invitation_secret_key): status += "VOTER_EMAIL_ADDRESS_VERIFY_MISSING_SECRET_KEY " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results voter_manager = VoterManager() voter_results = voter_manager.retrieve_voter_from_voter_device_id(voter_device_id) voter_id = voter_results['voter_id'] if not positive_value_exists(voter_id): status += "VOTER_NOT_FOUND_FROM_VOTER_DEVICE_ID " error_results = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': False, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results voter = voter_results['voter'] voter_we_vote_id = voter.we_vote_id voter_has_data_to_preserve = voter.has_data_to_preserve() friend_manager = FriendManager() friend_invitation_results = friend_manager.retrieve_friend_invitation_from_secret_key( invitation_secret_key, for_accepting_friendship=True, read_only=False) if not friend_invitation_results['friend_invitation_found']: status += "INVITATION_NOT_FOUND_FROM_SECRET_KEY " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': False, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results # Now that we have the friend_invitation data, look more closely at it invitation_found = True voter_we_vote_id_accepting_invitation = "" email_manager = EmailManager() if friend_invitation_results['friend_invitation_voter_link_found']: friend_invitation_voter_link = friend_invitation_results['friend_invitation_voter_link'] if friend_invitation_voter_link.sender_voter_we_vote_id == voter_we_vote_id: status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED " error_results = { 'status': status, 'success': True, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': True, 'attempted_to_approve_own_invitation': True, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': True, } return error_results voter_we_vote_id_accepting_invitation = friend_invitation_voter_link.recipient_voter_we_vote_id # Now we want to make sure we have a current_friend entry 
recipient_organization_we_vote_id = '' voter_results = voter_manager.retrieve_voter_by_we_vote_id( friend_invitation_voter_link.recipient_voter_we_vote_id) if voter_results['voter_found']: recipient_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id friend_results = friend_manager.create_or_update_current_friend( friend_invitation_voter_link.sender_voter_we_vote_id, friend_invitation_voter_link.recipient_voter_we_vote_id, voter.linked_organization_we_vote_id, recipient_organization_we_vote_id ) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_voter_link.sender_voter_we_vote_id) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_voter_link.recipient_voter_we_vote_id) accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation original_sender_we_vote_id = friend_invitation_voter_link.sender_voter_we_vote_id results = friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id, web_app_root_url=web_app_root_url) # Update the PositionNetworkCount entries for both friends add_position_network_count_entries_for_one_friend( 0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id) add_position_network_count_entries_for_one_friend( 0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id) # Now that a CurrentFriend entry exists, update the FriendInvitation... if friend_results['success']: try: friend_invitation_voter_link.invitation_status = ACCEPTED friend_invitation_voter_link.save() except Exception as e: success = False status += 'FAILED_TO_UPDATE_INVITATION_STATUS1 ' + str(e) + ' ' else: success = False status += "friend_invitation_voter_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED " # We don't need to do anything with the email because this was an invitation to a known voter elif friend_invitation_results['friend_invitation_email_link_found']: friend_invitation_email_link = friend_invitation_results['friend_invitation_email_link'] if friend_invitation_email_link.sender_voter_we_vote_id == voter_we_vote_id: status += "SENDER_AND_RECIPIENT_ARE_IDENTICAL_FAILED " error_results = { 'status': status, 'success': False, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': True, 'attempted_to_approve_own_invitation': True, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': False, } return error_results this_voter_has_first_or_last_name_saved = voter_manager.this_voter_has_first_or_last_name_saved(voter) if positive_value_exists(friend_invitation_email_link.recipient_first_name) or \ positive_value_exists(friend_invitation_email_link.recipient_last_name): we_have_first_or_last_name_from_friend_invitation_email_link = True else: we_have_first_or_last_name_from_friend_invitation_email_link = False # Check to see if the email used has been claimed by a voter account yet temp_voter_we_vote_id = "" update_voter_name = False email_results = email_manager.retrieve_primary_email_with_ownership_verified( temp_voter_we_vote_id, friend_invitation_email_link.recipient_voter_email) if email_results['email_address_object_found']: # The email belongs to this or another voter email_address_object = email_results['email_address_object'] voter_we_vote_id_accepting_invitation = email_address_object.voter_we_vote_id # We might need to heal the data in the voter record if voter_we_vote_id_accepting_invitation != voter_we_vote_id: email_owner_results = 
voter_manager.retrieve_voter_by_we_vote_id(email_address_object.voter_we_vote_id) if email_owner_results['voter_found']: email_owner_voter = email_owner_results['voter'] voter_manager.update_voter_email_ownership_verified(email_owner_voter, email_address_object) else: # If we are here, then the email_address_object doesn't belong to another voter and can be # claimed by this current voter. voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True else: voter_we_vote_id_accepting_invitation = voter_we_vote_id # If we are here, we know the email is unclaimed. We can assign it to the current voter. # Is there an email address entry for this voter/email? email_we_vote_id = '' email_results = email_manager.retrieve_email_address_object( friend_invitation_email_link.recipient_voter_email, email_we_vote_id, voter_we_vote_id) if email_results['email_address_object_found']: email_address_object = email_results['email_address_object'] try: email_address_object.email_ownership_is_verified = True email_address_object.secret_key = generate_random_string(12) # Reset the secret_key email_address_object.save() voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True except Exception as e: success = False status += 'FAILED_TO_UPDATE_UNVERIFIED_EMAIL ' + str(e) + ' ' else: email_ownership_is_verified = True email_create_results = email_manager.create_email_address_for_voter( friend_invitation_email_link.recipient_voter_email, voter, email_ownership_is_verified) if email_create_results['email_address_object_saved']: email_address_object = email_create_results['email_address_object'] voter_manager.update_voter_email_ownership_verified(voter, email_address_object) if we_have_first_or_last_name_from_friend_invitation_email_link and \ not this_voter_has_first_or_last_name_saved: # The current voter does not have first or last name, and we have incoming names to apply update_voter_name = True # The current voter does not have first or last name, and we have incoming names that can be used if update_voter_name: results = voter_manager.update_voter_name_by_object( voter, friend_invitation_email_link.recipient_first_name, friend_invitation_email_link.recipient_last_name) if results['voter_updated']: voter = results['voter'] # Now that we know who owns the recipient_email_address, update invitation status sender_organization_we_vote_id = '' voter_results = voter_manager.retrieve_voter_by_we_vote_id( friend_invitation_email_link.sender_voter_we_vote_id) if voter_results['voter_found']: sender_organization_we_vote_id = voter_results['voter'].linked_organization_we_vote_id friend_results = friend_manager.create_or_update_current_friend( friend_invitation_email_link.sender_voter_we_vote_id, voter_we_vote_id_accepting_invitation, sender_organization_we_vote_id, voter.linked_organization_we_vote_id ) friend_manager.update_suggested_friends_starting_with_one_voter( friend_invitation_email_link.sender_voter_we_vote_id) friend_manager.update_suggested_friends_starting_with_one_voter(voter_we_vote_id_accepting_invitation) 
accepting_voter_we_vote_id = voter_we_vote_id_accepting_invitation original_sender_we_vote_id = friend_invitation_email_link.sender_voter_we_vote_id friend_accepted_invitation_send(accepting_voter_we_vote_id, original_sender_we_vote_id, web_app_root_url=web_app_root_url) # Update the PositionNetworkCount entries for both friends add_position_network_count_entries_for_one_friend( 0, accepting_voter_we_vote_id, voter_we_vote_id=original_sender_we_vote_id) add_position_network_count_entries_for_one_friend( 0, original_sender_we_vote_id, voter_we_vote_id=accepting_voter_we_vote_id) if friend_results['success']: try: friend_invitation_email_link.invitation_status = ACCEPTED friend_invitation_email_link.save() success = True status += ' friend_invitation_email_link_found FRIENDSHIP_CREATED ' except Exception as e: success = False status += 'FAILED_TO_UPDATE_INVITATION_STATUS2 ' + str(e) + ' ' else: success = False status += "friend_invitation_email_link_found CREATE_OR_UPDATE_CURRENT_FRIEND_FAILED " # And finally, create an organization for this brand new signed-in voter so they can create public opinions organization_name = voter.get_full_name() organization_website = "" organization_twitter_handle = "" organization_twitter_id = "" organization_email = "" organization_facebook = "" organization_image = voter.voter_photo_url() organization_type = INDIVIDUAL organization_manager = OrganizationManager() create_results = organization_manager.create_organization( organization_name, organization_website, organization_twitter_handle, organization_email, organization_facebook, organization_image, organization_twitter_id, organization_type) if create_results['organization_created']: # Add value to twitter_owner_voter.linked_organization_we_vote_id when done. organization = create_results['organization'] try: voter.linked_organization_we_vote_id = organization.we_vote_id voter.save() status += "VOTER_AND_ORGANIZATION_CREATED_FROM_FRIEND_INVITATION " except Exception as e: status += "UNABLE_CREATE_AND_LINK_VOTER_FROM_FRIEND_INVITATION " + str(e) + ' ' invitation_secret_key_belongs_to_this_voter = \ voter_we_vote_id == voter_we_vote_id_accepting_invitation json_data = { 'status': status, 'success': success, 'voter_device_id': voter_device_id, 'voter_has_data_to_preserve': voter_has_data_to_preserve, 'invitation_found': invitation_found, 'attempted_to_approve_own_invitation': False, 'invitation_secret_key': invitation_secret_key, 'invitation_secret_key_belongs_to_this_voter': invitation_secret_key_belongs_to_this_voter, } return json_data
5765f5e06a40de81b36a2dc594f6f67ec236ddcb
8,402
import os def preprocess_rinex(rinex_file, target_directory=None): """Read a RINEX Navigation Message file and reformat the data. Read a file with name "BRDC00IGS_R_yyyyddd0000_01D_MN.rnx" that was downloaded from https://cddis.nasa.gov/archive/gnss/data/daily/yyyy/brdc/ where yyyy is the year and ddd is the day of the year. The file must be unpacked first such that it is a plain text file in RINEX 3 format. It is expected to contain data for GPS (G), Galileo (E), and BeiDou (C) in this order. If target_directory=None, then return a 2D navigation data NumPy array with 21 rows for GPS, Galileo, and BeiDou, respectively, else attempt to write the three arrays in the '.npy' format to the directory specified in target_directory: yyyy_ddd_G.npy for GPS, yyyy_ddd_E.npy for Galileo, and yyyy_ddd_C.npy for BeiDou. Units are either seconds, meters, or radians. Typical call: preprocess_rinex("BRDC00IGS_R_20203410000_01D_MN.rnx") Inputs: rinex_file - Path to unpacked RINEX Navigation Message file ending with "BRDC00IGS_R_yyyyddd0000_01D_MN.rnx" target_directory - Directory to store the navigation data matrices or None if they shall be returned, default=None Outputs: eph_G - GPS navigation data matrix, 2D NumPy array with 21 rows eph_E - Galileo navigation data matrix, 2D NumPy array with 21 rows eph_D - BeiDou navigation data matrix, 2D NumPy array with 21 rows Author: Jonas Beuchert """ with open(rinex_file, "r") as fide: # Skip header line = fide.readline() while not line == "" and not line.find("END OF HEADER") > -1: line = fide.readline() if line == "": raise Exception( "Invalid RINEX navigation data file." ) # Expected maximum number of columns for a single GNSS (Galileo) max_col = 20000 # Set aside memory for the input svprn = np.zeros(max_col) toe = np.zeros(max_col) af2 = np.zeros(max_col) af1 = np.zeros(max_col) af0 = np.zeros(max_col) deltan = np.zeros(max_col) M0 = np.zeros(max_col) ecc = np.zeros(max_col) roota = np.zeros(max_col) toe = np.zeros(max_col) cic = np.zeros(max_col) crc = np.zeros(max_col) cis = np.zeros(max_col) crs = np.zeros(max_col) cuc = np.zeros(max_col) cus = np.zeros(max_col) Omega0 = np.zeros(max_col) omega = np.zeros(max_col) i0 = np.zeros(max_col) Omegadot = np.zeros(max_col) idot = np.zeros(max_col) # Create list for returned matrices eph = [] # Loop over all three GNSS (expected order: GPS, Galileo, BeiDou) line = fide.readline() gnss_list = ["G", "E", "C"] while not line == "" and len(gnss_list) > 0: gnss = gnss_list.pop(0) # Loop until next desired GNSS is found while not line == "" and line[0] != gnss: line = fide.readline() if line == "": raise Exception( "RINEX navigation data file does not contain data for " + "all desired GNSS or they are not in the desired order " + "(G - E - C)." 
) # reset index i = 0 # Loop over all entries for this GNSS while line[0] == gnss: try: # Read one entry svprn[i] = int(line[1:3]) af0[i] = float(line[23:42]) af1[i] = float(line[42:61]) af2[i] = float(line[61:80]) line = fide.readline() crs[i] = float(line[23:42]) deltan[i] = float(line[42:61]) M0[i] = float(line[61:80]) line = fide.readline() cuc[i] = float(line[4:23]) ecc[i] = float(line[23:42]) cus[i] = float(line[42:61]) roota[i] = float(line[61:80]) line = fide.readline() toe[i] = float(line[4:23]) cic[i] = float(line[23:42]) Omega0[i] = float(line[42:61]) cis[i] = float(line[61:80]) line = fide.readline() i0[i] = float(line[4:23]) crc[i] = float(line[23:42]) omega[i] = float(line[42:61]) Omegadot[i] = float(line[61:80]) line = fide.readline() idot[i] = float(line[4:23]) line = fide.readline() line = fide.readline() except: Exception( "Found corrupted entry for GNSS {}.".format(gnss) ) # Read first line of next entry line = fide.readline() i += 1 # Reformat data into array with 21 rows eph.append(np.array( [ svprn[:i], af2[:i], M0[:i], roota[:i], deltan[:i], ecc[:i], omega[:i], cuc[:i], cus[:i], crc[:i], crs[:i], i0[:i], idot[:i], cic[:i], cis[:i], Omega0[:i], Omegadot[:i], toe[:i], af0[:i], af1[:i], toe[:i] ] )) if len(gnss_list) > 0: raise Exception( "RINEX navigation data file does not contain data for " + "all desired GNSS or they are not in the desired order " + "(G - E - C)." ) if target_directory is None: return tuple(eph) # Extract year and day of year yyyy = rinex_file[-22:-18] ddd = rinex_file[-18:-15] # Save three .npy files for gnss, eph_gnss in zip(["G", "E", "C"], eph): np.save(os.path.join(target_directory, yyyy + "_" + ddd + "_" + gnss), eph_gnss)
ec05ba7c6384d223b2059ffead82f09bb15b5e1e
8,403
import torch


def _load_image(fnames, dim=None, device=None, label=False):
    """Load a N-D image from disk"""
    dat, affine = _map_image(fnames, dim)
    if label:
        dtype = dat.dtype
        if isinstance(dtype, (list, tuple)):
            dtype = dtype[0]
        dtype = dtypes.as_torch(dtype, upcast=True)
        dat0 = dat.data(device=device, dtype=dtype)[0]  # assume single channel
        if label is True:
            label = dat0.unique(sorted=True)
            label = label[label != 0].tolist()
        dat = torch.zeros([len(label), *dat0.shape], device=device)
        for i, l in enumerate(label):
            dat[i] = dat0 == l
    else:
        dat = dat.fdata(device=device, rand=True)
    affine = affine.to(dat.device, dat.dtype)
    return dat, affine
8f5dae0666d0173e57a8f0005a4f6d491d2bd58f
8,404
import os


def GetCurrentVersion(paths, platform):
    """Find the current component version by iterating gsbucket root folder.

    Args:
      paths: ([str]) a list of folder paths strings.
      platform: (str) the platform for which the component is being built

    Returns:
      str: current component version.
      str: gs path for current component version.
    """
    current_version = LooseVersion('0.0.0.0')
    current_version_path = None
    for version_path in paths:
        if version_path[-1] != '/':
            logger.fatal("version_path (%s) needs to end with '/'.", version_path)
            continue
        version = os.path.basename(version_path[:-1])
        if len(ParseVersion(version)) < 3:
            # Path does not contain a component version.
            continue
        v = LooseVersion(version)
        if v > current_version:
            # Skip the version if the path for the target platform does not exist.
            ctx = gs.GSContext()
            src = os.path.join(version_path, platform, COMPONENT_ZIP)
            if not ctx.Exists(src):
                continue
            current_version = v
            current_version_path = version_path
    return str(current_version), current_version_path
2358219202b827a399c83358a2d24994d4b0b898
8,405
from typing import Tuple
import ctypes


def dtpool(name: str) -> Tuple[int, str, bool]:
    """
    Return the data about a kernel pool variable.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dtpool_c.html

    :param name: Name of the variable whose value is to be returned.
    :return:
            Number of values returned for name,
            Type of the variable ("C", "N", or "X"),
            Whether the variable was found in the kernel pool.
    """
    name = stypes.string_to_char_p(name)
    found = ctypes.c_int()
    n = ctypes.c_int()
    typeout = ctypes.c_char()
    libspice.dtpool_c(name, ctypes.byref(found), ctypes.byref(n),
                      ctypes.byref(typeout))
    return n.value, stypes.to_python_string(typeout.value), bool(found.value)
96584733f7f2ad93ed50e0e48ccd1040eb08dd17
8,406
def add_similar_tracks(position_or_range = ":", howmany=5, relative_positions=True): """ Adds Up to the value of howmany tracks similar to each track on the current playlist. parameters: =========== # position_or_range: The position of the track to add similar tracks to. Can also be a range string as "START:STOP" with the STOP position not included, acceptable ranges include empty START and/or empty STOP values for the first and last values in the playlist. The character "c" can be used to indicate the current playing posision. Example: ":" for all tracks ":-3" for all but the last three tracks "c:" for the current playing song and the rest following it # howmany: Maximum number of added similar tracks per existing track # relative_position: Whether to add similar tracks after their respective original track (True) or at the end of the playlist (False) Returns the number of added tracks """ # Handle the range case if type(position_or_range) == str and ":" in position_or_range: (start, stop) = position_or_range.split(":") if start == "": start = 0 if stop == "": stop = get_playlist_length() if start == "c": start = _mpd_current_playlist_position() if stop == "c": stop = _mpd_current_playlist_position() (start, stop) = (int(start), int(stop)) if stop < 0: stop = get_playlist_length + stop added = 0 for i in xrange(start, stop): if relative_positions: added += add_similar_tracks(i+added, howmany, True) else: added += add_similar_tracks(i, howmany, False) return added # Handle the single position case added = 0 relative_buffer = {} normal_buffer = [] if position_or_range == "c": position = int(_mpd_current_playlist_position()) else: position = int(position_or_range) # Get similar tracks' URIs for track in _get_similar_tracks(_mpd_get_playlist(position)[0]): if added >= howmany: break # look up track uris = _mpd_lookup_track(track) if not uris: continue # check to see if it's already added uri = uris[0] if _is_track_added(uri): continue # add it to the buffer if relative_positions: relative_buffer[position+added+1] = uri else: normal_buffer += [uri] added += 1 if added < howmany: print added artist = _mpd_get_playlist(position)[0]["artist"] artists = [artist] artists.extend(_get_similar_artists(artist)) songs = [] for a in artists: uris = _mpd_lookup_artist_tracks(artist) songs.extend(uris) random.shuffle(songs) for song in songs: if added >= howmany: break # check to see if it's already added if _is_track_added(song): continue # add it to the buffer if relative_positions: relative_buffer[position+added+1] = song else: normal_buffer += [song] added += 1 print added # add tracks from buffer _mpd_client.command_list_ok_begin() if relative_positions: keys = relative_buffer.keys() keys.sort() for key in keys: _mpd_add_track(relative_buffer[key], key) else: for uri in normal_buffer: _mpd_add_track(uri) _mpd_client.command_list_end() return added
778eadc4d00de1aa2f293d963cdd643499f234dd
8,407
def get_hot_article_tags():
    """
    Get all of the article tags.
    :return: all article tags flagged as hot
    """
    return Tag.objects.filter(is_hot=True)
d4158308ef3ea3cbce646548dc47def9e35fcef7
8,408
from unittest.mock import patch


def patch_user_interface_null() -> MockedUserInterfaceNull:
    """Patch player interface with no players."""
    return patch("steam.api.interface", return_value=MockedUserInterfaceNull())
8491d7beef4dfbf949cfb5a27106a948e30487c2
8,409
import sys


def confirm(new_command, side_effect, settings):
    """Returns `True` when running of new command confirmed."""
    if not settings.require_confirmation:
        logs.show_command(new_command, side_effect, settings)
        return True

    logs.confirm_command(new_command, side_effect, settings)
    try:
        sys.stdin.read(1)
        return True
    except KeyboardInterrupt:
        logs.failed('Aborted', settings)
        return False
ecadd95d87984720a8e42b2134857ff8eee07e84
8,410
import numpy
from scipy import signal  # argrelextrema lives in scipy.signal, not the stdlib signal module


def first_localmax_index(data):
    """Return index of the first local maximum.

    If there is no local maximum (e.g. if all the values are zero),
    it will simply return zero.
    """
    localmax_indexes = signal.argrelextrema(data, numpy.greater, mode='wrap')
    if localmax_indexes[0].size > 0:
        return localmax_indexes[0][0]
    else:
        return 0
7cd7baa8d564dd2175e6fd8877a733fca41e63ef
8,411
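A brief, hypothetical usage sketch for the first_localmax_index snippet above (id 8,411); the input array is invented for illustration and NumPy/SciPy are assumed to be installed alongside the function as defined.

# Illustrative only: exercises first_localmax_index() from the snippet above.
import numpy as np

data = np.array([0.0, 1.0, 3.0, 2.0, 5.0, 1.0])
print(first_localmax_index(data))         # 2 -> the value 3.0 is the first local maximum
print(first_localmax_index(np.zeros(4)))  # 0 -> no local maximum found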
def _rds_clone_ ( dataset , name = '' ) :
    """Clone the dataset
    >>> dataset = ...
    >>> cloned  = dataset.clone ( 'new_name' )
    """
    name = name if name else dsID ()
    return ROOT.RooDataSet ( dataset , name )
f3e3156987eafb99f05fb99a2d8371c65959faf9
8,412
from datetime import timedelta

import pandas as pd


def getFirstDateOfQuarter(date):
    """
    Return: {Date} The start date of the quarter for the given date.
    """
    # Bug if the first date of the quarter is used, so add 1 day if that's the case.
    if date.day == 1:
        date = date + timedelta(days=1)
    quarter_start = pd.to_datetime(
        date - pd.tseries.offsets.QuarterBegin(startingMonth=1)).date()
    return quarter_start
4962bd2e57fe210fba57340f19384835c0ae1c07
8,413
def abbink_onset_detector(signal=None, rest=None, sampling_rate=1000., size=None, alarm_size=None, threshold=None, transition_threshold=None): """Determine onsets of EMG pulses. Follows the approach by Abbink et al.. [Abb98]_. Parameters ---------- signal : array Input filtered EMG signal. rest : array, list, dict One of the following 3 options: * N-dimensional array with filtered samples corresponding to a rest period; * 2D array or list with the beginning and end indices of a segment of the signal corresponding to a rest period; * Dictionary with {'mean': mean value, 'std_dev': standard variation}. sampling_rate : int, float, optional Sampling frequency (Hz). size : int Detection window size (seconds). alarm_size : int Number of amplitudes searched in the calculation of the transition index. threshold : int, float Detection threshold. transition_threshold: int, float Threshold used in the calculation of the transition index. Returns ------- onsets : array Indices of EMG pulse onsets. processed : array Processed EMG signal. References ---------- .. [Abb98] Abbink JH, van der Bilt A, van der Glas HW, "Detection of onset and termination of muscle activity in surface electromyograms", Journal of Oral Rehabilitation, vol. 25, pp. 365–369, 1998 """ # check inputs if signal is None: raise TypeError("Please specify an input signal.") if rest is None: raise TypeError("Please specidy rest parameters.") if size is None: raise TypeError("Please specify the detection window size.") if alarm_size is None: raise TypeError("Please specify the number of amplitudes searched in " "the calculation of the transition index.") if threshold is None: raise TypeError("Please specify the detection threshold.") if transition_threshold is None: raise TypeError("Please specify the second threshold.") # gather statistics on rest signal if isinstance(rest, np.ndarray) or isinstance(rest, list): # if the input parameter is a numpy array or a list if len(rest) >= 2: # first ensure numpy rest = np.array(rest) if len(rest) == 2: # the rest signal is a segment of the signal rest_signal = signal[rest[0]:rest[1]] else: # the rest signal is provided as is rest_signal = rest rest_zero_mean = rest_signal - np.mean(rest_signal) statistics = st.signal_stats(signal=rest_zero_mean) mean_rest = statistics['mean'] std_dev_rest = statistics['std_dev'] else: raise TypeError("Please specify the rest analysis.") elif isinstance(rest, dict): # if the input is a dictionary mean_rest = rest['mean'] std_dev_rest = rest['std_dev'] else: raise TypeError("Please specify the rest analysis.") # subtract baseline offset signal_zero_mean = signal - np.mean(signal) # full-wave rectification fwlo = np.abs(signal_zero_mean) # moving average mvgav = np.convolve(fwlo, np.ones((size,)) / size, mode='valid') # calculate the test function tf = (1 / std_dev_rest) * (mvgav - mean_rest) # additional filter filtered_tf, _, _ = st.filter_signal(signal=tf, ftype='butter', band='lowpass', order=10, frequency=30, sampling_rate=sampling_rate) # convert from numpy array to list to use list comprehensions filtered_tf = filtered_tf.tolist() onset_time_list = [] offset_time_list = [] alarm_time = 0 onset = False alarm = False for k in range(0, len(tf)): if onset is True: # an onset was previously detected and we are looking for the offset time, applying the same criteria if alarm is False: if filtered_tf[k] < threshold: # the first index of the sliding window is used as an estimate for the onset time (simple post-processor) alarm_time = k alarm = True else: # if 
alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1): if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1): transition_indices = [] for j in range(alarm_size, alarm_time): low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)] low = sum(i < transition_threshold for i in low_list) high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)] high = sum(i > transition_threshold for i in high_list) transition_indices.append(low + high) offset_time_list = np.where(transition_indices == np.amin(transition_indices))[0].tolist() onset = False alarm = False else: # we only look for another onset if a previous offset was detected if alarm is False: if filtered_tf[k] >= threshold: # the first index of the sliding window is used as an estimate for the onset time (simple post-processor) alarm_time = k alarm = True else: # if alarm_time > alarm_window_size and len(emg_conditioned_list) == (alarm_time + alarm_window_size + 1): if alarm_time > alarm_size and k == (alarm_time + alarm_size + 1): transition_indices = [] for j in range(alarm_size, alarm_time): low_list = [filtered_tf[j-alarm_size+a] for a in range(1, alarm_size+1)] low = sum(i < transition_threshold for i in low_list) high_list = [filtered_tf[j+b] for b in range(1, alarm_size+1)] high = sum(i > transition_threshold for i in high_list) transition_indices.append(low + high) onset_time_list = np.where(transition_indices == np.amax(transition_indices))[0].tolist() onset = True alarm = False onsets = np.union1d(onset_time_list, offset_time_list) # adjust indices because of moving average onsets += int(size / 2) return utils.ReturnTuple((onsets, filtered_tf), ('onsets', 'processed'))
0ae968c46ff4e5dd5496f09b637a1d5c039ab9fa
8,414
import sys


def reparse(metadata):
    """Some things need to be parsed again after the build environment has been
    created and activated."""
    metadata.final = False
    sys.path.insert(0, metadata.config.build_prefix)
    sys.path.insert(0, metadata.config.host_prefix)
    py_ver = '.'.join(metadata.config.variant['python'].split('.')[:2])
    sys.path.insert(0, utils.get_site_packages(metadata.config.host_prefix, py_ver))
    metadata.parse_until_resolved()
    metadata = finalize_metadata(metadata)
    return metadata
076efe659cbbc7826c2a2159617e4142b06f6e36
8,415
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the image file.
    with tf.gfile.FastGFile(filename, 'rb') as f:
        image_data = f.read()

    # Convert any PNG to JPEG's for consistency.
    if _is_png(filename):
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)

    # Decode the RGB JPEG.
    image = coder.resample_jpeg(image_data)

    return image, RESIZE_HEIGHT, RESIZE_WIDTH
6d978af3360692159300b4450cdd851aae842098
8,416
import multiprocessing import psutil import logging import os import time import signal import errno def fork_processes_with_watchdog( num_processes, is_shutdown_callback, child_pids=None, stoploss_ratio=STOPLOSS_RATIO, timeout_period=TIMEOUT_PERIOD, sleep_period=SLEEP_PERIOD, grace_period=GRACE_PERIOD, max_rss=MAX_RSS, statsd=None, app_name='default'): """Starts multiple worker processes. If ``num_processes`` is None or <= 0, we detect the number of cores available on this machine and fork that number of child processes. If ``num_processes`` is given and > 0, we fork that specific number of sub-processes. Since we use processes and not threads, there is no shared memory between any server code. Note that multiple processes are not compatible with the autoreload module (or the debug=True option to `tornado.web.Application`). When using multiple processes, no IOLoops can be created or referenced until after the call to ``fork_processes``. In each child process, ``fork_processes`` returns its *task id*, a number between 0 and ``num_processes``. Processes that exit abnormally (due to a signal or non-zero exit status) are restarted with the same id (up to ``max_restarts`` times). In the parent process, ``fork_processes`` returns None if all child processes have exited normally, but will otherwise only exit by throwing an exception. """ global _task_id assert _task_id is None assert multiprocessing is not None assert psutil is not None if num_processes is None or num_processes <= 0: num_processes = cpu_count() if ioloop.IOLoop.initialized(): raise RuntimeError("Cannot run in multiple processes: IOLoop instance " "has already been initialized. You cannot call " "IOLoop.instance() before calling start_processes()") logging.info("Starting %d processes", num_processes) children = {} pipes = {} last_checkin = {} processes = {} healthy = set() def send_stat(metric, val): if statsd: statsd.incr(metric,val,use_prefix=False) def start_child(i): parent_conn, child_conn = multiprocessing.Pipe(duplex=False) pid = os.fork() if pid == 0: # child process _reseed_random() global _task_id _task_id = i return i, child_conn else: # NOTE [adam Nov/11/12]: bit of a hack... 
lists behave as # pass-by-reference, so this lets me minimize restructuring # of this module and still get a list of child pids into # the application object if child_pids is not None: child_pids.append(pid) children[pid] = i last_checkin[pid] = time.time() + grace_period pipes[pid] = (parent_conn, child_conn) processes[pid] = psutil.Process(pid) logging.info("Started new child process with number %d pid %d", i, pid) return None, None def cleanup_pid(pid, remove_from_child_pids=True): if remove_from_child_pids and pid in child_pids: child_pids.remove(pid) if pid in pipes: parent_pipe, child_pipe = pipes[pid] try: parent_pipe.close() except Exception: logging.exception("Problem while closing pipe for "\ "pid %d", pid) try: child_pipe.close() except Exception: logging.exception("Problem while closing pipe for "\ "pid %d", pid) pipes.pop(pid, None) last_checkin.pop(pid, None) children.pop(pid, None) processes.pop(pid, None) if pid in healthy: healthy.remove(pid) def stoplossed(): if float(len(healthy)) / num_processes < stoploss_ratio: return True return False for i in range(num_processes): _id, child_conn = start_child(i) if _id is not None: return _id, child_conn # for keeping track of which processes we've gracefully killed # due to memory issues sent_term = set() while children: try: # Try to respawn any children that don't exist yet # this can happen during bad site issues where a process # dies because it can't connect to mongo, etc and then gets # reaped by our upstart checker. This will put us in a period # of permanent stoploss, so to avoid that we respawn here # Only execute if we're not shutting down. if not is_shutdown_callback(): alive_children = set(children.values()) children_to_spawn = set(range(num_processes)) - alive_children for child_number in children_to_spawn: _id, child_conn = start_child(child_number) if _id is not None: return _id, child_conn else: # there's a race where we can spawn children after the tornado # app has received the shutdown signal. This will ensure we will # eventually shutdown. Should happen very rarely for pid, child_number in children.iteritems(): try: os.kill(pid, signal.SIGTERM) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.shutdown_term' % app_name,1) except OSError: logging.exception( "Failed to terminate pid %d process number %d "\ "while shutting down", pid, child_number) # for all the processes we've sent SIGTERM, make sure they are # not running anymore - if they are escalate to SIGKILL. 
# Shouldn't need to check for stoploss, since len(sent_term) # must be < what would put us over the stoploss # We need to do this first so that we don't mistakenly think that # upstart is trying to shut us down since that also sends a SIGTERM # to the child processes for i, (pid, child_number) in enumerate(sent_term): if pid not in processes: continue if processes[pid].is_running(): logging.info("Trying to kill pid %d process number %d", pid, child_number) try: os.kill(pid, signal.SIGKILL) send_stat('infra.frontend.%s.kill' % app_name,1) except OSError: logging.exception("Failed to kill pid %d process number %d", pid, child_number) logging.info("Trying to wait on pid %d process number %d", pid, child_number) try: os.waitpid(pid, os.WNOHANG) except OSError: logging.exception("Failed to wait on pid %d process number %d", pid, child_number) cleanup_pid(pid) if not is_shutdown_callback(): _id, child_conn = start_child(child_number) if _id is not None: return _id, child_conn # reset this sent_term = set() # Detect if we're being shut down by upstart, in which case # we no longer want to respawn child processes that have died # also detect if the child has exited on its own (died on startup) to_cleanup = set() for pid, _ in children.iteritems(): try: _resp_pid, status = os.waitpid(pid, os.WNOHANG) except OSError as e: if e.errno == errno.EINTR: continue if e.errno != errno.ECHILD: raise # Still alive if status == 0: continue to_cleanup.add(pid) logging.info("Detected that pid %d died", pid) for pid in to_cleanup: cleanup_pid(pid, remove_from_child_pids=False) # If we have child processes, see if they're up and running to_reap = set() exceed_mem = set() for pid, child_number in children.iteritems(): parent_pipe, child_pipe = pipes[pid] got_msg = False # get the last message that was sent try: while parent_pipe.poll(): status = parent_pipe.recv() got_msg = True except Exception: logging.exception("Problem while polling pipe for "\ "pid %d process number %d", pid, child_number) if got_msg: if not status: logging.info( "Empty status message from pid %d process number %d", pid, child_number) if 'checkin_time' not in status: logging.info( "Check in time not reported from pid %d process number %d", pid, child_number) last_checkin[pid] = time.time() healthy.add(pid) else: last_checkin[pid] = status['checkin_time'] healthy.add(pid) if time.time() - last_checkin[pid] > timeout_period: to_reap.add((pid, child_number)) logging.info( "Scheduling pid %d process number %d to be reaped "\ 'for failing to check in after %d seconds', pid, child_number, timeout_period) try: # only terminate if it's after grace period if processes[pid].create_time() + grace_period < time.time(): rss, _ = processes[pid].memory_info()[:2] if rss > max_rss: exceed_mem.add((pid, child_number)) logging.info( "Scheduling pid %d process number %d to be reaped "\ 'for exceeding MAX_RSS (%dMB/%dMB)', pid, child_number, rss / (1024 * 1024), max_rss / (1024 * 1024)) except Exception: logging.exception( "Unable to get RSS from pid %d process number %d", pid, child_number) # Reap the child processes that are stuck for i, (pid, child_number) in enumerate(to_reap): if stoplossed(): logging.info( "Not enough tornadoes healthy, stoploss initiated.") send_stat('infra.frontend.%s.stoplossed' % app_name,1) break logging.info("Trying to gracefully terminate pid "\ "%d process number %d", pid, child_number) try: os.kill(pid, signal.SIGTERM) sent_term.add((pid, child_number)) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.term' % 
app_name,1) except OSError: logging.exception("Failed to terminate pid %d process number %d", pid, child_number) # if its timed out, we've already termed it exceed_mem -= to_reap for i, (pid, child_number) in enumerate(exceed_mem): if stoplossed(): logging.info( "Not enough tornadoes healthy, stoploss initiated.") send_stat('infra.frontend.%s.stoplossed_mem' % app_name,1) break logging.info("Trying to gracefully terminate pid %d process number %d", pid, child_number) try: os.kill(pid, signal.SIGTERM) sent_term.add((pid, child_number)) if pid in healthy: healthy.remove(pid) send_stat('infra.frontend.%s.term_mem' % app_name,1) except OSError: logging.exception("Failed to terminate pid %d process number %d", pid, child_number) time.sleep(sleep_period) except Exception: logging.exception("Unhandled error in watchdog loop") send_stat('infra.frontend.%s.unhandled_exception' % app_name,1) return None, None
11fa328146865ca212436f7380d7895e9b62d7be
8,417
import os


def load_fonts(folder="fonts/latin"):
    """Load all fonts in the fonts directories
    """
    fonts = []

    if folder is not None:
        if os.path.isdir(folder):
            # the folder exists whether it is relative or absolute path
            for font in os.listdir(folder):
                if font.split(".")[-1].lower() in ["ttf", "otf"]:
                    fonts.append(os.path.join(folder, font))
            return fonts
        elif os.path.isdir(os.path.join(os.path.dirname(__file__), folder)):
            # we are working with base folder of this library
            for font in os.listdir(os.path.join(os.path.dirname(__file__), folder)):
                if font.split(".")[-1].lower() in ["ttf", "otf"]:
                    fonts.append(os.path.join(os.path.dirname(__file__), folder, font))
            return fonts

    raise Exception("No font folder specified/found!")
87e15b826e99b3d350fcb4ad8e58ac968644a4d0
8,418
def float_feature(value):
    """Wrapper for inserting float features into Example proto."""
    if not isinstance(value, list):
        value = [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
9333af60465a251883b3efe70de26ce9ce483657
8,419
def my_charts(request):
    """ define personal graphics page behavior """
    data = [0, 0, 0, 0]

    if request.method == 'POST':
        month = request.POST.get('month')
        if month is not None:
            current_user_id = request.user.id_user
            if month == 'all':
                all_classifications = list(ClinicalState_28d.objects.all()) + \
                    list(ClinicalState_29d_2m.objects.all()) + \
                    list(ClinicalState_2m_3y.objects.all()) + \
                    list(ClinicalState_3y_10y.objects.all()) + \
                    list(ClinicalState_10yMore.objects.all())
            else:
                all_classifications = \
                    list(ClinicalState_28d.objects.filter(date__month=month)) + \
                    list(ClinicalState_29d_2m.objects.filter(date__month=month)) + \
                    list(ClinicalState_2m_3y.objects.filter(date__month=month)) + \
                    list(ClinicalState_3y_10y.objects.filter(date__month=month)) + \
                    list(ClinicalState_10yMore.objects.filter(date__month=month))

            for classification in all_classifications:
                if classification.classifier_id == current_user_id:
                    patient_classification = \
                        classification.patient.classification
                    if patient_classification == 1:
                        data[0] += 1
                    elif patient_classification == 2:
                        data[1] += 1
                    elif patient_classification == 3:
                        data[2] += 1
                    elif patient_classification == 4:
                        data[3] += 1

    return render(request, 'users/myCharts.html', {
        'data': data
    })
adc11d0748246c753581675eee19b2780b16b832
8,420
import numpy as np


def matrix2yzy_extrinsic(rotation_matrices: np.ndarray) -> np.ndarray:
    """
    Ry(k3) @ Rz(k2) @ Ry(k1) = [[c1c2c3-s1s3,  -s2c3,  s1c2c3+c1c3],
                                [c1s2,          c2,     s1s2],
                                [-c1c2s3,       s2s3,  -s1c2s3+c1c3]]
    """
    rotation_matrices = rotation_matrices.reshape((-1, 3, 3))
    angles_radians = np.zeros((rotation_matrices.shape[0], 3))

    # Angle 2 can be taken directly from matrices
    angles_radians[:, 1] = np.arccos(rotation_matrices[:, 1, 1])

    # Gimbal lock case (s2 = 0)
    tolerance = 1e-4
    # Find indices where this is the case
    gimbal_idx = np.abs(rotation_matrices[:, 1, 0]) < tolerance
    # Calculate angle 1 and set angle 3 = 0 for those indices
    r31 = rotation_matrices[gimbal_idx, 2, 0]
    r33 = rotation_matrices[gimbal_idx, 2, 2]
    angles_radians[gimbal_idx, 0] = np.arctan2(-r31, r33)
    angles_radians[gimbal_idx, 2] = 0

    # Normal case (s2 > 0)
    idx = np.invert(gimbal_idx)
    r23 = rotation_matrices[idx, 1, 2]
    r21 = rotation_matrices[idx, 1, 0]
    r32 = rotation_matrices[idx, 2, 1]
    r12 = rotation_matrices[idx, 0, 1]
    angles_radians[idx, 0] = np.arctan2(r23, r21)
    angles_radians[idx, 2] = np.arctan2(r32, -r12)

    # convert to degrees
    euler_angles = np.rad2deg(angles_radians)
    return euler_angles
8a37e65751a26d3fd5c360ce9068626bfee5c594
8,421
def smallest_subarray_with_given_sum(arr, s):
    """Find the length of the smallest subarray whose sum is >= s.

    Time: O(n)
    Space: O(1)

    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 3, 2], 7)
    2
    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 8], 7)
    1
    >>> smallest_subarray_with_given_sum([3, 4, 1, 1, 6], 8)
    3
    """
    win_sum = 0
    win_start = 0
    min_len = 0

    for win_end in range(len(arr)):
        win_sum += arr[win_end]
        while win_sum >= s:
            cur_len = win_end - win_start + 1
            if min_len == 0 or cur_len < min_len:
                min_len = cur_len
            win_sum -= arr[win_start]
            win_start += 1

    return min_len
4a1d63619fc200c32ffae80dc7d404f486efcdd1
8,422
from typing import OrderedDict


def create_lit_model(
    model: str,
    input_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
    output_types: "OrderedDict[str, lit_types.LitType]",  # noqa: F821
    attribution_method: str = "sampled_shapley",
) -> lit_model.Model:
    """Creates a LIT Model object.

    Args:
        model:
            Required. A string reference to a local TensorFlow saved model directory.
            The model must have at most one input and one output tensor.
        input_types:
            Required. An OrderedDict of string names matching the features of the model
            as the key, and the associated LitType of the feature.
        output_types:
            Required. An OrderedDict of string names matching the labels of the model
            as the key, and the associated LitType of the label.
        attribution_method:
            Optional. A string to choose what attribution configuration to set up the
            explainer with. Valid options are 'sampled_shapley' or 'integrated_gradients'.

    Returns:
        A LIT Model object that has the same functionality as the model provided.
    """
    return _TensorFlowLitModel(model, input_types, output_types, attribution_method)
355eaebe6e59733d1831f993d56462ee36e4ff9a
8,423
from typing import Dict, List, OrderedDict

import mysql.connector


def show_lightning_round_zero_correct(database_connection: mysql.connector.connect
                                      ) -> List[Dict]:
    """Return list of shows in which a panelist answered zero Lightning
    Fill-in-the-Blank round questions correctly"""
    cursor = database_connection.cursor(dictionary=True)
    query = ("SELECT s.showid, s.showdate, p.panelistid, p.panelist, "
             "pm.panelistlrndstart, pm.panelistlrndcorrect, "
             "pm.panelistscore, pm.showpnlrank "
             "FROM ww_showpnlmap pm "
             "JOIN ww_shows s ON s.showid = pm.showid "
             "JOIN ww_panelists p ON p.panelistid = pm.panelistid "
             "WHERE s.bestof = 0 AND s.repeatshowid IS null "
             "AND pm.panelistlrndcorrect = 0 "
             "ORDER BY s.showdate ASC;")
    cursor.execute(query)
    result = cursor.fetchall()
    cursor.close()

    if not result:
        return None

    shows = []
    for row in result:
        show = OrderedDict()
        show["id"] = row["showid"]
        show["date"] = row["showdate"].isoformat()
        panelist = OrderedDict()
        panelist["id"] = row["panelistid"]
        panelist["name"] = row["panelist"]
        panelist["start"] = row["panelistlrndstart"]
        panelist["correct"] = row["panelistlrndcorrect"]
        panelist["score"] = row["panelistscore"]
        panelist["rank"] = row["showpnlrank"]
        show["panelist"] = panelist
        shows.append(show)

    return shows
5c218639fd2321239d9f791221f2ad30f17ead02
8,424
import requests


def get_webpage(page_url):
    """Get the OOTS index webpage and return the content."""
    result = requests.get(page_url)
    if result.status_code == 200:
        return result.text
    else:
        _logger.error(
            colored(
                "Unable to read the OOTS data, please check your connection.",
                "red",
                attrs=["bold"],
            )
        )
        _logger.error(colored(f"URL : {page_url}", "red"))
        quit(1)
e9b88f69b9dca0d5cf525a26e7e43fd118698225
8,425
def generate_config(context):
    """ Entry point for the deployment resources. """
    properties = context.properties
    name = properties.get('name', context.env['name'])

    bastion_props = {
        'zone': properties['zone'],
        'network': properties['network'],
        'machineType': properties['machineType'],
        'diskImage': IMAGE
    }

    bastion = {'name': name, 'type': 'instance.py', 'properties': bastion_props}

    optional_props = ['diskSizeGb', 'metadata', 'tags']
    for prop in optional_props:
        set_optional_property(bastion_props, properties, prop)

    if properties.get('disableSudo'):
        disable_sudo(bastion_props)

    firewall_settings = properties.get('createFirewallRules')
    if firewall_settings:
        extra_resources, extra_outputs = create_firewall_rules(
            bastion,
            firewall_settings
        )
    else:
        extra_resources = []
        extra_outputs = []

    outputs = [
        {'name': 'name', 'value': name},
        {'name': 'selfLink', 'value': '$(ref.{}.selfLink)'.format(name)},
        {'name': 'internalIp', 'value': '$(ref.{}.internalIp)'.format(name)},
        {'name': 'externalIp', 'value': '$(ref.{}.externalIp)'.format(name)}
    ]

    return {
        'resources': [bastion] + extra_resources,
        'outputs': outputs + extra_outputs
    }
f22dbb3cb3500766badb6c28eb3de35b6ba5ba3c
8,426
import inspect


def dump_args(func):
    """Decorator to print function call details - parameter names and effective values."""
    def wrapper(*args, **kwargs):
        func_args = inspect.signature(func).bind(*args, **kwargs).arguments
        func_args_str = ', '.join('{} = {!r}'.format(*item) for item in func_args.items())
        print(f'{func.__module__}.{func.__qualname__} ( {func_args_str} )')
        return func(*args, **kwargs)
    return wrapper
673158019aa3a8343718b9648b61ef4a3699f050
8,427
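A short, hypothetical usage example for the dump_args decorator in the snippet above (id 8,427); the decorated function add is made up purely for demonstration.

# Illustrative only: applying dump_args() from the snippet above.
@dump_args
def add(a, b=2):          # 'add' is an invented function for demonstration
    return a + b

add(1, b=3)               # prints: __main__.add ( a = 1, b = 3 )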
from typing import Tuple

import numpy as np


def bigaussian(
    n_particles: int,
    mean: Tuple[float, float, float, float, float],
    geometric_emittance_h: float,
    geometric_emittance_v: float,
    sigma_p: float,
) -> np.array:
    """Generate a bigaussian distributed distribution.

    Args:
        n_particles: Number of particles.
        mean: Distribution centers.
        geometric_emittance: Geometric emittance.
        sigma_p: Absolute momentum spread.

    Returns:
        Array of position and angle phase space coordinates of the distribution.
    """
    cov = np.diag(
        (
            geometric_emittance_h,
            geometric_emittance_h,
            geometric_emittance_v,
            geometric_emittance_v,
            sigma_p ** 2,
        )
    )
    return np.random.multivariate_normal(mean, cov, n_particles).T
a8c7b9cf7500fde899cdcc163a31450b59d0d7d0
8,428
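A hedged usage sketch for the bigaussian snippet above (id 8,428); the beam parameter values below are arbitrary illustrative numbers, and NumPy is assumed to be available together with the function as defined.

# Illustrative only: drawing a 5D Gaussian phase-space distribution
# with bigaussian() from the snippet above (parameter values are made up).
import numpy as np

np.random.seed(0)
coords = bigaussian(
    n_particles=10_000,
    mean=(0.0, 0.0, 0.0, 0.0, 0.0),
    geometric_emittance_h=2.5e-6,
    geometric_emittance_v=2.5e-6,
    sigma_p=1e-3,
)
print(coords.shape)  # (5, 10000): one row per phase-space coordinate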
import numpy as np


def horizontal_tile(silhouette, reps=2):
    """Places two silhouettes side-by-side with an empty line in the middle."""
    silhouette = np.append(silhouette, np.zeros((silhouette.shape[0], 1)), axis=1)
    return np.tile(silhouette, (1, reps))[:, :]
ddccc0ff9cb7f1fba56dfc52de723f5253729952
8,429
def grads_norm(parameters):
    """get grad norms of the parameters, useful for model inspection"""
    t = [p.grad for p in parameters if p.grad is not None]
    return many_l2_norm(*t)
9904f5313f63387ba0c4f139029759118f2ecae8
8,430
def django_admin_add_object(request, context):
    """show add object"""
    if request and request.user.is_staff and (context.get('object', None) or context.get('model', None)):
        object_class = context.get('model', None)
        if not object_class:
            object_class = context['object'].__class__
        view_name = 'admin:{0}_{1}_add'.format(get_model_app(object_class), get_model_name(object_class))
        try:
            return make_link(
                reverse(view_name),
                _('Add {0}').format(get_model_label(object_class)),
                'table',
                classes=['icon', 'alert_on_click']
            )
        except NoReverseMatch:
            pass
909c79cc75913afff341eed84286edd79352fc0c
8,431
def get_config():
    """Returns an instance of the configured config class.

    :return: Project's defined Adyen configuration.
    :rtype: :class:`AbstractAdyenConfig`

    By default, this function will return an instance of
    :class:`adyen.settings_config.FromSettingsConfig`. If
    :data:`ADYEN_CONFIG_CLASS` is defined, it will try to load this class and
    return an instance of this class instead.

    .. note::

        This function expects :data:`ADYEN_CONFIG_CLASS` to be a string that
        represent the python import path of the Adyen config class, such as
        ``adyen.settings_config.FromSettingsConfig``.
    """
    try:
        config_class_string = settings.ADYEN_CONFIG_CLASS
    except AttributeError:
        config_class_string = 'adyen.settings_config.FromSettingsConfig'
    return import_string(config_class_string)()
16a26bd31752211d2aa7a22858f0317ba90b5bad
8,432
import math

import numpy as np


def aperiodic(amp, samples):
    """an aperiodic oscillating signal

    Parameters
    ----------
    amp : float
        values range over +-amp
    samples : int
        number of samples to generate

    Returns
    -------
    ndarray
    """
    periods = np.abs(sine(samples, samples, 1)) + samples / 10
    seq = [amp * math.sin(i * 2 * math.pi / periods[i]) for i in range(samples)]
    return np.array(seq)
42428a16fbfee7cf2a9d8b566fc122e9c56b7e6a
8,433
import base64


def removeHs(ctab):
    """
    Removes any hydrogens from the graph of a molecule. CTAB is urlsafe_base64 encoded string containing single
    molfile or concatenation of multiple molfiles.
    cURL examples:

        curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")
        curl -X GET ${BEAKER_ROOT_URL}removeHs/$(cat removeHs.mol | base64 -w 0 | tr "+/" "-_")?implicitOnly=1
    """
    data = base64.urlsafe_b64decode(ctab)
    return removeHsView(data, request.params)
f3064e2ce3a1db1bb260da49da2df3fb1eaf1310
8,434
def judge_key(key: str, up: any):
    """Check whether the key exists anywhere in the (possibly nested) structure."""
    if dict == type(up):
        if key in up:
            return True
        else:
            for dict_key, dict_value in up.items():
                if dict == type(dict_value) or list == type(dict_value):
                    result = judge_key(key, dict_value)
                    if result:
                        return result
            return False
    elif list == type(up):
        for dict_value in up:
            if dict == type(dict_value) or list == type(dict_value):
                result = judge_key(key, dict_value)
                if result:
                    return result
        return False
    else:
        return False
ee0086259343df30cfc7b72951a165b557a843f9
8,435
def apply_changes(patch_obj_dic, file_dic):
    """
    If all checks are passed, write the changes to the patch file.
    Note that the original file is overwritten.
    :return:
    """
    success = False
    error_title = None
    error_msg = None

    # Checks that mutually exclusive options have not been set together. If they have, alert the user,
    # and abort before writing to file(s)
    for (fn, patch_obj_list) in iterDic(patch_obj_dic):
        mut_exl_dic = {}
        for obj in patch_obj_list:
            if obj.group and 'yes' in obj.status:
                if obj.group not in mut_exl_dic:
                    mut_exl_dic[obj.group] = []
                    mut_exl_dic[obj.group].append(obj.name)
                else:
                    mut_exl_dic[obj.group].append(obj.name)

        for (group, names) in iterDic(mut_exl_dic):
            if len(names) > 1:
                name_str = '\n'
                for name in names:
                    name_str += ' ' + name + '\n'
                error_title = 'Mutually Exclusive Options Detected!'
                error_msg = 'The following options cannot be enabled together: \n' + name_str + \
                            fn + ' was not written.'
                success = False
                return success, error_title, error_msg

    # If checks passed, prepare and then write data to file(s)
    for (fn, patch_obj_list) in iterDic(patch_obj_dic):
        for obj in patch_obj_list:
            file_dic = prep_for_writing(fn, obj, file_dic)
        r_p_f_success, error_title, error_msg = write_patch_files(fn, file_dic)
        if not r_p_f_success:
            success = False
            return success, error_title, error_msg

    success = True
    return success, error_title, error_msg
ad52721ab338b0124869c32c2e08f202deeb981f
8,436
def construct_1D_scan_fast(gate, swing, n_pt, t_step, biasT_corr, pulse_lib, digitizer, channels, dig_samplerate, dig_vmax=2.0, iq_mode=None, acquisition_delay_ns=None, enabled_markers=[], channel_map=None, pulse_gates={}, line_margin=0): """ 1D fast scan parameter constructor. Args: gate (str) : gate/gates that you want to sweep. swing (double) : swing to apply on the AWG gates. [mV] n_pt (int) : number of points to measure (current firmware limits to 1000) t_step (double) : time in ns to measure per point. [ns] biasT_corr (bool) : correct for biasT by taking data in different order. pulse_lib : pulse library object, needed to make the sweep. digitizer : digitizer object channels : digitizer channels to read dig_samplerate : digitizer sample rate [Sa/s] iq_mode (str or dict): when digitizer is in MODE.IQ_DEMODULATION then this parameter specifies how the complex I/Q value should be plotted: 'I', 'Q', 'abs', 'angle', 'angle_deg'. A string applies to all channels. A dict can be used to specify selection per channel, e.g. {1:'abs', 2:'angle'}. Note: channel_map is a more generic replacement for iq_mode. acquisition_delay_ns (float): Time in ns between AWG output change and digitizer acquisition start. This also increases the gap between acquisitions. enable_markers (List[str]): marker channels to enable during scan channel_map (Dict[str, Tuple(int, Callable[[np.ndarray], np.ndarray])]): defines new list of derived channels to display. Dictionary entries name: (channel_number, func). E.g. {(ch1-I':(1, np.real), 'ch1-Q':(1, np.imag), 'ch3-Amp':(3, np.abs), 'ch3-Phase':(3, np.angle)} The default channel_map is: {'ch1':(1, np.real), 'ch2':(2, np.real), 'ch3':(3, np.real), 'ch4':(4, np.real)} pulse_gates (Dict[str, float]): Gates to pulse during scan with pulse voltage in mV. E.g. {'vP1': 10.0, 'vB2': -29.1} line_margin (int): number of points to add to sweep 1 to mask transition effects due to voltage step. The points are added to begin and end for symmetry (bias-T). Returns: Parameter (QCODES multiparameter) : parameter that can be used as input in a conversional scan function. """ vp = swing/2 # set up sweep voltages (get the right order, to compenstate for the biasT). voltages_sp = np.linspace(-vp,vp,n_pt) if biasT_corr: m = (n_pt+1)//2 voltages = np.zeros(n_pt) voltages[::2] = voltages_sp[:m] voltages[1::2] = voltages_sp[m:][::-1] else: voltages = voltages_sp return dummy_digitzer_scan_parameter(digitizer, None, pulse_lib, t_step, (n_pt, ), (gate, ), ( tuple(voltages_sp), ), biasT_corr, 500e6)
cbbffe77187cfd923b1e9b5982fb1e2b6319a854
8,437
def assemble_f_local(ck, f_func, p1, p2, p3): """ Assemble the local contribution to the f_load_lv for the element Parameters ---------- ck : np.array basis function coef. matrix. f_func : function load function. p1 : np.array first vertex of the triangle element. p2 : np.array second vertex of the triangle element. p3 : np.array third vertex of the triangle element. Returns ------- np.array local contribution to f_load_lv. """ f_local = np.zeros(6, dtype=float) for ki in range(6): i, di = inv_index_map(ki) def f_phi(x, y): return f_func(x, y)[:, di] * phi(x, y, ck, i) f_local[ki] = quadrature2D(p1, p2, p3, 4, f_phi) return f_local
9912026fbde63b0cf6780a8b3fc8131dbc99c809
8,438
def take_turn(num_rolls, opponent_score, dice=six_sided): """Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free Bacon). Return the points scored for the turn by the current player. Also implements the Hogtimus Prime rule. num_rolls: The number of dice rolls that will be made. opponent_score: The total score of the opponent. dice: A function that simulates a single dice roll outcome. """ # Leave these assert statements here; they help check for errors. assert type(num_rolls) == int, 'num_rolls must be an integer.' assert num_rolls >= 0, 'Cannot roll a negative number of dice in take_turn.' assert num_rolls <= 10, 'Cannot roll more than 10 dice.' assert opponent_score < 100, 'The game should be over.' # BEGIN PROBLEM 2 if num_rolls == 0: roll_score = free_bacon(opponent_score) else: roll_score = roll_dice(num_rolls, dice) if is_prime(roll_score): return next_prime(roll_score) else: return roll_score # END PROBLEM 2
f072341dde309a7b612da896d2db348c92a7f0c4
8,439
def __sort_vertices(points): """Return vertices that are sorted by average center of all points.""" points = list(set(points)) if len(points) < 3: return None start_point = __find_average_center(points) start_vector = Vector3D.by_points(start_point, points[0]) return sorted(points, key=lambda point: GeometryUtils.angle_between( start_vector, Vector3D.by_points(start_point, point)))
b89374b1b8e06c3bcc87b074239c1cc13ecd7de4
8,440
from typing import List from typing import Tuple def create_feature( tokens: List[str], label_ids: List[int], words_map: List[Tuple[int, int, bool]], max_seq_length: int, tokenizer: PreTrainedTokenizer, cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False, pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0, mask_padding_with_zero=True, words_map_pad=(-1, -1, True) ) -> Tuple[InputFeatures, List[Tuple[int, int, str]]]: """ `cls_token_at_end` define the location of the CLS token: - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP] - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS] `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet) """ # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. special_tokens_count = tokenizer.num_special_tokens_to_add() # if len(tokens) > max_seq_length - special_tokens_count: # tokens = tokens[: (max_seq_length - special_tokens_count)] # label_ids = label_ids[: (max_seq_length - special_tokens_count)] assert (len(tokens) <= max_seq_length - special_tokens_count) # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens += [sep_token] label_ids += [pad_token_label_id] words_map += [words_map_pad] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] words_map += [words_map_pad] segment_ids = [sequence_a_segment_id] * len(tokens) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] words_map += [words_map_pad] else: tokens = [cls_token] + tokens label_ids = [pad_token_label_id] + label_ids segment_ids = [cls_token_segment_id] + segment_ids words_map = [words_map_pad] + words_map input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. 
padding_length = max_seq_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids label_ids = ([pad_token_label_id] * padding_length) + label_ids words_map = ([words_map_pad] * padding_length) + words_map else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length words_map += [words_map_pad] * padding_length assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length assert len(words_map) == max_seq_length if "token_type_ids" not in tokenizer.model_input_names: segment_ids = None return InputFeatures( input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids, ), words_map
7dccdf98a07ec254abc14c15bba4c5ea307e8f2b
8,441
import os def unified_genotyper(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Perform SNP genotyping on the given alignment file. """ if out_file is None: out_file = "%s-variants.vcf" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): broad_runner, params = \ _shared_gatk_call_prep(align_bams, ref_file, items[0]["config"], assoc_files["dbsnp"], region, out_file) if (not isinstance(region, (list, tuple)) and not all(has_aligned_reads(x, region) for x in align_bams)): vcfutils.write_empty_vcf(out_file) else: with file_transaction(out_file) as tx_out_file: params += ["-T", "UnifiedGenotyper", "-o", tx_out_file, "--genotype_likelihoods_model", "BOTH"] broad_runner.run_gatk(params) return out_file
65d96b87b31508816ed3e87a53145605004d7f6e
8,442
import os def read_and_compress_table(parser, table, debug): """Read data from a FITS file and save it into a FITS binary table The data are read from the FITS file specified in the "table" section of the "parser" object (an instance of ConfigurationParser). If "debug" is true, save additional information (useful for debugging) in the table.""" input_file_name = os.path.normpath(parser.get(table, 'file')) cur_hdu = None with pyfits.open(input_file_name) as input_file: hdu, column = get_hdu_and_column_from_schema(parser, table, input_file) compression = parser.get(table, 'compression') log.info('compressing file %s (HDU %s, column %s) ' 'into table "%s", ' 'compression is "%s"', input_file_name, str(hdu), str(column), table, compression) samples_format = input_file[hdu].columns.formats[column] samples = ppc.to_native_endianness(input_file[hdu].data.field(column)) if parser.has_option(table, 'datatype'): samples = np.array(samples, dtype=parser.get(table, 'datatype')) cur_hdu, num_of_bytes, elapsed_time = compress_to_FITS_table(parser, table, samples_format, samples, debug) cur_hdu.name = table cur_hdu.header['PCNUMSA'] = (len(samples), 'Number of uncompressed samples') cur_hdu.header['PCCOMPR'] = (compression, 'Polycomp compression algorithm') cur_hdu.header['PCSRCTP'] = (str(samples.dtype), 'Original NumPy type of the data') cur_hdu.header['PCUNCSZ'] = (samples.itemsize * samples.size, 'Size of the uncompressed data [bytes]') cur_hdu.header['PCCOMSZ'] = (num_of_bytes, 'Size of the compressed data [bytes]') cur_hdu.header['PCTIME'] = (elapsed_time, 'Time used for compression [s]') cr = float(cur_hdu.header['PCUNCSZ']) / float(cur_hdu.header['PCCOMSZ']) cur_hdu.header['PCCR'] = (cr, 'Compression ratio') log.info('table "%s" compressed, %s compressed to %s (cr: %.4f)', table, humanize_size(cur_hdu.header['PCUNCSZ']), humanize_size(cur_hdu.header['PCCOMSZ']), cr) return cur_hdu
3533dd91f48cdba2064314dcec98c511443b319e
8,443
from scipy.stats import norm, uniform def calculate_log_likelihood_and_derivative_at_parameter_point_with_mRNA(protein_at_observations,model_parameters,mean_protein,measurement_variance,mRNA_parameters): """ Calculates the log of the likelihood, and the derivative of the negative log likelihood wrt each parameter, of our data given the paramters, using the Kalman filter. It uses the predicted_observation_distributions, predicted_observation_mean_derivatives, and predicted_observation_variance_derivatives from the kalman_filter function. It returns the log likelihood as in the calculate_log_likelihood_at_parameter_point function, and also returns an array of the derivative wrt each parameter. Parameters ---------- protein_at_observations : numpy array. Observed protein. The dimension is n x 2, where n is the number of observation time points. The first column is the time, and the second column is the observed protein copy number at that time. model_parameters : numpy array. An array containing the moderowl parameters in the following order: repression_threshold, hill_coefficient, mRNA_degradation_rate, protein_degradation_rate, basal_transcription_rate, translation_rate, transcription_delay. mean_protein : float. The mean protein value, used to set prior bounds for the repression threshold measurement_variance : float. The variance in our measurement. This is given by Sigma_e in Calderazzo et. al. (2018). mRNA_parameters : numpy array. two element array, mean and standard deviation of the mRNA distribution Returns ------- log_likelihood : float. The log of the likelihood of the data. log_likelihood_derivative : numpy array. The derivative of the log likelihood of the data, wrt each model parameter """ number_of_parameters = model_parameters.shape[0] if ((uniform(50,2*mean_protein-50).pdf(model_parameters[0]) == 0) or (uniform(2,6-2).pdf(model_parameters[1]) == 0) or (uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[2]) == 0) or (uniform(np.log(2)/150,np.log(2)/10 - np.log(2)/150).pdf(model_parameters[3]) == 0) or (uniform(0.01,120-0.01).pdf(model_parameters[4]) == 0) or (uniform(0.01,40-0.01).pdf(model_parameters[5]) == 0) or (uniform(1,40-1).pdf(model_parameters[6]) == 0) ): return -np.inf, np.zeros(number_of_parameters) state_space_mean, _, _, _, predicted_observation_distributions, predicted_observation_mean_derivatives, predicted_observation_variance_derivatives = kalman_filter(protein_at_observations, model_parameters, measurement_variance, derivative=True) mean_mRNA = np.mean(state_space_mean[:,1]) # calculate log likelihood as before if protein_at_observations.reshape(-1,2).shape[0] == 1: number_of_observations = 1 observations = [protein_at_observations[1]] else: number_of_observations = protein_at_observations.shape[0] observations = protein_at_observations[:,1] mean = predicted_observation_distributions[:,1] sd = np.sqrt(predicted_observation_distributions[:,2]) # add mRNA penalty log_likelihood = np.sum(norm.logpdf(observations,mean,sd)) + norm.logpdf(mean_mRNA, mRNA_parameters[0], mRNA_parameters[1]) # now for the computation of the derivative of the negative log likelihood. 
An expression of this can be found # at equation (28) in Mbalawata, Särkkä, Haario (2013) observation_transform = np.array([[0.0,1.0]]) helper_inverse = 1.0/predicted_observation_distributions[:,2] log_likelihood_derivative = np.zeros(number_of_parameters) for parameter_index in range(number_of_parameters): for time_index in range(number_of_observations): log_likelihood_derivative[parameter_index] -= 0.5*(helper_inverse[time_index]*np.trace(observation_transform.dot( predicted_observation_variance_derivatives[time_index,parameter_index].dot( np.transpose(observation_transform)))) - helper_inverse[time_index]*np.transpose(observation_transform.dot( predicted_observation_mean_derivatives[time_index,parameter_index]))[0]* (observations[time_index] - mean[time_index]) - np.power(helper_inverse[time_index],2)*np.power(observations[time_index] - mean[time_index],2)* observation_transform.dot( predicted_observation_variance_derivatives[time_index,parameter_index].dot( np.transpose(observation_transform))) - helper_inverse[time_index]*(observations[time_index] - mean[time_index])* observation_transform.dot(predicted_observation_mean_derivatives[time_index,parameter_index])[0]) return log_likelihood, log_likelihood_derivative
9d187b23c5a56e15c2bd900242823e5780991f7c
8,444
def SEORedirectMiddleware(get_response): """ Intercepts 404 errors and checks the database for any defined redirects that match the current request path. """ def middleware(request): response = get_response(request) if response.status_code != 404: return response try: r = Redirect.objects.get(url=normalize_url(request.path)) except Redirect.DoesNotExist: return response to = r.target_url kwargs = dict(permanent=r.is_permanent) if r.with_query_string: to = modify_url_query_string(to, replace=request.GET.dict()) return redirect(to, **kwargs) return middleware
750e8f3603114c8a6474f2fdfde76cefea1eacf7
8,445
def b2s(src): """ Convert from bytes to string :param src: bytes :return: string """ return src.decode(encoding=UTF_ENCODING)
3a71fe7684ce57833db6861a250b0ba1d5fbfd47
8,446
def get_finetune_lfo_type(header: bytes) -> AutomationLfoType: """Return finetune LFO type.""" assert isinstance(value := _unpack(header, "FINETUNE_LFO_TYPE"), int), type(value) return AutomationLfoType(value)
2dcaf71353d0641dd1f0cac1bf83ecc720cd666b
8,447
def locate(client: Client, structure: Structure) -> str: """Locates the respective structure.""" return client.run('locate', structure)
93e2d83c6be7cc5d4a628233ccdadda2f9a914a5
8,448
def teraflops_for_accelerator(accel): """ Stores the number of TFLOPs available to a few accelerators, including driver handicaps. Args: accel (str): A string descriptor of which accelerator to use. Must be either "3090" or "V100". Returns: accel_flops (int): an integer of how many TFLOPs are in the accelerator. """ accel_flops = {"3090": 71, "V100": 125} return accel_flops[accel]
a491beb06baf73325e2e7b5f0876e98ea312e2aa
8,449
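A minimal doctest-style sketch of the lookup above; the example values follow directly from the accel_flops dict in the snippet:
>>> teraflops_for_accelerator("V100")
125
>>> teraflops_for_accelerator("3090")
71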
def reduced_supercell_vectors(ab, n): """ Returns all possible reduced in-plane lattice vectors and transition matrices for the given starting unit cell lattice vectors(ab) and the supercell size n Args: ab: a, b lattice vectors n: (int) supercell size """ uv_list = [] tm_list = [] for r_tm in get_trans_matrices(n): uv = get_uv(ab, r_tm) uv_r, tm0 = get_reduced_uv(uv, r_tm) uv_list.append(uv_r) tm_list.append(tm0) return uv_list, tm_list
fba60388b42beb170bfba96a9aeeccc4e1d74dbf
8,450
import json def jsonify(*args, **kwargs): """Creates a `Response` with the JSON representation of the given arguments with an`application/json` mimetype. The arguments to this function are the same as to the `dict` constructor. Example usage: from cocopot import jsonify @app.route('/_get_current_user') def get_current_user(): return jsonify(username=g.user.username, email=g.user.email, id=g.user.id) This will send a JSON response like this to the browser: { "username": "admin", "email": "admin@localhost", "id": 42 } """ indent = None separators = (',', ':') rv = Response(json.dumps(dict(*args, **kwargs), indent=indent, separators=separators), content_type='application/json') return rv
04fe7d2808081f9a9f9b7eb610e168c8329298cb
8,451
def logged_in_profile(client): """Add a Profile and logged-in User""" user = UserFactory.create(username="george") client.force_login(user) return user.profile
b4ed5872cf8da789f3e6ab001b8afc556c0faa50
8,452
def get_storage_backend(): """ Return the singleton instance of the storage backend in use. """ global _STORAGE_BACKEND if _STORAGE_BACKEND is None: module, klass = ClassLoader.split(str(config.STORAGE_BACKEND_CLASS)) cl = ClassLoader(module, klass, config.STORAGE_BACKEND_ARGS) _STORAGE_BACKEND = cl.get_instance() return _STORAGE_BACKEND
d23315854bf736637f483f1d802b868d4c45ff8a
8,453
def _pr_compile(regex, cleanup=None): """Prepare a 2-tuple of compiled regex and callable.""" return (_re_compile(regex), cleanup)
832e794bef679272231e336aa2128cf1457abb8d
8,454
def config(): """ Configuration via config.json (introduced in Anki 2.1) """ try: getConfig = mw.addonManager.getConfig except AttributeError: return LEGACY_CONFIG return getConfig(__name__)
cc099497d55ccef8195b47dd4d080695e9af370c
8,455
def ping(request): """ This view returns a dummy json. It is meant to be used to check whether the server is alive or not """ return Json(None)
54f8d05454913b4119f1580f5d8a19a878e76c13
8,456
import numpy as np import copy def copy_ffn(model): """Copy feed forward network model. Args: model: A previously created ffn model Returns: A copy of the model """ #init model as list holding data for each layer start with input layer newmodel = [] newmodel.append({ "layer":0, "n": copy.copy(model[0]['n']), "activation": copy.copy(model[0]["activation"]), "lreg": copy.copy(model[0]["lreg"]), "regval": copy.copy(model[0]["regval"]), "desc": copy.copy(model[0]["desc"]) }) # init weights and biases for hidden layers and declare activation function for layer in range(1, len(model)): newmodel.append({ "layer":layer, "n": copy.copy(model[layer]['n']), "activation": copy.copy(model[layer]["activation"]), "lreg": copy.copy(model[layer]["lreg"]), "regval": copy.copy(model[layer]["regval"]), "desc": copy.copy(model[layer]["desc"]), "weight": np.copy(model[layer]["weight"]), "bias": np.copy(model[layer]["bias"]), "weightdot": np.copy(model[layer]["weightdot"]), "biasdot": np.copy(model[layer]["biasdot"]) }) return newmodel
5bde1163d5d53a75839b15aaa38a28ecc54b195c
8,457
import tempfile import os import subprocess import glob def nlp_progress() -> TaskDB: """Parse the whole NLP Progress repository. Checks out the NLP Progress git repository and parses all the markdown files in it. Returns: TaskDB: Populated task database. """ tdb = TaskDB() with tempfile.TemporaryDirectory() as tmpdir: repo_path = os.path.join(tmpdir, "nlp-progress") cp = subprocess.run( ["git", "clone", NLP_PROGRESS_REPO, repo_path], capture_output=True ) if cp.returncode != 0: logger.error("stdout: %s", cp.stdout) logger.error("stderr: %s", cp.stderr) raise DataError("Could not clone the NLP Progress repository.") filenames = glob.glob(os.path.join(repo_path, "english", "*.md")) for filename in filenames: file_tdb = parse_file(filename) for task in file_tdb.tasks.values(): tdb.add_task(task) return tdb
62a2d14d8939a3f2d20814e131771efc7ca7ec47
8,458
def is_big(label: str) -> bool: """Returns whether or not a cave is large based on its label""" return label.isupper()
7abdb0c5687e7870c96b767dc498e1f3c4ed21fe
8,459
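A short doctest-style illustration of the cave-size check, using hypothetical labels:
>>> is_big("AB")
True
>>> is_big("dc")
False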
def fast_mult_polynoms(a, b): """Fast multiply of two polynoms in GF(2^8) using the log table NB. This is NOT constant-time and leaks secret values in timing differences. DO NOT USE THIS CODE TO IMPLEMENT SECURE APPLICATIONS """ if a == 0 or b == 0: return 0 return POWER_X1_TABLE[(LOG_X1_TABLE[a] + LOG_X1_TABLE[b]) % 255]
3d670b2380963c50f74eb2f671ccdf7378ce58aa
8,460
def get_page_for_group(user_groups, slug): """ Returns a page associated with user_groups given a slug. """ try: page = get_pages_for_group(user_groups).get( slug = slug) except Page.DoesNotExist: page = None return page
6ea742688f07ca2ee0dd1ee4665598e074759229
8,461
def read_gps(gps_path): """Read GPS feed in CSV. Expects GPS structured as: vehicle_id: str Internal system identification of the vehicle. Should be unique per vehicle, and is used for tracking the vehicle as it proceeds through the system. route_id: str The route_id from the GTFS feed that this selector refers to datetime: datetime Moment at which the vehicle's position was measured latitude: float Degrees North, in the WGS-84 coordinate system. longitude: float Degrees East, in the WGS-84 coordinate system. Parameters ---------- gps_path : str Path to the GPS feed CSV file. Returns ------- pm.MoveDataFrame GPS data as a MoveDataFrame """ return pm.MoveDataFrame( data=pd.read_csv(gps_path), latitude="latitude", longitude="longitude", datetime="datetime", traj_id="vehicle_id", )
2ec09b69646b31e38b07e25bf2ffa0f0c002f52b
8,462
def _ureduce(a, func, **kwargs): """ Internal Function. Call `func` with `a` as first argument swapping the axes to use extended axis on functions that don't support it natively. Returns result and a.shape with axis dims set to 1. Parameters ---------- a : array_like Input tensor or object that can be converted to a tensor. func : callable Reduction function capable of receiving a single axis argument. It is called with `a` as first argument followed by `kwargs`. kwargs : keyword arguments additional keyword arguments to pass to `func`. Returns ------- result : tuple Result of func(a, **kwargs) and a.shape with axis dims set to 1 which can be used to reshape the result to the same shape a ufunc with keepdims=True would produce. """ axis = kwargs.get('axis', None) if axis is not None: keepdim = list(a.shape) nd = a.ndim axis = normalize_axis_tuple(axis, nd) for ax in axis: keepdim[ax] = 1 if len(axis) == 1: kwargs['axis'] = axis[0] else: keep = set(range(nd)) - set(axis) nkeep = len(keep) # swap axis that should not be reduced to front for i, s in enumerate(sorted(keep)): a = a.swapaxes(i, s) # merge reduced axis a = a.reshape(a.shape[:nkeep] + (-1,)) kwargs['axis'] = -1 keepdim = tuple(keepdim) else: keepdim = (1,) * a.ndim r = func(a, **kwargs) return r, keepdim
93be4e1a26dec25b74e6a9f330863ea7677cd614
8,463
def compute_autocorrelation_local(x, Fs, N, H, norm_sum=True): """Compute local autocorrelation [FMP, Section 6.2.3] Notebook: C6/C6S2_TempogramAutocorrelation.ipynb Args: x: Input signal Fs: Sampling rate N: Window length H: Hop size norm_sum: Normalizes by the number of summands in local autocorrelation Returns: A: Time-lag representation T_coef: Time axis (seconds) F_coef_lag: Lag axis """ L = len(x) L_left = round(N / 2) L_right = L_left x_pad = np.concatenate((np.zeros(L_left), x, np.zeros(L_right))) L_pad = len(x_pad) M = int(np.floor(L_pad - N) / H) + 1 A = np.zeros((N, M)) win = np.ones(N) if norm_sum is True: lag_summand_num = np.arange(N, 0, -1) for n in range(M): t_0 = n * H t_1 = t_0 + N x_local = win * x_pad[t_0:t_1] r_xx = np.correlate(x_local, x_local, mode='full') r_xx = r_xx[N-1:] if norm_sum is True: r_xx = r_xx / lag_summand_num A[:, n] = r_xx Fs_A = Fs / H T_coef = np.arange(A.shape[1]) / Fs_A F_coef_lag = np.arange(N) / Fs return A, T_coef, F_coef_lag
8e67de8279e0daae90ae3391064ea92b023dfafc
8,464
def euclid_dist(vector_p1, vector_p2): """ Calculate the Euclidean distance between two arrays of 2D points """ distances = vector_p1 - vector_p2 return cp.hypot(distances[:, :, 0], distances[:, :, 1])
6f9d366cddb62f9ad1a8e26c9c2179fb73238a32
8,465
def _name_cleaner(agent_name): """Renames agent_name to prettier string for plots.""" rename_dict = {'correct_ts': 'Correct TS', 'kl_ucb': 'KL UCB', 'misspecified_ts': 'Misspecified TS', 'ucb1': 'UCB1', 'ucb-best': 'UCB-best', 'nonstationary_ts': 'Nonstationary TS', 'stationary_ts': 'Stationary TS', 'greedy': 'greedy', 'ts': 'TS', 'action_0': 'Action 0', 'action_1': 'Action 1', 'action_2': 'Action 2', 'bootstrap': 'bootstrap TS', 'laplace': 'Laplace TS', 'thoughtful': 'Thoughtful TS', 'gibbs': 'Gibbs TS'} if agent_name in rename_dict: return rename_dict[agent_name] else: return agent_name
e874745e804e07e385b377ec0ecd4247640ef6ce
8,466
def add_training_args(parser): """Training arguments.""" group = parser.add_argument_group('train', 'training configurations') group.add_argument('--experiment-name', type=str, default="gpt-345M", help="The experiment name for summary and checkpoint") group.add_argument('--batch-size', type=int, default=4, help='Data Loader batch size') group.add_argument('--gradient-accumulation-steps', type=int, default=1, help='Data Loader batch size') group.add_argument('--weight-decay', type=float, default=0.01, help='weight decay coefficient for L2 regularization') group.add_argument('--checkpoint-activations', action='store_true', help='checkpoint activation to allow for training ' 'with larger models and sequences') group.add_argument('--checkpoint-num-layers', type=int, default=1, help='chunk size (number of layers) for checkpointing') group.add_argument('--deepspeed-activation-checkpointing', action='store_true', help='uses activation checkpointing from deepspeed') group.add_argument('--epochs', type=int, default=None, help='Number of finetunning epochs. Zero results in evaluation only.') group.add_argument('--clip-grad', type=float, default=1.0, help='gradient clipping') group.add_argument('--train-iters', type=int, default=0, help='total number of iterations to train over all training runs') group.add_argument('--label-smoothing', type=float, default=0.0) group.add_argument('--log-interval', type=int, default=100, help='report interval') group.add_argument('--summary-dir', type=str, default="", help="The directory to store the summary") group.add_argument('--seed', type=int, default=1234, help='random seed') # Batch producer arguments group.add_argument('--reset-position-ids', action='store_true', help='Reset posistion ids after end-of-document token.') group.add_argument('--reset-attention-mask', action='store_true', help='Reset self attention maske after ' 'end-of-document token.') # Learning rate. group.add_argument('--lr-decay-iters', type=int, default=None, help='number of iterations to decay LR over,' ' If None defaults to `--train-iters`*`--epochs`') group.add_argument('--lr-decay-style', type=str, default='linear', choices=['constant', 'linear', 'cosine', 'exponential'], help='learning rate decay function') group.add_argument('--lr-decay-ratio', type=float, default=0.1) group.add_argument('--lr', type=float, default=1.0e-4, help='initial learning rate') group.add_argument('--warmup', type=float, default=0.01, help='percentage of data to warmup on (.01 = 1% of all ' 'training iters). 
Default 0.01') group.add_argument('--switch-linear', action='store_true', help="Switch to linear decay for cosine decay") # model checkpointing group.add_argument('--save', type=str, default=None, help='Output directory to save checkpoints to.') group.add_argument('--new-save-directory', action='store_true') group.add_argument('--save-epoch', type=int, default=1, help='number of epochs between saves') group.add_argument('--save-interval', type=int, default=5000, help='number of iterations between saves') group.add_argument('--no-save-optim', action='store_true', help='Do not save current optimizer.') group.add_argument('--no-save-rng', action='store_true', help='Do not save current rng state.') group.add_argument('--load', type=str, default=None, help='Path to a directory containing a model checkpoint.') group.add_argument('--no-load-optim', action='store_true', help='Do not load optimizer when loading checkpoint.') group.add_argument('--no-load-rng', action='store_true', help='Do not load rng state when loading checkpoint.') group.add_argument('--no-load-lr-scheduler', action='store_true', help='Do not load lr scheduler when loading checkpoint.') group.add_argument('--no-deepspeed-load', action='store_true', help='Not use deepspeed when loading checkpoint') group.add_argument('--finetune', action='store_true', help='Load model for finetuning. Do not load optimizer ' 'or rng state from checkpoint and set iteration to 0. ' 'Assumed when loading a release checkpoint.') group.add_argument('--resume-dataloader', action='store_true', help='Resume the dataloader when resuming training. ' 'Does not apply to tfrecords dataloader, try resuming' 'with a different seed in this case.') # distributed training args group.add_argument('--distributed-backend', default='nccl', help='which backend to use for distributed training. 
One of [gloo, nccl]', choices=['nccl', 'gloo']) group.add_argument('--DDP-impl', default='torch', choices=['local', 'torch', 'none'], help='which DistributedDataParallel implementation to use.') group.add_argument('--local_rank', type=int, default=None, help='local rank passed from distributed launcher') # BlockLM training args group.add_argument('--block-lm', action='store_true', help="whether use the BlockLM pre-training") group.add_argument('--masked-lm', action='store_true', help='whether to use the mlm objective') group.add_argument('--bert-prob', type=float, default=0.5) group.add_argument('--gpt-infill-prob', type=float, default=0.5) group.add_argument('--gpt-min-ratio', type=float, default=0.5) group.add_argument('--gap-sentence-prob', type=float, default=0.0) group.add_argument('--gap-sentence-ratio', type=float, default=0.15) group.add_argument('--avg-block-length', type=int, default=3) group.add_argument('--short-seq-prob', type=float, default=0.0) group.add_argument('--single-span-prob', type=float, default=0.0) group.add_argument('--task-mask', action='store_true', help="Use different mask for generation and blank filling") group.add_argument('--no-shuffle-block', action='store_true', help="not shuffle the blocks when filling the blank") group.add_argument('--no-block-position', action='store_true', help='Use (rough) absolute positions instead of block positions') group.add_argument('--sentinel-token', action='store_true', help="Use sentinel (mask) tokens to replace 2d position encoding") group.add_argument('--block-mask-prob', type=float, default=0.0) group.add_argument('--context-mask-ratio', type=float, default=0.0) group.add_argument('--random-position', action='store_true', help="Use random start position to cover all the position embeddings") return parser
05c71d77320644fdaf00ef1638e76dbbce60ffb5
8,467
from typing import Optional from typing import List from typing import Dict def _multi_class_confusion_matrix_plot( thresholds: Optional[List[float]] = None, num_thresholds: Optional[int] = None, name: Text = MULTI_CLASS_CONFUSION_MATRIX_PLOT_NAME, eval_config: Optional[config.EvalConfig] = None, model_name: Text = '', output_name: Text = '', ) -> metric_types.MetricComputations: """Returns computations for multi-class confusion matrix plot.""" if num_thresholds is None and thresholds is None: thresholds = [0.0] key = metric_types.PlotKey( name=name, model_name=model_name, output_name=output_name) # Make sure matrices are calculated. matrices_computations = ( multi_class_confusion_matrix_metrics.multi_class_confusion_matrices( thresholds=thresholds, num_thresholds=num_thresholds, eval_config=eval_config, model_name=model_name, output_name=output_name)) matrices_key = matrices_computations[-1].keys[-1] def result( metrics: Dict[metric_types.MetricKey, multi_class_confusion_matrix_metrics.Matrices] ) -> Dict[metric_types.PlotKey, metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]: return { key: metrics[matrices_key].to_proto() .multi_class_confusion_matrix_at_thresholds } derived_computation = metric_types.DerivedMetricComputation( keys=[key], result=result) computations = matrices_computations computations.append(derived_computation) return computations
9f670ad80ea10c05460815fd9af250d60b035d9e
8,468
def innerL(i, os): """ Inner loop of the Platt SMO optimization. Parameters ---------- i : int index of the first alpha to optimize os : OptStruct Returns ------- int 1 if a pair of alphas was updated, otherwise 0 """ ei = cal_ek(os, i) if (os.labels[i] * ei < -os.tol and os.alphas[i] < os.C) or ( os.labels[i] * ei > os.tol and os.alphas[i] > 0 ): j, ej = select_j(i, os, ei) alphaIold = os.alphas[i].copy() alphaJold = os.alphas[j].copy() if os.labels[i] != os.labels[j]: L = max(0, os.alphas[j] - os.alphas[i]) H = min(os.C, os.C + os.alphas[j] - os.alphas[i]) else: L = max(0, os.alphas[j] + os.alphas[i] - os.C) H = min(os.C, os.alphas[j] + os.alphas[i]) if L == H: print("L==H") return 0 eta = ( 2.0 * os.X[i, :] * os.X[j, :].T - os.X[i, :] * os.X[i, :].T - os.X[j, :] * os.X[j, :].T ) if eta >= 0: print("eta>=0") return 0 os.alphas[j] -= os.labels[j] * (ei - ej) / eta os.alphas[j] = clip_alpha(os.alphas[j], H, L) update_ek(os, j) if abs(os.alphas[j] - alphaJold) < 0.00001: print("j not moving enough") return 0 os.alphas[i] += os.labels[j] * os.labels[i] * (alphaJold - alphaIold) update_ek(os, i) b1 = ( os.b - ei - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[i, :].T - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[i, :] * os.X[j, :].T ) b2 = ( os.b - ej - os.labels[i] * (os.alphas[i] - alphaIold) * os.X[i, :] * os.X[j, :].T - os.labels[j] * (os.alphas[j] - alphaJold) * os.X[j, :] * os.X[j, :].T ) if os.alphas[i] > 0 and os.C > os.alphas[i]: os.b = b1 elif os.alphas[j] > 0 and os.C > os.alphas[j]: os.b = b2 else: os.b = (b1 + b2) / 2.0 return 1 else: return 0
1fc83191dbb16d863aef7c947408126d79c40099
8,469
import click import logging def create_client(ctx: click.Context, opts: ProxyContext) -> CumulocityClient: """Create Cumulocity client and prompt for missing credentials if necessary. Args: ctx (click.Context): Click context opts (ProxyContext): Proxy options Returns: CumulocityClient: Configured Cumulocity client """ if not opts.disable_prompts and not opts.host: opts.host = click.prompt( text="Enter the Cumulocity Host/URL", ) client = CumulocityClient( hostname=opts.host, tenant=opts.tenant, user=opts.user, password=opts.password, tfacode=opts.tfa_code, token=opts.token, ignore_ssl_validate=opts.ignore_ssl_validate, ) if not client.url: opts.show_error( "No Cumulocity host was provided. The host can be set via" "environment variables, arguments or the env-file" ) ctx.exit(ExitCodes.NO_SESSION) logging.info("Checking tenant id") client.validate_tenant_id() # Retry logging so the user can be prompted for # their credentials/TFA code etc. without having to run c8ylp again retries = 3 success = False while retries: try: if client.token: client.validate_credentials() else: client.login() if opts.env_file and opts.store_token: store_credentials(opts, client) success = True break except CumulocityMissingTFAToken as ex: client.tfacode = click.prompt( text="Enter your Cumulocity TFA-Token", hide_input=False ) except Exception as ex: logging.info("unknown exception: %s", ex) if not opts.disable_prompts: if not client.user: client.user = click.prompt( text="Enter your Cumulocity Username", ) if not client.password: client.password = click.prompt( text="Enter your Cumulocity Password [input hidden]", hide_input=True, ) retries -= 1 if not success: logging.info("Could not create client") ctx.exit(ExitCodes.NO_SESSION) return client
cad28ef10409352fe25ae7310fbaae4a095b8a21
8,470
import struct def get_float(data, index): """get a float value from data array""" return struct.unpack('f', "".join(map(chr, data[4*index:(index+1)*4])))[0]
b78a5472bef42312bd765b6f9c58bfe9cddbf311
8,471
def gelu(tensor): """ Gaussian Error Linear Unit - https://arxiv.org/abs/1606.08415 """ return 0.5 * tensor * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (tensor + 0.044715 * tf.pow(tensor, 3))))
acb5101815bb3cd0c30d602fefb0734707bf4acf
8,472
def _uniqueElements(an_iterable): """ :param iterable an_iterable: :return list: the elements of an_iterable with duplicates removed, keeping first-occurrence order """ used = [] unique = [x for x in an_iterable if x not in used and (used.append(x) or True)] return unique
8290d30e48c3ade4a547d7c3a8cf0c57b8d45b19
8,473
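A brief doctest-style sketch (with a hypothetical input list) showing that _uniqueElements, unlike a plain set(), preserves first-occurrence order:
>>> _uniqueElements([3, 1, 3, 2, 1])
[3, 1, 2]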
def guestbook_key(guestbook_name=None): """Constructs a Datastore key for a Guestbook entity with name.""" return ndb.Key('Guestbook', guestbook_name or 'default_guestbook')
fcff509ad5e48b58ffa823801af134c20e974b56
8,474
def _bias_scale(x, b, data_format): """The multiplication counter part of tf.nn.bias_add.""" if data_format == 'NHWC': return x * b elif data_format == 'NCHW': return x * b else: raise ValueError('invalid data_format: %s' % data_format)
19e5bb9419827f6e6976b1c5ed3cd40cdd676ad0
8,475
import re def checkTableName(tables): """ Check if table name has an underscore or not.""" bad = set() output = [] for i in tables: if re.search('.*_.*', i): bad.add(i) if bad: output.append("These tables have underscores in the name") for i in bad: output.append(i) output.append("") else: output.append("No malformed table names") output.append("") return (output, bad)
2847c20712e6ce92367772678d058a05b5d10dc3
8,476
def load_split_from_tfds_builder(builder, batch_size, split, preprocess_example=None, augment_train_example=None, shuffle_buffer_size=None, shuffle_seed=0, cache=True): """Loads a split from a dataset using TensorFlow Datasets compatible builder. Args: builder: tfds.core.DatasetBuilder; A TFDS compatible dataset builder. batch_size: int; The batch size returned by the data pipeline. split: str; Name of the split to be loaded. preprocess_example: function; A function that given an example, returns the preprocessed example. Note that the preprocessing is done BEFORE caching to re-use them. augment_train_example: A function that given a train example returns the augmented example. Note that this function is applied AFTER caching and repeat to get true randomness. shuffle_buffer_size: int; Size of the tf.data.dataset shuffle buffer. shuffle_seed: int; Seed for shuffling the training data. cache: bool; Whether to cache dataset in memory. Returns: A `tf.data.Dataset`, and dataset information. """ # Prepare map functions. preprocess_example = preprocess_example or (lambda ex: ex) augment_train_example = augment_train_example or (lambda ex: ex) shuffle_buffer_size = shuffle_buffer_size or (8 * batch_size) # Download dataset: builder.download_and_prepare() # Each host is responsible for a fixed subset of data. base_split_name, host_start, host_end = get_data_range( builder, split, jax.process_index(), jax.process_count()) data_range = tfds.core.ReadInstruction( base_split_name, unit='abs', from_=host_start, to=host_end) ds = builder.as_dataset(split=data_range, shuffle_files=False) options = tf.data.Options() options.threading.private_threadpool_size = 48 ds = ds.with_options(options) # Applying preprocessing before `ds.cache()` to re-use it. ds = ds.map( preprocess_example, num_parallel_calls=tf.data.experimental.AUTOTUNE) # Caching. if cache: ds = ds.cache() if 'train' in split: # First repeat then batch. ds = ds.repeat() # Augmentation should be done after repeat for true randomness. ds = ds.map( augment_train_example, num_parallel_calls=tf.data.experimental.AUTOTUNE) # Shuffle after augmentation to avoid loading uncropped images into buffer: ds = ds.shuffle(shuffle_buffer_size, seed=shuffle_seed) ds = ds.batch(batch_size, drop_remainder=True) else: # First batch then repeat. ds = ds.batch(batch_size, drop_remainder=False) ds = ds.repeat() ds = ds.prefetch(tf.data.experimental.AUTOTUNE) return ds, builder.info
63c73f65cedc1fff92ce9a02ea23822c8e411439
8,477
import os import torch def load_model_weights(model, filename, verbose=1, cp_folder=""): """ Loads the weights of a PyTorch model. The exception handles cpu/gpu incompatibilities Arguments: model {torch module} -- Model to load the weights to filename {str} -- Name of the checkpoint Keyword Arguments: verbose {int} -- Whether to display infos (default: {1}) cp_folder {str} -- Folder to load from (default: {''}) Returns: torch module -- Model with loaded weights """ if verbose: print(f"\n -> Loading weights from {os.path.join(cp_folder,filename)}\n") try: model.load_state_dict(torch.load(os.path.join(cp_folder, filename)), strict=True) except BaseException: model.load_state_dict( torch.load(os.path.join(cp_folder, filename), map_location="cpu"), strict=True, ) return model
b0f011e3b05d239be955df308a045496805a50c6
8,478
def analyze(tokens): """ Combine expression elements into an operation tree """ assert_non_empty(tokens) # a number or an operator token = analyze_token(tokens.pop(0)) # if it is a number, return it directly and move on: numbers are self-evaluating, the value is the number itself if type(token) in (int, float): return token # if it is an operator, it has to be combined into an Exp expression if token in known_operators: # the current token is an operator, so check that operands follow # in the calculator language every operator is followed by operands # the operands are combined recursively if len(tokens) == 0 or tokens.pop(0) != '(': raise SyntaxError('expected ( after ' + token) return Exp(token, analyze_operands(tokens)) else: raise SyntaxError('unexpected ' + token)
369b0b3df423dd3a38e0756379442e428efb7ef3
8,479
from typing import Mapping from typing import Any def copy_dict(dic: Mapping[str, Any], depth: int = 1) -> Mapping[str, Any]: """Deep copy a dict Args: dic: The dict to be copied depth: The depth to be deep copied Returns: The deep-copied dict """ if depth <= 1: return dic.copy() return { key: copy_dict(val, depth - 1) if isinstance(val, dict) else val for key, val in dic.items() }
a75f9ef7c8dc797ccdf47cdc3029c403b09e75cf
8,480
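A small sketch (with a hypothetical cfg dict) of how the depth argument of copy_dict limits how many nested dict levels are actually copied:
>>> cfg = {"a": {"x": 1}, "b": 2}
>>> shallow = copy_dict(cfg, depth=1)
>>> two_level = copy_dict(cfg, depth=2)
>>> cfg["a"]["x"] = 99
>>> shallow["a"]["x"]    # inner dict is still shared
99
>>> two_level["a"]["x"]  # inner dict was copied
1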
def get_wrf_config(wrf_config, start_date=None, **kwargs): """ precedence = kwargs > wrf_config.json > constants """ if start_date is not None: wrf_config['start_date'] = start_date for key in kwargs: wrf_config[key] = kwargs[key] return wrf_config
c9e070b91ab93a7cb81a576aa799537361b7a26f
8,481
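A minimal sketch of the precedence rule above (kwargs override existing keys, and the dict is updated in place and returned); the 'period' key is hypothetical:
>>> cfg = {'period': 3}
>>> cfg = get_wrf_config(cfg, start_date='2017-08-26_00:00', period=5)
>>> cfg['period'], cfg['start_date']
(5, '2017-08-26_00:00')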
import numpy as np def pdb_to_structure(filename): """Import a structure object from a PDB file. """ try: from Bio import PDB except ImportError: print("I can't import Biopython which is needed to handle PDB files.") raise p = PDB.PDBParser() structure = p.get_structure("S", filename) atoms = [np.array(atom.get_coord()) for atom in structure.get_atoms()] return atoms
1b77b6bc5af75d03af847032827c07656addf4f3
8,482
def allocation_ncsist(): """ Real Name: Allocation NCSIST Original Eqn: IF THEN ELSE( ShiMen Reservoir Depth>=ShiMenReservoir Operation Rule Lower Limit , 6048, IF THEN ELSE( ShiMen Reservoir Depth >=ShiMenReservoir Operation Rule Lower Severe Limit, 6048*0.9 , 6048*0.8 ) ) Units: m3 Limits: (None, None) Type: component Subs: None water right 6048(m^3 per day), the same for each Ten-days; 0.07 CMSD, classified as Domestic. """ return if_then_else( shimen_reservoir_depth() >= shimenreservoir_operation_rule_lower_limit(), lambda: 6048, lambda: if_then_else( shimen_reservoir_depth() >= shimenreservoir_operation_rule_lower_severe_limit(), lambda: 6048 * 0.9, lambda: 6048 * 0.8, ), )
f2b781869957d78dc59e6837a253bc0df29250bd
8,483
def hamming(s1, s2): """Return the hamming distance between 2 DNA sequences""" return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2))
e3e1f3e9cc883f27d26f00c1b3c9495d29c1a139
8,484
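A doctest-style sketch of the distance above; note that the abs(len(s1) - len(s2)) term also penalizes length differences:
>>> hamming("ACGT", "ACGA")
1
>>> hamming("ACGT", "AC")
2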
def extract_geometric_plane(polygon: Polygon, plane_triangle_indices, tri_mesh: HalfEdgeTriangulation, normal: np.ndarray): """Will extract geometric details from the polygon and plane of interest Args: polygon (Polygon): Shapely Polygon of a flat surface plane_triangle_indices (ndarray uint64): Triangle indices of the plane in the mesh tri_mesh (HalfEdgeTriangulation): The mesh of the environment normal (np.ndarray): The surface normal that this plane was extracted on Returns: [type]: [description] """ # triangles:np.ndarray = np.asarray(tri_mesh.triangles) # vertices:np.ndarray = np.asarray(tri_mesh.vertices) # all_point_indices = triangles[plane_triangle_indices, :] # all_point_indices = np.reshape(all_point_indices, (np.prod(all_point_indices.shape), )) # all_point_indices = np.unique(all_point_indices) # all_points = vertices[all_point_indices, :] all_points = np.asarray(polygon.exterior.coords) # centroid = np.mean(all_points, axis=0) # TODO polygon.centroid ? normal_ransac, centroid, _ = estimate_plane(all_points) return dict(point=centroid, normal=normal, all_points=all_points, area=polygon.area, normal_ransac=normal_ransac)
c1d3a359c013622e5ca4864e50de5c34e6001c9c
8,485
import torch def get_org_df(pr_label_f, metadata_df, seq_len): """ Returns the org_df given pr_label_f,metadata_df, """ org_r, org_c = torch.nonzero(pr_label_f, as_tuple=True) org_df = cudf.DataFrame() org_df["seq_row"] = cudf.Series(org_r) org_df["org_seq_col"] = cudf.Series(org_c) org_df = org_df.merge(metadata_df) org_df = org_df.rename(columns={"seq_row": "org_seq_row"}) org_df["flat_loc_org"] = org_df["org_seq_row"] * seq_len + org_df["org_seq_col"] ### Trim overlapping and invalid predictions flag = (org_df["org_seq_col"] >= org_df["start_index"]) & ( org_df["org_seq_col"] <= org_df["stop_index"] ) org_df = org_df[flag] return org_df[["org_seq_row", "org_seq_col", "input_text_index", "flat_loc_org"]]
543bfe8f95409eefeb792ee2f94d8518fa4a3fe3
8,486
from scipy.stats import norm def binomial_proportion(nsel, ntot, coverage=0.68): """ Calculate a binomial proportion (e.g. efficiency of a selection) and its confidence interval. Parameters ---------- nsel: array-like Number of selected events. ntot: array-like Total number of events. coverage: float (optional) Requested fractional coverage of interval (default: 0.68). Returns ------- p: array of dtype float Binomial fraction. dpl: array of dtype float Lower uncertainty delta (p - pLow). dpu: array of dtype float Upper uncertainty delta (pUp - p). Examples -------- >>> p, dpl, dpu = binomial_proportion(50,100,0.68) >>> round(p, 3) 0.5 >>> round(dpl, 3) 0.049 >>> round(dpu, 3) 0.049 >>> abs(np.sqrt(0.5*(1.0-0.5)/100.0)-0.5*(dpl+dpu)) < 1e-3 True Notes ----- The confidence interval is approximate and uses the score method of Wilson. It is based on the log-likelihood profile and can undercover the true interval, but the coverage is on average closer to the nominal coverage than the exact Clopper-Pearson interval. It is impossible to achieve perfect nominal coverage as a consequence of the discreteness of the data. """ z = norm().ppf(0.5 + 0.5 * coverage) z2 = z * z p = np.asarray(nsel, dtype=np.float) / ntot div = 1.0 + z2 / ntot pm = (p + z2 / (2 * ntot)) dp = z * np.sqrt(p * (1.0 - p) / ntot + z2 / (4 * ntot * ntot)) pl = (pm - dp) / div pu = (pm + dp) / div return p, p - pl, pu - p
94b9d3cf766ca2f35f677a4421aabc1840097729
8,487
def nasnet_dual_path_scheme_ordinal(module, x, _): """ NASNet specific scheme of dual path response for an ordinal module with dual inputs/outputs in a DualPathSequential module. Parameters: ---------- module : nn.Module A module. x : Tensor Current processed tensor. Returns ------- x_next : Tensor Next processed tensor. x : Tensor Current processed tensor. """ return module(x), x
aef487a25bc3349f14a112826ee4f8e8912dd324
8,488
import json import traceback def ifttt_budget_options(): """ Option values for the budget field """ if "IFTTT-Service-Key" not in request.headers or \ request.headers["IFTTT-Service-Key"] != get_ifttt_key(): return json.dumps({"errors": [{"message": "Invalid key"}]}), 401 try: data = get_ynab_budgets() return json.dumps({"data": data}) except: traceback.print_exc() return json.dumps({"data": [{"label": "ERROR retrieving YNAB data", "value": ""}]})
c987ca533fc0568e759e4e6c6affbdb7efeb4781
8,489
import sqlite3 def get_exp_date_stats(db_file_name, Table): """Calculate exp date stats of the collection""" conn = sqlite3.connect(db_file_name) c = conn.cursor() c.execute('''SELECT exp, count(exp) FROM {} GROUP BY exp'''.format(Table)) exp_dict = {} results = c.fetchall() for result in results: exp_dict[str(result[0])] = result[1] conn.commit() conn.close() return exp_dict
7641d6309939359c1d790b66a1310b5b78be99a4
8,490
import random def create_default_identifier(node_address, token_address, target): """ The default message identifier value is the first 8 bytes of the sha3 of: - Our Address - Our target address - The token address - A random 8 byte number for uniqueness """ hash_ = sha3('{}{}{}{}'.format( node_address, target, token_address, random.randint(0, UINT64_MAX) )) return int(hash_[0:8].encode('hex'), 16)
ae63898d0130eda2cbc1a6e3861b288e9b1a4d10
8,491
import logging def set_layers_to_non_trainable(model, layers): """ Set layers of a model to non-trainable """ layers_to_non_trainable = [model.layers[i] for i in layers] for layer in layers_to_non_trainable: layer.trainable = False for layer in model.layers: logging.debug("Layer %s is trainable: %s" % (layer.name, layer.trainable)) return model
4f01706247984e4987d777d43d4b769cf262ee20
8,492
def print_scientific_16(value: float) -> str: """ Prints a value in 16-character scientific notation. This is a sub-method and shouldnt typically be called .. seealso:: print_float_16 for a better method """ if value == 0.0: return '%16s' % '0.' python_value = '%16.14e' % value # -1.e-2 svalue, sexponent = python_value.strip().split('e') exponent = int(sexponent) # removes 0s if abs(value) < 1.: sign = '-' else: sign = '+' # the exponent will be added later... sexp2 = str(exponent).strip('-+') value2 = float(svalue) # the plus 1 is for the sign len_sexp = len(sexp2) + 1 leftover = 16 - len_sexp if value < 0: fmt = "%%1.%sf" % (leftover - 3) else: fmt = "%%1.%sf" % (leftover - 2) svalue3 = fmt % value2 svalue4 = svalue3.strip('0') field = "%16s" % (svalue4 + sign + sexp2) return field
18072bfb5cc51e83f1c26086558abc4019e4737e
8,493
def _interpolate_target(bin_edges, y_vals, idx, target): """Helper to identify when a function y that has been discretized hits value target. idx is the first index where y is greater than the target """ if idx == 0: y_1 = 0. else: y_1 = y_vals[idx - 1] y_2 = y_vals[idx] edge_1 = bin_edges[idx] edge_2 = bin_edges[idx + 1] frac = (target - y_1) / (y_2 - y_1) x = edge_1 + frac * (edge_2 - edge_1) return x
7a84bc846c8446aa7449732fdb60171d6f144863
8,494
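A worked doctest-style example of the interpolation, using made-up bin edges and y values: with y_1 = 0.25, y_2 = 0.75 and target 0.5, frac = 0.5, so x lands halfway between the two bin edges:
>>> _interpolate_target([0.0, 1.0, 2.0, 3.0], [0.25, 0.75, 1.0], 1, 0.5)
1.5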
def azimuth_range_to_lat_lon(azimuths, ranges, center_lon, center_lat, geod=None): """Convert azimuth and range locations in a polar coordinate system to lat/lon coordinates. Pole refers to the origin of the coordinate system. Parameters ---------- azimuths : array_like array of azimuths defining the grid. If not a `pint.Quantity`, assumed to be in degrees. ranges : array_like array of range distances from the pole. Typically in meters. center_lat : float The latitude of the pole in decimal degrees center_lon : float The longitude of the pole in decimal degrees geod : `pyproj.Geod` or ``None`` PyProj Geod to use for forward azimuth and distance calculations. If ``None``, use a default spherical ellipsoid. Returns ------- lon, lat : 2D arrays of longitudes and latitudes corresponding to original locations Notes ----- Credit to Brian Blaylock for the original implementation. """ if geod is None: g = Geod(ellps='sphere') else: g = geod rng2d, az2d = np.meshgrid(ranges, azimuths) lats = np.full(az2d.shape, center_lat) lons = np.full(az2d.shape, center_lon) lon, lat, _ = g.fwd(lons, lats, az2d, rng2d) return lon, lat
a68e9e6731393f454d5725267b5a7c56e2afaedd
8,495
def count_path_recursive(m, n): """Count number of paths with the recursive method.""" def traverse(m, n, location=[1, 1]): # return 0 if past edge if location[0] > m or location[1] > n: return 0 # return 1 if at end position if location == [m, n]: return 1 return traverse(m, n, [location[0] + 1, location[1]]) + traverse(m, n, [location[0], location[1] + 1]) return traverse(m, n)
ad31718d179bf46966117ecfa414807e6d356634
8,496
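A quick doctest-style check of the recursion: for an m-by-n grid the count matches the binomial coefficient C(m+n-2, m-1):
>>> count_path_recursive(2, 2)
2
>>> count_path_recursive(3, 3)
6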
def markdown(caller): """Renders the argument to markdown. Useful in `{% filter markdown() %} ` blocks Args: caller (str): Markdown source Returns: str: rendered HTML """ return render_markdown(caller)
fd3fcea8ae9cbac660c1f8971e89baa1c61467ac
8,497
from typing import List import warnings def aggregate_threedi_results(gridadmin: str, results_3di: str, demanded_aggregations: List[Aggregation], bbox=None, start_time: int = None, end_time: int = None, subsets=None, epsg: int = 28992, interpolation_method: str = None, resample_point_layer: bool = False, resolution: float = None, output_flowlines: bool = True, output_nodes: bool = True, output_cells: bool = True, output_rasters: bool = True): """ # TODO: use new version of threedi_ogr that inludes adding default attributes to nodes, cells and flowline layers :param resolution: :param interpolation_method: :param gridadmin: path to gridadmin.h5 :param results_3di: path to results_3di.nc :param demanded_aggregations: list of dicts containing variable, method, [threshold] :param bbox: bounding box [min_x, min_y, max_x, max_y] :param start_time: start of time filter (seconds since start of simulation) :param end_time: end of time filter (seconds since start of simulation) :param subsets: :param epsg: epsg code to project the results to :return: an ogr Memory DataSource with one or more Layers: node (point), cell (polygon) or flowline (linestring) with the aggregation results :rtype: ogr.DataSource """ # make output datasource and layers tgt_drv = ogr.GetDriverByName('MEMORY') tgt_ds = tgt_drv.CreateDataSource('') out_rasters = {} if not (output_flowlines or output_nodes or output_cells or output_rasters): return tgt_ds, out_rasters if resample_point_layer and (not output_nodes): resample_point_layer = False # perform demanded aggregations node_results = dict() line_results = dict() first_pass_nodes = True first_pass_flowlines = True for da in demanded_aggregations: # It would seem more sensical to keep the instantiatian of gr, the subsetting and filtering outside the loop... # ... 
but for some strange reason that leads to an error if more than 2 flowline aggregations are demanded gr = GridH5ResultAdmin(gridadmin, results_3di) # TODO: select subset # Spatial filtering if bbox is None: lines = gr.lines nodes = gr.nodes cells = gr.cells else: if bbox[0] >= bbox[2] or bbox[1] >= bbox[3]: raise Exception('Invalid bounding box.') lines = gr.lines.filter(line_coords__in_bbox=bbox) if lines.count == 0: raise Exception('No flowlines found within bounding box.') nodes = gr.nodes.filter(coordinates__in_bbox=bbox) cells = gr.cells.filter( coordinates__in_bbox=bbox) # filter on cell center coordinates to have the same results for cells as for nodes if nodes.count == 0: raise Exception('No nodes found within bounding box.') new_column_name = da.as_column_name() if da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_FLOW]): if output_flowlines: if first_pass_flowlines: first_pass_flowlines = False try: line_results[new_column_name] = time_aggregate(nodes_or_lines=lines, start_time=start_time, end_time=end_time, aggregation=da ) except AttributeError: warnings.warn('Demanded aggregation of variable that is not included in these 3Di results') line_results[new_column_name] = np.full(len(line_results['id']), fill_value=None, dtype=np.float) elif da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE]): if output_nodes or output_cells or output_rasters: if first_pass_nodes: first_pass_nodes = False try: node_results[new_column_name] = time_aggregate(nodes_or_lines=nodes, start_time=start_time, end_time=end_time, aggregation=da ) except AttributeError: warnings.warn('Demanded aggregation of variable that is not included in these 3Di results') node_results[new_column_name] = np.full(len(node_results['id']), fill_value=None, dtype=np.float) elif da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE_HYBRID]): if output_nodes or output_cells or output_rasters: if first_pass_nodes: first_pass_nodes = False try: node_results[new_column_name] = hybrid_time_aggregate(gr=gr, ids=nodes.id, start_time=start_time, end_time=end_time, aggregation=da ) except AttributeError: warnings.warn('Demanded aggregation of variable that is not included in these 3Di results') node_results[new_column_name] = np.full(len(node_results['id']), fill_value=None, dtype=np.float) # translate results to GIS layers # node and cell layers if len(node_results) > 0: attributes = node_results attr_data_types = {} for attr, vals in node_results.items(): try: attr_data_types[attr] = NP_OGR_DTYPES[vals.dtype] except KeyError: attr_data_types[attr] = ogr.OFTString if output_nodes: threedigrid_to_ogr(threedigrid_src=nodes, tgt_ds=tgt_ds, attributes=attributes, attr_data_types=attr_data_types ) if output_cells or output_rasters or resample_point_layer: threedigrid_to_ogr(threedigrid_src=cells, tgt_ds=tgt_ds, attributes=attributes, attr_data_types=attr_data_types ) # rasters if output_rasters or resample_point_layer: cell_layer = tgt_ds.GetLayerByName('cell') if cell_layer.GetFeatureCount() > 0: first_pass_rasters = True if (resolution is None or resolution == 0): resolution = gr.grid.dx[0] column_names = [] band_nr = 0 for da in demanded_aggregations: if da.variable.short_name in AGGREGATION_VARIABLES.short_names(var_types=[VT_NODE, VT_NODE_HYBRID]): col = da.as_column_name() band_nr += 1 out_rasters[col] = rasterize_cell_layer(cell_layer=cell_layer, column_name=col, pixel_size=resolution, interpolation_method=interpolation_method, 
pre_resample_method=da.variable.pre_resample_method) column_names.append(col) if first_pass_rasters: first_pass_rasters = False tmp_drv = gdal.GetDriverByName('MEM') tmp_ds = tmp_drv.CreateCopy('multiband', out_rasters[col]) # create resampled nodes output target_node_layer if resample_point_layer: srs = osr.SpatialReference() srs.ImportFromWkt(tmp_ds.GetProjection()) points_resampled_lyr = tgt_ds.CreateLayer('node_resampled', srs=srs, geom_type=ogr.wkbPoint) field = ogr.FieldDefn(col, ogr.OFTReal) points_resampled_lyr.CreateField(field) else: tmp_ds.AddBand(datatype=gdal.GDT_Float32) src_band = out_rasters[col].GetRasterBand(1) src_arr = src_band.ReadAsArray() tmp_band = tmp_ds.GetRasterBand(band_nr) tmp_band.WriteArray(src_arr) tmp_band.SetNoDataValue(src_band.GetNoDataValue()) if resample_point_layer: field = ogr.FieldDefn(col, ogr.OFTReal) points_resampled_lyr.CreateField(field) if resample_point_layer: tmp_points_resampled = pixels_to_geoms(raster=tmp_ds, column_names=column_names, output_geom_type=ogr.wkbPoint, output_layer_name='unimportant name') tmp_points_resampled_lyr = tmp_points_resampled.GetLayer(0) for feat in tmp_points_resampled_lyr: points_resampled_lyr.CreateFeature(feat) feat = None # flowline target_node_layer if len(line_results) > 0 and output_flowlines: attributes = line_results attr_data_types = {} for attr, vals in line_results.items(): try: attr_data_types[attr] = NP_OGR_DTYPES[vals.dtype] except KeyError: attr_data_types[attr] = ogr.OFTString threedigrid_to_ogr(threedigrid_src=lines, tgt_ds=tgt_ds, attributes=attributes, attr_data_types=attr_data_types) if not output_rasters: out_rasters = {} if (not output_cells) and (resample_point_layer or output_rasters): tgt_ds.DeleteLayer('cell') return tgt_ds, out_rasters
30fed652c2073350d8683f314b40829f38e4e4ee
8,498
from typing import Dict from typing import Any def color_menu(colno: int, colname: str, entry: Dict[str, Any]) -> int: # pylint: disable=unused-argument """color the menu""" if entry.get("__shadowed") is True: return 8 if entry.get("__deprecated") is True: return 9 return 2
090dc76475fbe7507c9687127306c34b0652e16a
8,499