import numpy as np
import matplotlib.pyplot as plt


def plot_displacement(A, B, save=False, labels=None):
    """
    A and B are both num_samples x num_dimensions; for now, num_dimensions must be 2.
    """
    assert A.shape == B.shape
    assert A.shape[1] == 2
    if labels is not None:
        assert len(labels) == A.shape[0]
    delta = B - A
    delta_dir = delta / np.linalg.norm(delta, axis=1).reshape(-1, 1)
    plt.figure()
    # set size
    xmin = min(min(A[:, 0]), min(B[:, 0]))
    xmax = max(max(A[:, 0]), max(B[:, 0]))
    ymin = min(min(A[:, 1]), min(B[:, 1]))
    ymax = max(max(A[:, 1]), max(B[:, 1]))
    plt.xlim(1.1 * xmin, 1.1 * xmax)
    plt.ylim(1.1 * ymin, 1.1 * ymax)
    # add displacement arrows, possibly labels
    offset = 0.05
    for i in range(A.shape[0]):
        plt.arrow(A[i, 0] + offset * delta_dir[i, 0],
                  A[i, 1] + offset * delta_dir[i, 1],
                  delta[i, 0] - 2 * offset * delta_dir[i, 0],
                  delta[i, 1] - 2 * offset * delta_dir[i, 1],
                  length_includes_head=True, alpha=0.5, color='grey',
                  head_width=0.08, head_length=0.08, width=0.009)
        if labels is not None:
            plt.annotate(labels[i], xy=A[i, :], xytext=A[i, :], color='red')
            plt.annotate(labels[i], xy=B[i, :], xytext=B[i, :], color='blue')
    if labels is None:
        # without labels, just plot points
        plt.scatter(A[:, 0], A[:, 1], s=35, c='red', linewidths=0)
        plt.scatter(B[:, 0], B[:, 1], s=35, c='blue', linewidths=0)
    plt.axhline(0, color='grey', linestyle='--')
    plt.axvline(0, color='grey', linestyle='--')
    # show or save
    if save:
        plt.savefig('fig.png')
    else:
        plt.show()
    return True
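A minimal usage sketch (the points here are made up for illustration; labels are optional):

import numpy as np

A = np.random.randn(6, 2)
B = A + 0.5 * np.random.randn(6, 2)
plot_displacement(A, B)             # scatter plus displacement arrows
plot_displacement(A, B, save=True)  # writes fig.png instead of showing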
8cd05bbe56c590923e5d3a51d2b6eaf933b6a71b
3,700
import math


def t_dp(tdb, rh):
    """
    Calculates the dew point temperature.

    Parameters
    ----------
    tdb: float
        dry-bulb air temperature, [°C]
    rh: float
        relative humidity, [%]

    Returns
    -------
    t_dp: float
        dew point temperature, [°C]
    """
    # Arden Buck equation constants
    c = 257.14
    b = 18.678
    a = 6.1121  # saturation vapour pressure coefficient (unused here)
    d = 234.5

    gamma_m = math.log(rh / 100 * math.exp((b - tdb / d) * (tdb / (c + tdb))))

    return round(c * gamma_m / (b - gamma_m), 1)
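For example, at a dry-bulb temperature of 25 °C and 50 % relative humidity these constants give a dew point of roughly 13.8 °C:

print(t_dp(25, 50))  # -> 13.8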
d3cd7de10ef51f36bc5d5bd978d991d5bfe3ba4c
3,701
import os
import zipfile


def extract_archive(filepath):
    """
    Returns the path of the archive

    :param str filepath: Path to file to extract or read
    :return: path of the archive
    :rtype: str
    """
    # SHA1_file, to_bytes, create_archive_info, SLACKVIEWER_TEMP_PATH and the
    # slackviewer package are provided by the surrounding module.

    # Checks if file path is a directory
    if os.path.isdir(filepath):
        path = os.path.abspath(filepath)
        print("Archive already extracted. Viewing from {}...".format(path))
        return path

    # Checks if the filepath is a zipfile and continues to extract if it is;
    # if not it raises an error
    elif not zipfile.is_zipfile(filepath):
        # Misuse of TypeError? :P
        raise TypeError("{} is not a zipfile".format(filepath))

    archive_sha = SHA1_file(
        filepath=filepath,
        # Add version of slackviewer to hash as well so we can invalidate the
        # cached copy if there are new features added
        extra=to_bytes(slackviewer.__version__)
    )

    extracted_path = os.path.join(SLACKVIEWER_TEMP_PATH, archive_sha)
    if os.path.exists(extracted_path):
        print("{} already exists".format(extracted_path))
    else:
        # Extract zip (renamed from `zip` to avoid shadowing the builtin)
        with zipfile.ZipFile(filepath) as zip_file:
            print("{} extracting to {}...".format(filepath, extracted_path))
            zip_file.extractall(path=extracted_path)
        print("{} extracted to {}".format(filepath, extracted_path))
        # Add additional file with archive info
        create_archive_info(filepath, extracted_path, archive_sha)

    return extracted_path
2a066b7bd888833d035d30945d5b24653b0620a6
3,702
def LinkConfig(reset=0, loopback=0, scrambling=1):
    """Link Configuration of TS1/TS2 Ordered Sets."""
    value = (reset << 0)
    value |= (loopback << 2)
    value |= ((not scrambling) << 3)
    return value
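The field packs into individual bits: bit 0 is reset, bit 2 is loopback, and bit 3 is set when scrambling is *disabled*. A few sanity checks, worked out by hand from the shifts above:

assert LinkConfig() == 0                 # defaults: scrambling on, nothing else set
assert LinkConfig(reset=1) == 0b0001
assert LinkConfig(loopback=1) == 0b0100
assert LinkConfig(scrambling=0) == 0b1000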
901fe6df8bbe8dfa65cd516ac14692594608edfb
3,703
def test_tedlium_release():
    """
    Feature: TedliumDataset
    Description: test the release argument of tedlium
    Expectation: an invalid release value raises an error
    """
    def test_config(release):
        try:
            ds.TedliumDataset(DATA_DIR_TEDLIUM_RELEASE12, release)
        except (ValueError, TypeError, RuntimeError) as e:
            return str(e)
        return None

    # test the release
    assert "release is not within the valid set of ['release1', 'release2', 'release3']" in test_config("invalid")
    assert "Argument release with value None is not of type [<class 'str'>]" in test_config(None)
    assert "Argument release with value ['list'] is not of type [<class 'str'>]" in test_config(["list"])
a25f042745d1eea4dca62195e1259b7bcb8beb8d
3,704
import numpy as np


def get_data(n, input_dim, y_dim, attention_column=1):
    """
    Data generation. x is purely random except that every third column carries
    the corresponding target column of y. In practice, the network should
    learn that the targets live in those columns, so most of its attention
    should be focused on them.

    :param n: the number of samples to retrieve.
    :param input_dim: the number of dimensions of each element in the series.
    :param y_dim: the number of target dimensions.
    :param attention_column: the column linked to the target. Everything else
        is purely random. (Unused in this variant, which writes the targets to
        columns 0, 3, 6, ...)
    :return: x: model inputs, y: model targets
    """
    x = np.random.standard_normal(size=(n, input_dim))
    y = np.random.randint(low=0, high=2, size=(n, y_dim))
    for i in range(y_dim):
        x[:, i * 3] = y[:, i]
    return x, y
4c132abec92e8cd0ca6afd06e4032116d3631a50
3,705
from lxml import etree


def generate_tautomer_hydrogen_definitions(hydrogens, residue_name, isomer_index):
    """
    Creates a hxml file that is used to add hydrogens for a specific tautomer
    to the heavy-atom skeleton

    Parameters
    ----------
    hydrogens: list of tuple
        Tuple contains two atom names: (hydrogen-atom-name, heavy-atom-atom-name)
    residue_name : str
        name of the residue to fill the Residues entry in the xml tree
    isomer_index : int
        index of the tautomer (currently unused in this function)
    """
    hydrogen_definitions_tree = etree.fromstring("<Residues/>")
    hydrogen_file_residue = etree.fromstring("<Residue/>")
    hydrogen_file_residue.set("name", residue_name)
    for name, parent in hydrogens:
        h_xml = etree.fromstring("<H/>")
        h_xml.set("name", name)
        h_xml.set("parent", parent)
        hydrogen_file_residue.append(h_xml)
    hydrogen_definitions_tree.append(hydrogen_file_residue)
    return hydrogen_definitions_tree
69ec0c489d93edc7d8fe3ccde8df25c9c2fd01c9
3,706
from twisted.internet import defer


def login(client=None, **defaults):
    """
    Log in and return a Deferred firing with a RemoteReference to the
    Perspective. The keyword defaults may include:

    @param host:
    @param port:
    @param identityName:
    @param password:
    @param serviceName:
    @param perspectiveName:

    @returntype: Deferred RemoteReference of Perspective
    """
    d = defer.Deferred()
    LoginDialog(client, d, defaults)
    return d
a6c4593ef5b1fd29f46cba4a6484049bd093b6da
3,707
import time
import json
import traceback


def insertInstrument():
    """ Insert a new instrument or edit an existing instrument on a DAQBroker
    database. Guest users are not allowed to create instruments.

    .. :quickref: Create/Edit instrument; Creates or edits a DAQBroker instrument

    :param: Name : (String) unique instrument name
    :param: instid : (Integer) unique instrument identifier. Used to edit an existing instrument
    :param: description : (String) description of the instrument
    :param: email : (String) contact information for the instrument operator
    :param: Files : (Optional) JSON encoded list of instrument data source objects. Each contains the following keys:

        | ``name`` : (String) name of the data source
        | ``metaid`` : (Integer) unique data source identifier. Only used to edit existing data sources
        | ``type`` : (Integer) type of instrument data source
        | ``node`` : (String) unique network node identifier
        | ``remarks`` : (String) JSON encoded object of extra data source information
        | ``channels`` : (Optional) JSON encoded list of data channel objects. Each contains the following keys:

            | ``Name`` : (String) data channel name
            | ``channelid`` : (Integer) unique channel identifier. -1 if the channel is new. Positive integer if the channel already exists
            | ``description`` : (String) data channel description
            | ``units`` : (String) data channel physical units
            | ``channeltype`` : (Integer) type of data channel (as handled below)

                | ``1`` : Number
                | ``2`` : Text
                | ``3`` : Custom

            | ``active`` : (Boolean) channel is shown on interface
            | ``fileorder`` : (Integer) Used to order channels in a data source
            | ``alias`` : (String) Original data channel name. Kept constant when name changes
            | ``remarks`` : (String) JSON encoded object with extra information
            | ``oldname`` : (String) Old channel name. Used to detect changes in the channel name
            | ``channeltypeOld`` : (Integer) Old channel type. Used to detect changes in the channel type
    """
    processRequest = request.get_json()
    Session = sessionmaker(bind=current_user.engineObj)
    session = Session()
    conn = current_user.engineObj.connect()
    ctx = MigrationContext.configure(conn)
    op = Operations(ctx)
    try:
        if 'instid' in processRequest:
            newInst = False
            instid = processRequest['instid']
            instrument = session.query(daqbrokerDatabase.instruments).filter_by(instid=instid).first()
        else:
            newInst = True
            maxInst = session.query(func.max(daqbrokerDatabase.instruments.instid)).one_or_none()
            if maxInst[0]:
                maxInstid = maxInst[0]
            else:
                maxInstid = 0
            instid = maxInstid + 1
            instrument = daqbrokerDatabase.instruments(
                Name=processRequest['Name'],
                instid=instid,
                active=False,
                description=processRequest['description'],
                username=current_user.username,
                email=processRequest['email'],
                insttype=0,
                log=None)
        # Now I have an object called "instrument" that I can use to add sources
        # and metadatas, and to those metadatas I should be able to add channels.
        for file in processRequest['files']:
            if 'metaid' in file:
                metadata = session.query(daqbrokerDatabase.instmeta).filter_by(metaid=file["metaid"]).first()
                metadata.clock = time.time() * 1000
                metadata.name = file['name']
                metadata.type = file['type']
                metadata.node = file['node']
                metadata.remarks = json.dumps(file['remarks'])
            else:
                maxMeta = session.query(func.max(daqbrokerDatabase.instmeta.metaid)).first()
                if maxMeta[0]:
                    maxMetaid = maxMeta[0]
                else:
                    maxMetaid = 0
                metaid = maxMetaid + 1
                metadata = daqbrokerDatabase.instmeta(
                    clock=time.time() * 1000,
                    name=file['name'],
                    metaid=metaid,
                    type=file["type"],
                    node=file["node"],
                    remarks=json.dumps(file['remarks']),
                    sentRequest=False,
                    lastAction=0,
                    lasterrortime=0,
                    lasterror='',
                    lockSync=False)
                instrument.sources.append(metadata)
            channelid = None
            if 'channels' in file:
                channelsInsert = []
                for channel in file['channels']:
                    if int(channel['channelid']) < 0:  # New channel - have to insert
                        maxChannel = session.query(func.max(daqbrokerDatabase.channels.channelid)).first()
                        if not channelid:
                            if maxChannel[0]:
                                maxChannelid = maxChannel[0]
                            else:
                                maxChannelid = 0
                            channelid = maxChannelid + 1
                        else:
                            channelid = channelid + 1
                        if 'remarks' in channel and len(channel["remarks"].keys()) > 0:
                            theRemarks = json.dumps(channel["remarks"])
                        else:
                            theRemarks = json.dumps({})
                        theChannel = daqbrokerDatabase.channels(
                            Name=channel["Name"],
                            channelid=channelid,
                            channeltype=int(channel["channeltype"]),
                            valuetype=0,
                            units=channel['units'],
                            description=channel['description'],
                            active=int(channel['active']) == 1,
                            remarks=theRemarks,
                            lastclock=0,
                            lastValue=None,
                            firstClock=0,
                            fileorder=channel['fileorder'],
                            alias=channel['alias'])
                        metadata.channels.append(theChannel)
                        channelsInsert.append({'name': channel["Name"], 'type': int(channel["channeltype"])})
                        if not newInst:
                            extra = ''
                            if int(channel['channeltype']) == 1:
                                newType = daqbrokerDatabase.Float
                                extra = "\"" + channel["Name"] + "\"::double precision"
                                column = daqbrokerDatabase.Column(channel["Name"], newType)
                                op.add_column(processRequest['Name'] + "_data", column)
                            elif int(channel['channeltype']) == 2:
                                newType = daqbrokerDatabase.Text
                                column = daqbrokerDatabase.Column(channel["Name"], newType)
                                op.add_column(processRequest['Name'] + "_data", column)
                            elif int(channel['channeltype']) == 3:
                                extra = "\"" + channel["Name"] + "\"::double precision"
                                theType = daqbrokerDatabase.Float
                                # was `newType`, which is undefined on this branch
                                column = daqbrokerDatabase.Column(channel["Name"], theType)
                                op.add_column(processRequest['Name'] + "_custom", column)
                    elif not newInst:
                        theChannel = session.query(daqbrokerDatabase.channels).filter_by(
                            channelid=channel['channelid']).first()
                        theChannel.Name = channel["Name"]
                        theChannel.channeltype = int(channel["channeltype"])
                        theChannel.units = channel['units']
                        theChannel.description = channel['description']
                        theChannel.active = int(channel['active']) == 1
                        theChannel.fileorder = channel['fileorder']
                        theChannel.alias = channel['alias']
                        if (not channel['channeltypeOld'] == channel['channeltype']) or (
                                not channel['oldName'] == str(channel['Name'])):
                            if not channel['oldName'] == str(channel['Name']):
                                newName = str(channel['Name'])
                                oldName = channel['oldName']
                            else:
                                oldName = str(channel['Name'])
                                newName = None
                            extra = ''  # ensure `extra` is defined on the rename-only path
                            if not channel['channeltypeOld'] == channel['channeltype']:
                                if channel['channeltype'] == 1 or channel['channeltype'] == 3:
                                    newType = daqbrokerDatabase.Float
                                    extra = "\"" + oldName + "\"::double precision"
                                else:
                                    newType = daqbrokerDatabase.Text
                                    extra = None
                            else:
                                newType = None
                            if not channel['channeltypeOld'] == channel['channeltype'] \
                                    and channel['channeltype'] == 3:
                                # moved into the custom table
                                if not newName:
                                    theName = oldName
                                else:
                                    theName = newName
                                if not newType:
                                    theType = daqbrokerDatabase.Float
                                else:
                                    theType = newType
                                column = daqbrokerDatabase.Column(theName, theType)
                                op.drop_column(processRequest['Name'] + "_data", oldName)
                                op.add_column(processRequest['Name'] + "_custom", column)
                            elif not channel['channeltypeOld'] == channel['channeltype'] \
                                    and channel['channeltypeOld'] == 3:
                                # moved out of the custom table (was `!= 3`, which
                                # would have dropped a column from the wrong table)
                                if not newName:
                                    theName = oldName
                                else:
                                    theName = newName
                                if not newType:
                                    if channel['channeltypeOld'] == 1:
                                        theType = daqbrokerDatabase.Float
                                    else:
                                        theType = daqbrokerDatabase.Text
                                else:
                                    theType = newType
                                column = daqbrokerDatabase.Column(theName, theType)
                                op.drop_column(processRequest['Name'] + "_custom", oldName)
                                op.add_column(processRequest['Name'] + "_data", column)
                            else:
                                if channel['channeltype'] == 1 or channel['channeltype'] == 2:
                                    if extra:
                                        op.alter_column(
                                            processRequest['Name'] + "_data",
                                            oldName,
                                            new_column_name=newName,
                                            type_=newType,
                                            postgresql_using=extra)
                                    else:
                                        op.alter_column(
                                            processRequest['Name'] + "_data",
                                            oldName,
                                            new_column_name=newName,
                                            type_=newType)
                                else:
                                    if extra == '':
                                        op.alter_column(
                                            processRequest['Name'] + "_custom",
                                            oldName,
                                            new_column_name=newName,
                                            type_=newType)
                                    else:
                                        op.alter_column(
                                            processRequest['Name'] + "_data",
                                            oldName,
                                            new_column_name=newName,
                                            type_=newType,
                                            postgresql_using=extra)
                    elif newInst:
                        raise InvalidUsage("Cannot issue edit channels on new instrument", status_code=401)
        if newInst:
            daqbrokerDatabase.createInstrumentTable(processRequest['Name'], channelsInsert, True)
        session.add(instrument)
        daqbrokerDatabase.daqbroker_database.metadata.create_all(current_user.engineObj)
        session.commit()
        conn.close()
        current_user.updateDB()
        return jsonify('done')
    except Exception as e:
        traceback.print_exc()
        session.rollback()
        # for statement in deleteStatements:
        #     connection.execute(statement)
        raise InvalidUsage(str(e), status_code=500)
537b89fa72b161c86c4b49a7a6813cf95df45f09
3,708
from shapely.geometry import LineString
from shapely.ops import nearest_points


def find_distance_to_major_settlement(country, major_settlements, settlement):
    """
    Finds the distance (in km) to the nearest major settlement.
    Note: the `country` parameter is currently unused.
    """
    nearest = nearest_points(settlement['geometry'], major_settlements.unary_union)[1]

    geom = LineString([
        (
            settlement['geometry'].coords[0][0],
            settlement['geometry'].coords[0][1]
        ),
        (
            nearest.coords[0][0],
            nearest.coords[0][1]
        ),
    ])

    distance_km = round(geom.length / 1e3)
    return distance_km
0bb33d3777ab0f60dfb05e69cb2f5ed711585b17
3,709
def x_for_half_max_y(xs, ys):
    """Return the x value for which the corresponding y value is half of the
    maximum y value. If there is no exact corresponding x value, one is
    calculated by linear interpolation from the two surrounding values.

    :param xs: x values
    :param ys: y values corresponding to the x values
    :return: the (possibly interpolated) x value, or None if the half maximum
        is never reached
    """
    if len(xs) != len(ys):
        raise ValueError("xs and ys must be of equal length")

    half_max_y = max(ys) / 2
    for i in range(len(xs) - 1):
        if ys[i + 1] >= half_max_y:
            x_dist = xs[i + 1] - xs[i]
            y_dist = ys[i + 1] - ys[i]
            y_offset = half_max_y - ys[i]
            if y_offset == 0:
                return xs[i]
            else:
                x_offset = y_offset / y_dist * x_dist
                return xs[i] + x_offset
    return None
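A quick worked example: with ys peaking at 4, the half maximum is 2; walking the pairs, ys[1] = 2 reaches it at the first step, and interpolating between (0, 0) and (1, 2) lands on x = 1.0:

xs = [0, 1, 2, 3]
ys = [0, 2, 4, 2]
assert x_for_half_max_y(xs, ys) == 1.0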
b18525664c98dc05d72a29f2904a13372f5696eb
3,710
from pathlib import Path
from typing import Dict, Union


def get_dict_from_dotenv_file(filename: Union[Path, str]) -> Dict[str, str]:
    """
    :param filename: .env file where values are extracted.
    :return: a dict with keys and values extracted from the .env file.
    """
    # SET_EXPORT_EXPRESSION, ITEM_EXPRESSION, DecodeError and
    # _sanitize_key_and_value are defined elsewhere in this module.
    result_dict = {}
    error_message = 'file {filename}: the line n°{index} is not correct: "{line}"'

    with open(filename) as f:
        for index, line in enumerate(f):
            stripped_line = line.strip()

            # we don't take into account comments
            if stripped_line.startswith('#'):
                continue

            # we don't take into account empty lines
            if not stripped_line:
                continue

            # we remove inline comments if there are any
            parts = stripped_line.split('#')
            # we remove set or export command if there are any
            new_line = SET_EXPORT_EXPRESSION.sub('', parts[0].strip())

            # we get key and value
            parts = new_line.split('=')
            parts = _sanitize_key_and_value(parts)
            if len(parts) != 2 or ITEM_EXPRESSION.match(parts[0]) is None \
                    or ITEM_EXPRESSION.match(parts[1]) is None:
                line_number = index + 1
                raise DecodeError(message=error_message.format(
                    filename=filename, index=line_number, line=new_line))

            result_dict[parts[0]] = parts[1]

    return result_dict
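A tiny end-to-end sketch; the file content is hypothetical, and it assumes the module's SET_EXPORT_EXPRESSION strips a leading "export " and ITEM_EXPRESSION accepts plain KEY=value tokens:

with open('.env', 'w') as f:
    f.write('# database settings\n'
            'export DB_HOST=localhost\n'
            'DB_PORT=5432  # inline comment\n')

print(get_dict_from_dotenv_file('.env'))
# -> {'DB_HOST': 'localhost', 'DB_PORT': '5432'} under those assumptions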
97bce5a69e29f9606a58a54ed5cc4d05ab45fbca
3,711
def calculate_note_numbers(note_list, key_override=None):
    """
    Takes in a list of notes, and replaces the key signature (second element
    of each note tuple) with the note's jianpu number.

    Parameters
    ----------
    note_list : list of tuples
        List of notes to calculate jianpu numbers for.
    key_override : str
        If this is provided, all notes will be assumed to be in the given key.
    """
    key_offsets = {
        'C': 0, 'C#': 1, 'Db': 1, 'D': 2, 'D#': 3, 'Eb': 3, 'E': 4,
        'F': 5, 'F#': 6, 'Gb': 6, 'G': 7, 'G#': 8, 'Ab': 8, 'A': 9,
        'A#': 10, 'Bb': 10, 'B': 11}
    num_to_jianpu = {
        0: 1, 1: 1.5, 2: 2, 3: 2.5, 4: 3, 5: 4,
        6: 4.5, 7: 5, 8: 5.5, 9: 6, 10: 6.5, 11: 7}

    note_list_numbered = []
    for note in note_list:
        if note[0] != -1:
            # the override, when given, takes precedence over the note's own key
            key = key_override if key_override is not None else note[1]
            offset = key_offsets[key]
            num = (note[0] - offset) - ((note[0] - offset) // 12) * 12
            jianpu = num_to_jianpu[num]
            note_list_numbered.append((note[0], jianpu, note[2], note[3]))
        else:
            note_list_numbered.append(note)
    return note_list_numbered
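A usage sketch, assuming each note tuple is roughly (pitch number, key, duration, octave) with -1 marking rests (only the first two fields matter here): pitch 60 in C maps to jianpu 1, and pitch 62 maps to 2:

notes = [(60, 'C', 1, 0), (62, 'C', 1, 0), (-1, None, 1, 0)]
print(calculate_note_numbers(notes))
# -> [(60, 1, 1, 0), (62, 2, 1, 0), (-1, None, 1, 0)]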
a32bbae7f64b381ad3384fdd8ae5c045c6887c87
3,712
import numpy as np


def _from_Gryzinski(DATA):
    """
    This function computes the cross section and energy values from the files
    that store information following the Gryzinski Model
    """
    a_0 = DATA['a_0']['VALUES']
    epsilon_i_H = DATA['epsilon_i_H']['VALUES']
    epsilon_i = DATA['epsilon_i']['VALUES']
    xi = DATA['xi']['VALUES']
    final_E = DATA['Final_E']['VALUES']

    Energy_range = np.linspace(epsilon_i, final_E, 200)
    u = Energy_range / epsilon_i

    gg = (1 + 2 / 3 * (1 - 1 / (2 * u)) * np.log(np.e + (u - 1) ** (1 / 2)))
    g = ((u - 1) / u ** 2) * ((u / (u + 1)) ** (3 / 2)) * ((1 - 1 / u) ** (1 / 2)) * gg
    Cross_sections = 4 * np.pi * (a_0 ** 2) * ((epsilon_i_H / epsilon_i) ** 2) * xi * g

    return (Energy_range, Cross_sections)
925fb1e76bf23915385cf56e3a663d111615700d
3,713
from haversine import haversine


def stations_within_radius(stations, centre, r):
    """Returns an alphabetically-ordered list of the names of all the stations
    (in a list of station objects) within a radius r (in km) of a central
    point (which must be a Lat/Long coordinate)."""
    # creates empty list
    name_list = []

    # extracts the necessary data from the list of stations
    for station_entry in stations:
        s_coord = station_entry.coord
        s_distance = haversine(s_coord, centre)
        # Determines if the station is within the radius
        if s_distance <= r:
            name_list.append(station_entry.name)

    # sorts the list
    name_list.sort()
    return name_list
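A minimal usage sketch; the namedtuple below stands in for whatever station class the project actually uses (only .coord and .name are assumed):

from collections import namedtuple

Station = namedtuple('Station', ['name', 'coord'])
stations = [Station('Cam', (52.21, 0.12)), Station('Thames', (51.51, -0.13))]
# stations within 50 km of central Cambridge; London's river is ~80 km away
print(stations_within_radius(stations, (52.20, 0.12), 50))  # -> ['Cam']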
36bf8312e4295638c1e297f4a31b535c9ee96eaf
3,714
from typing import List, Optional, Sequence, Union

# PluginManager, napari_plugin_manager, LayerData, HookImplementation,
# PluginCallError, abspath_or_url and logger come from napari's plugin modules.


def read_data_with_plugins(
    path: Union[str, Sequence[str]],
    plugin: Optional[str] = None,
    plugin_manager: PluginManager = napari_plugin_manager,
) -> List[LayerData]:
    """Iterate reader hooks and return first non-None LayerData or None.

    This function returns as soon as the path has been read successfully,
    while catching any plugin exceptions, storing them for later retrieval,
    providing useful error messages, and re-looping until either layer data is
    returned, or no valid readers are found.

    Exceptions will be caught and stored as PluginErrors
    (in plugins.exceptions.PLUGIN_ERRORS)

    Parameters
    ----------
    path : str
        The path (file, directory, url) to open
    plugin : str, optional
        Name of a plugin to use.  If provided, will force ``path`` to be read
        with the specified ``plugin``.  If the requested plugin cannot read
        ``path``, a PluginCallError will be raised.
    plugin_manager : plugins.PluginManager, optional
        Instance of a napari PluginManager.  By default the main napari
        plugin_manager will be used.

    Returns
    -------
    LayerData : list of tuples, or None
        LayerData that can be passed to :func:`Viewer._add_layer_from_data()
        <napari.components.add_layers_mixin.AddLayersMixin._add_layer_from_data>`.
        ``LayerData`` is a list of tuples, where each tuple is one of
        ``(data,)``, ``(data, meta)``, or ``(data, meta, layer_type)``.
        If no reader plugins are found (or they all error), returns ``None``

    Raises
    ------
    PluginCallError
        If ``plugin`` is specified but raises an Exception while reading.
    """
    hook_caller = plugin_manager.hook.napari_get_reader
    if plugin:
        if plugin not in plugin_manager.plugins:
            names = {i.plugin_name for i in hook_caller.get_hookimpls()}
            raise ValueError(
                f"There is no registered plugin named '{plugin}'.\n"
                f"Names of plugins offering readers are: {names}"
            )
        reader = hook_caller._call_plugin(plugin, path=path)
        if not callable(reader):
            raise ValueError(f'Plugin {plugin!r} does not support file {path}')
        return reader(path) or []

    errors: List[PluginCallError] = []
    path = abspath_or_url(path)
    skip_impls: List[HookImplementation] = []
    layer_data = None
    while True:
        result = hook_caller.call_with_result_obj(
            path=path, _skip_impls=skip_impls
        )
        reader = result.result  # will raise exceptions if any occurred
        if not reader:
            # we're all out of reader plugins
            break
        try:
            layer_data = reader(path)  # try to read data
            if layer_data:
                break
        except Exception as exc:
            # collect the error and log it, but don't raise it.
            err = PluginCallError(result.implementation, cause=exc)
            err.log(logger=logger)
            errors.append(err)
        # don't try this impl again
        skip_impls.append(result.implementation)

    if not layer_data:
        # if layer_data is empty, it means no plugin could read path
        # we just want to provide some useful feedback, which includes
        # whether or not paths were passed to plugins as a list.
        if isinstance(path, (tuple, list)):
            path_repr = f"[{path[0]}, ...] as stack"
        else:
            path_repr = repr(path)
        # TODO: change to a warning notification in a later PR
        raise ValueError(f'No plugin found capable of reading {path_repr}.')

    if errors:
        names = {repr(e.plugin_name) for e in errors}
        err_msg = f"({len(errors)}) error{'s' if len(errors) > 1 else ''} "
        err_msg += f"occurred in plugins: {', '.join(names)}. "
        err_msg += 'See full error logs in "Plugins → Plugin Errors..."'
        logger.error(err_msg)

    return layer_data or []
e6b83c4a55a39b07bfc8eddcd77c82351f91b1d7
3,715
def ticket_message_url(request, structure_slug, ticket_id):  # pragma: no cover
    """
    Makes URL redirect to add ticket message by user role

    :type structure_slug: String
    :type ticket_id: String

    :param structure_slug: structure slug
    :param ticket_id: ticket code

    :return: redirect
    """
    structure = get_object_or_404(OrganizationalStructure, slug=structure_slug)
    user_type = get_user_type(request.user, structure)
    return redirect('uni_ticket:{}_ticket_message'.format(user_type),
                    structure_slug, ticket_id)
29cec06302e943d74236ff82647cd28deb634107
3,716
from pathlib import Path
from datetime import datetime
import typing as T  # was `from re import T`, which does not provide T.Any


def load(
    fin: Path,
    azelfn: Path = None,
    treq: list[datetime] = None,
    wavelenreq: list[str] = None,
    wavelength_altitude_km: dict[str, float] = None,
) -> dict[str, T.Any]:
    """
    reads FITS images and spatial az/el calibration for allsky camera

    Bdecl is in degrees, from IGRF model
    """
    fin = Path(fin).expanduser()

    if fin.is_file() and fin.suffix in (".h5", ".hdf5"):
        return load_hdf5(fin, treq, wavelenreq)

    flist = _slicereq(fin, treq, wavelenreq)
    if not flist:
        raise FileNotFoundError(f"No files found in {fin}")

    # %% load data from good files, discarding bad
    imgs = _sift(flist)
    # %% camera location
    imgs = _camloc(imgs, flist[0].parent)
    # %% az / el
    imgs = _azel(azelfn, imgs)
    # %% projections
    imgs = _project(imgs, wavelength_altitude_km)

    return imgs
bb656482c1db134c3045ac5a30b9cecb8d9f6716
3,717
import logging


def loadData(data_source, loc, run, indexes, ntry=0, __text__=None, __prog__=None):
    """
    Loads the data from a remote source. Has hooks for progress bars.
    """
    if __text__ is not None:
        __text__.emit("Decoding File")

    if data_source.getName() == "Local WRF-ARW":
        url = data_source.getURLList(outlet="Local")[0].replace("file://", "")
        decoder = ARWDecoder
        dec = decoder((url, loc[0], loc[1]))
    else:
        decoder, url = data_source.getDecoderAndURL(loc, run, outlet_num=ntry)
        logging.info("Using decoder: " + str(decoder))
        logging.info("Data URL: " + url)
        dec = decoder(url)

    if __text__ is not None:
        __text__.emit("Creating Profiles")

    profs = dec.getProfiles(indexes=indexes)
    return profs
d06fd6fb6194dac63911a5b9c3ad267525098cd2
3,718
from numpy import sinh, sin, cosh, cos


def comp_psip_skin(self, u):
    """psip_skin for skin effect computation

    Parameters
    ----------
    self : Conductor
        A Conductor object

    Returns
    -------
    y : float
        value of the psip_skin function at u
    """
    y = (1 / u) * (sinh(u) + sin(u)) / (cosh(u) + cos(u))  # p257 Pyrhonen
    # y[u==0]=1
    return y
23fe18a13b56f38c49fd3ca14b557983064c65b3
3,719
import random

from flask import render_template


def homepage(var=None):
    """
    The function returns the homepage html template.
    """
    # A default of random.randint(0, 1000) would be evaluated only once, at
    # definition time; drawing inside the body gives a fresh value per call.
    if var is None:
        var = random.randint(0, 1000)
    return render_template("index.html", var=var)
17e9033b8abeaa990cd31008861e5c412e35d1d7
3,720
import numbers


def tier(value):
    """
    A special function of ordinals which does not correspond to any
    mathematically useful function. Maps ordinals to small objects,
    effectively compressing the range. Used to speed up comparisons when the
    operands are very different sizes.

    In the current version, this is a map from ordinals to 2-tuples of
    integers; however, this is subject to change at any time, so please do not
    retain long lived records of what tier an ordinal number is.
    """
    if isinstance(value, numbers.Real):
        value = ordinal(value)
    if isinstance(value, ordinal):
        return value._tier
    raise ValueError('Value is not of a known type representing a mathematical ordinal.')
851b36cf22c09d8f94168a0aa55292754451e351
3,721
from .models import Sequence


def get_next_value(
    sequence_name="default",
    initial_value=1,
    reset_value=None,
    *,
    nowait=False,
    using=None,
    override=None,
):
    """
    Return the next value for a given sequence.
    """
    # Inner import because models cannot be imported before their application.
    # router, connections, transaction and the POSTGRESQL_UPSERT /
    # MYSQL_UPSERT / SELECT SQL templates come from the surrounding module.
    if reset_value is not None:
        assert initial_value < reset_value

    if using is None:
        using = router.db_for_write(Sequence)

    connection = connections[using]
    db_table = connection.ops.quote_name(Sequence._meta.db_table)

    if (
        connection.vendor == "postgresql"
        # Remove when dropping Django 2.2. Django 3.0 requires PostgreSQL 9.5.
        and getattr(connection, "pg_version", 0) >= 90500
        and reset_value is None
        and not nowait
    ):
        # PostgreSQL ≥ 9.5 supports "upsert".
        # This is about 3x faster than the naive implementation.
        with connection.cursor() as cursor:
            cursor.execute(
                POSTGRESQL_UPSERT.format(db_table=db_table),
                [sequence_name, initial_value],
            )
            result = cursor.fetchone()
        return result[0]

    elif connection.vendor == "mysql" and reset_value is None and not nowait:
        # MySQL supports "upsert" but not "returning".
        # This is about 2x faster than the naive implementation.
        with transaction.atomic(using=using, savepoint=False):
            with connection.cursor() as cursor:
                cursor.execute(
                    MYSQL_UPSERT.format(db_table=db_table),
                    [sequence_name, initial_value],
                )
                cursor.execute(
                    SELECT.format(db_table=db_table),
                    [sequence_name],
                )
                result = cursor.fetchone()
        return result[0]

    else:
        # Default, ORM-based implementation for all other cases.
        with transaction.atomic(using=using, savepoint=False):
            sequences = Sequence.objects.select_for_update(nowait=nowait)
            sequence, created = sequences.get_or_create(
                name=sequence_name,
                defaults={"last": initial_value},
            )

            if not created:
                sequence.last += 1
                if reset_value is not None and sequence.last >= reset_value:
                    sequence.last = initial_value
                if override is not None:  # was misspelled `overrite`
                    sequence.last = override
                sequence.save()

            return sequence.last
0770c8d4a4bea732bacfe6b7eaa404546bb79699
3,722
import numpy as np


def expected_shd(posterior, ground_truth):
    """Compute the Expected Structural Hamming Distance.

    This function computes the Expected SHD between a posterior approximation
    given as a collection of samples from the posterior, and the ground-truth
    graph used in the original data generation process.

    Parameters
    ----------
    posterior : np.ndarray instance
        Posterior approximation. The array must have size `(B, N, N)`, where
        `B` is the number of sample graphs from the posterior approximation,
        and `N` is the number of variables in the graphs.

    ground_truth : np.ndarray instance
        Adjacency matrix of the ground-truth graph. The array must have size
        `(N, N)`, where `N` is the number of variables in the graph.

    Returns
    -------
    e_shd : float
        The Expected SHD.
    """
    # Compute the pairwise differences
    diff = np.abs(posterior - np.expand_dims(ground_truth, axis=0))
    diff = diff + diff.transpose((0, 2, 1))

    # Ignore double edges
    diff = np.minimum(diff, 1)

    shds = np.sum(diff, axis=(1, 2)) / 2
    return np.mean(shds)
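A tiny check of the symmetrization logic: against a ground truth with the single edge 0→1, an identical sample scores 0 and a sample with the edge reversed scores 1 (the add-transpose-then-clip step makes a reversal count once, not twice), so the expectation over both samples is 0.5:

gt = np.array([[0, 1], [0, 0]])
samples = np.array([[[0, 1], [0, 0]],   # identical -> SHD 0
                    [[0, 0], [1, 0]]])  # reversed edge -> SHD 1
print(expected_shd(samples, gt))  # -> 0.5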
5e0daf39a13fc0a4cb7a4f5d0a9fe692fdae82db
3,723
import json


def package_list_read(pkgpath):
    """Read package list.

    Note: the `pkgpath` argument is currently ignored; the module-level
    PACKAGE_LIST_FILE constant is what gets read.
    """
    try:
        with open(PACKAGE_LIST_FILE, 'r') as pkglistfile:
            return json.loads(pkglistfile.read())
    except Exception:
        return []
afb97afd20823563ecfda3b5c908f7ad70322868
3,724
import pandas
from numba import types  # stdlib `types` has no Integer/Float; these are Numba types

# SeriesType and TypingError are provided by the surrounding HPAT module.


def hpat_pandas_series_le(self, other, level=None, fill_value=None, axis=0):
    """
    Pandas Series method :meth:`pandas.Series.le` implementation.

    .. only:: developer

       Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8

    Parameters
    ----------
    self: :class:`pandas.Series`
        input arg
    other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
        input arg
    level: :obj:`int` or name
        *unsupported*
    fill_value: :obj:`float` or None, default None
        *unsupported*
    axis: default 0
        *unsupported*

    Returns
    -------
    :obj:`pandas.Series`
        returns :obj:`pandas.Series` object
    """
    _func_name = 'Method le().'

    if not isinstance(self, SeriesType):
        raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))

    if level is not None or fill_value is not None or axis != 0:
        raise TypingError(
            '{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(
                _func_name, level, fill_value, axis))

    if isinstance(other, SeriesType):
        def hpat_pandas_series_le_impl(self, other):
            """
            Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8
            """
            return pandas.Series(self._data <= other._data)

        return hpat_pandas_series_le_impl

    if isinstance(other, (types.Integer, types.Float)):
        def hpat_pandas_series_le_impl(self, other):
            """
            Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_integer_scalar
            Test: python -m hpat.runtests hpat.tests.test_series.TestSeries.test_series_op8_float_scalar
            """
            return pandas.Series(self._data <= other)

        return hpat_pandas_series_le_impl

    raise TypingError(
        '{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
            _func_name, self, other))
f2969e17dd79b71a033e3c84ffd82e3bf2448554
3,725
import numpy as np
from astropy.table import vstack  # telemetry tables are astropy Tables here


def add_obs_info(telem, obs_stats):
    """
    Add observation-specific information to a telemetry table (ok flag, and
    outlier flag). This is done as part of get_agasc_id_stats. It is a
    convenience for writing reports.

    :param telem: list of tables
        One or more telemetry tables (potentially many observations)
    :param obs_stats: table
        The result of calc_obs_stats.
    :return: the stacked telemetry table with `obs_ok` and `obs_outlier` columns
    """
    logger.debug('    Adding observation info to telemetry...')
    obs_stats['obs_ok'] = (
        (obs_stats['n'] > 10)
        & (obs_stats['f_track'] > 0.3)
        & (obs_stats['lf_variability_100s'] < 1)
    )
    obs_stats['comments'] = np.zeros(len(obs_stats), dtype='<U80')

    telem = vstack(telem)
    telem['obs_ok'] = True
    telem['obs_outlier'] = False

    for s in obs_stats:
        obsid = s['obsid']
        o = (telem['obsid'] == obsid)
        telem['obs_ok'][o] = np.ones(np.sum(o), dtype=bool) * s['obs_ok']
        if (np.any(telem['ok'][o])
                and s['f_track'] > 0
                and np.isfinite(s['q75'])
                and np.isfinite(s['q25'])):
            iqr = s['q75'] - s['q25']
            telem['obs_outlier'][o] = (
                telem[o]['ok']
                & (iqr > 0)
                & ((telem[o]['mags'] < s['q25'] - 1.5 * iqr)
                   | (telem[o]['mags'] > s['q75'] + 1.5 * iqr))
            )
        logger.debug(f'    Adding observation info to telemetry {obsid=}')
    return telem
beea2b87337412f34d1a854c14eebf46265f78c9
3,726
def map_view(request):
    """
    Place to show off the new map view
    """
    # MVView, MVDraw, MVLayer, MVLegendClass and MapView are the Tethys gizmo
    # option classes; messages, render and get_geoserver_wms come from the
    # surrounding module.
    # Define view options
    view_options = MVView(
        projection='EPSG:4326',
        center=[-100, 40],
        zoom=3.5,
        maxZoom=18,
        minZoom=2
    )

    # Define drawing options
    drawing_options = MVDraw(
        controls=['Modify', 'Delete', 'Move', 'Point', 'LineString', 'Polygon', 'Box'],
        initial='Point',
        output_format='GeoJSON'
    )

    # Define GeoJSON layer
    geojson_object = {
        'type': 'FeatureCollection',
        'crs': {
            'type': 'name',
            'properties': {
                'name': 'EPSG:3857'
            }
        },
        'features': [
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'Point',
                    'coordinates': [0, 0]
                }
            },
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'LineString',
                    'coordinates': [[4e6, -2e6], [8e6, 2e6]]
                }
            },
            {
                'type': 'Feature',
                'geometry': {
                    'type': 'Polygon',
                    'coordinates': [[[-5e6, -1e6], [-4e6, 1e6], [-3e6, -1e6]]]
                }
            }
        ]
    }

    # Define layers
    map_layers = []

    geojson_layer = MVLayer(source='GeoJSON',
                            options=geojson_object,
                            editable=False,
                            legend_title='Test GeoJSON',
                            legend_extent=[-46.7, -48.5, 74, 59],
                            legend_classes=[
                                MVLegendClass('polygon', 'Polygons', fill='rgba(255,255,255,0.8)', stroke='#3d9dcd'),
                                MVLegendClass('line', 'Lines', stroke='#3d9dcd')
                            ])
    map_layers.append(geojson_layer)

    if get_geoserver_wms():
        # Define GeoServer Layer
        geoserver_layer = MVLayer(source='ImageWMS',
                                  options={'url': get_geoserver_wms(),
                                           'params': {'LAYERS': 'topp:states'},
                                           'serverType': 'geoserver'},
                                  legend_title='USA Population',
                                  legend_extent=[-126, 24.5, -66.2, 49],
                                  legend_classes=[
                                      MVLegendClass('polygon', 'Low Density', fill='#00ff00', stroke='#000000'),
                                      MVLegendClass('polygon', 'Medium Density', fill='#ff0000', stroke='#000000'),
                                      MVLegendClass('polygon', 'High Density', fill='#0000ff', stroke='#000000')
                                  ])
        map_layers.append(geoserver_layer)

    # Define KML Layer
    kml_layer = MVLayer(source='KML',
                        options={'url': '/static/tethys_gizmos/data/model.kml'},
                        legend_title='Park City Watershed',
                        legend_extent=[-111.60, 40.57, -111.43, 40.70],
                        legend_classes=[
                            MVLegendClass('polygon', 'Watershed Boundary', fill='#ff8000'),
                            MVLegendClass('line', 'Stream Network', stroke='#0000ff'),
                        ])
    map_layers.append(kml_layer)

    # Tiled ArcGIS REST Layer
    arc_gis_layer = MVLayer(source='TileArcGISRest',
                            options={'url': 'http://sampleserver1.arcgisonline.com/ArcGIS/rest/services/' +
                                            'Specialty/ESRI_StateCityHighway_USA/MapServer'},
                            legend_title='ESRI USA Highway',
                            legend_extent=[-173, 17, -65, 72])
    map_layers.append(arc_gis_layer)

    # Define map view options
    map_view_options = MapView(
        height='600px',
        width='100%',
        controls=['ZoomSlider', 'Rotate', 'FullScreen',
                  {'MousePosition': {'projection': 'EPSG:4326'}},
                  {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-130, 22, -65, 54]}}],
        layers=map_layers,
        view=view_options,
        basemap='OpenStreetMap',
        draw=drawing_options,
        legend=True
    )

    submitted_geometry = request.POST.get('geometry', None)
    if submitted_geometry is not None:
        messages.info(request, submitted_geometry)

    context = {'map_view': map_view_options}

    return render(request, 'tethys_gizmos/gizmo_showcase/map_view.html', context)
5d037262b2c93c538b5a5b6fe076ee04a9d9b5ee
3,727
def decompose_jamo(compound):
    """Return a tuple of jamo character constituents of a compound.
    Note: Non-compound characters are echoed back.

    WARNING: Archaic jamo compounds will raise NotImplementedError.
    """
    if len(compound) != 1:
        raise TypeError("decompose_jamo() expects a single character,",
                        "but received", type(compound), "length", len(compound))
    if compound not in JAMO_COMPOUNDS:
        # Strict version:
        # raise TypeError("decompose_jamo() expects a compound jamo,",
        #                 "but received", compound)
        return compound
    return _JAMO_TO_COMPONENTS.get(compound, compound)
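With the standard Hangul compound tables one would expect behaviour like the following; the exact mapping lives in the module's JAMO_COMPOUNDS / _JAMO_TO_COMPONENTS, so treat these outputs as assumptions:

decompose_jamo('ㅘ')  # -> ('ㅗ', 'ㅏ'), if 'ㅘ' is listed as a compound
decompose_jamo('ㅏ')  # -> 'ㅏ' (non-compound characters are echoed back)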
56eb503b47a966d7f88750f7fdc1bcc55ba1aa1b
3,728
from typing import Optional


def cp_in_drive(
    source_id: str,
    dest_title: Optional[str] = None,
    parent_dir_id: Optional[str] = None,
) -> DiyGDriveFile:
    """Copy a specified file in Google Drive and return the created file."""
    drive = create_diy_gdrive()
    if dest_title is None:
        dest_title = build_dest_title(drive, source_id)
    return drive.copy_file(source_id, dest_title, parent_dir_id)
981cfa18da78a160447778cab5f3326f35dbfc59
3,729
import numpy as np
import tensorflow as tf


def label_tuning(
    text_embeddings,
    text_labels,
    label_embeddings,
    n_steps: int,
    reg_coefficient: float,
    learning_rate: float,
    dropout: float,
) -> np.ndarray:
    """
    With N as number of examples, K as number of classes, k as embedding
    dimension.

    Args:
        'text_embeddings': float[N,k] of embedded texts
        'text_labels': float[N,K] class score for each example.
        'label_embeddings': float[K,k] class embeddings

    Returns:
        float[K,k] updated class embeddings
    """
    if text_embeddings.shape[0] == 0:
        raise ValueError(text_embeddings.shape)
    if label_embeddings.shape[0] == 0:
        raise ValueError(label_embeddings.shape)

    text_embeddings = tf.constant(text_embeddings)
    text_labels = tf.constant(text_labels)
    label_embeddings = tf.constant(label_embeddings)

    init_label_embeddings = label_embeddings
    for i in range(n_steps):
        with tf.GradientTape() as tape:
            tape.watch(label_embeddings)
            dot_loss = _get_loss(
                text_embeddings,
                text_labels,
                label_embeddings,
                dropout=dropout,
            )
            drift_loss = tf.reduce_mean(
                (label_embeddings - init_label_embeddings) ** 2
            )
            total_loss = dot_loss + reg_coefficient * drift_loss
        # differentiate the regularized loss only; the original
        # `total_loss + drift_loss` double-counted the drift term
        gradient = tape.gradient(total_loss, label_embeddings)
        label_embeddings = label_embeddings - (learning_rate * gradient)
    label_embeddings = label_embeddings.numpy()
    return label_embeddings
83e4181c6600065bfb2cc98b4ca4957ea920ad7c
3,730
import tensorflow as tf


def create_nan_filter(tensor):
    """Creates a layer which replaces NaN's with zero's."""
    # tf.is_nan was the TF1 alias; tf.math.is_nan works in both TF1 and TF2
    return tf.where(tf.math.is_nan(tensor), tf.zeros_like(tensor), tensor)
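A quick eager-mode check of the behaviour:

x = tf.constant([1.0, float('nan'), 3.0])
print(create_nan_filter(x))  # -> tf.Tensor([1. 0. 3.], ...)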
4e03c4c4c275430e5228e2d73b09e24f8c787e71
3,731
def requestor_is_superuser(requestor):
    """Return True if requestor is superuser."""
    return getattr(requestor, "is_superuser", False)
7b201601cf8a1911aff8271ff71b6d4d51f68f1a
3,732
from typing import Dict


def process(business: Business,  # pylint: disable=too-many-branches
            filing: Dict,
            filing_rec: Filing,
            filing_meta: FilingMeta):
    """Process the incoming historic conversion filing."""
    # Extract the filing information for incorporation
    if not (conversion_filing := filing.get('filing', {}).get('conversion')):
        raise QueueException(f'CONVL legal_filing:conversion missing from {filing_rec.id}')
    if business:
        raise QueueException(f'Business Already Exist: CONVL legal_filing:conversion {filing_rec.id}')

    if not (corp_num := filing.get('filing', {}).get('business', {}).get('identifier')):
        raise QueueException(f'conversion {filing_rec.id} missing the business identifier.')

    # Initial insert of the business record
    business_info_obj = conversion_filing.get('nameRequest')
    if not (business := business_info.update_business_info(corp_num, Business(), business_info_obj, filing_rec)):
        raise QueueException(f'CONVL conversion {filing_rec.id}, Unable to create business.')

    if offices := conversion_filing.get('offices'):
        update_offices(business, offices)
    if parties := conversion_filing.get('parties'):
        update_parties(business, parties)
    if share_structure := conversion_filing.get('shareStructure'):
        shares.update_share_structure(business, share_structure)
    if name_translations := conversion_filing.get('nameTranslations'):
        aliases.update_aliases(business, name_translations)

    return business, filing_rec
78f5033251cb90023c2e0c0ad064b92af5212e65
3,733
import numpy as np


def est_const_bsl(bsl, starttime=None, endtime=None, intercept=False, val_tw=None):
    """Performs a linear regression (assuming the intercept at the origin).

    The corresponding formula is tt - S*1/v - c = 0, in which tt is the travel
    time of the acoustic signal in seconds and 1/v is the reciprocal of the
    harmonic mean of the sound speed. The slope S is equal to the constant
    baseline length and by default c is assumed to be 0, but can optionally
    also be determined (intercept=True).

    It needs:
    bsl ... pandas.DataFrame with ID of beacon 1 ('ID'), ID of beacon 2
        ('range_ID'), calculated baseline lengths in metres ('bsl'), one way
        traveltime in seconds ('tt'), sound speed at beacon 1 ('ssp1') in
        metres per second, sound speed at beacon 2 ('ssp2') in metres per
        second, measured traveltime in milliseconds ('range'), turn around
        time in milliseconds ('TAT') (eventually harmonic mean of 'ssp1' and
        'ssp2' ('hmssp') and the reciprocal of that harmonic mean ('1/v'); if
        they do not exist, they will be calculated), with corresponding times
        of measurement for the beacon pair.
    starttime (optional) ... string with starttime of time window for
        estimation of constant baseline length (format:
        'YYYY-mm-dd HH:MM:SS', default: first entry in bsl)
    endtime (optional) ... string with endtime of time window for estimation
        of constant baseline length (format: 'YYYY-mm-dd HH:MM:SS', default:
        last entry in bsl)
    intercept (optional) ... specify whether intercept should be set to 0
        [False] or should be calculated [True] (default is False)
    val_tw (optional) ... specify time window for which estimated constant
        baseline length and standard deviation (as well as intercept) will be
        stored in the returned pandas.DataFrame (format:
        ['YYYY-mm-dd HH:MM:SS', 'YYYY-mm-dd HH:MM:SS'], default is starttime
        and endtime)

    It returns:
    bsl ... pandas.DataFrame as above, extended by the harmonic mean of
        'ssp1' and 'ssp2' ('hmssp'), the reciprocal of that harmonic mean
        ('1/v'), the constant baseline length ('bsl_const') in the given time
        window and the standard deviation of the measurements compared to the
        fitted line in seconds (sigma = sqrt(sum((tt-S*1/v)^2)/(len(1/v)-1)),
        'std_dev_tt') in the given time window (and intercept ('intercept')),
        with corresponding times of measurement for the beacon pair.
    """
    # check if columns 'hmssp' and '1/v' (harmonic mean of sound speeds and
    # its reciprocal) already exist in bsl and if not then calculate them
    if not set(['hmssp', '1/v']).issubset(bsl.columns):
        bsl = calc_hmssp_recp_v(bsl)
    # end if not set(['hmssp','1/v']).issubset(bsl.columns):

    # copy bsl to new pandas.DataFrame to cut it in time
    bsl_new = bsl.copy()
    # check if time window for estimation of constant baseline length is given
    if starttime is not None:
        bsl_new = bsl_new.loc[starttime:]
    else:
        # set starttime to first index in bsl
        starttime = bsl_new.index[0]
    # end if starttime is not None:
    if endtime is not None:
        bsl_new = bsl_new.loc[:endtime]
    else:
        # set endtime to last index in bsl
        endtime = bsl_new.index[-1]
    # end if endtime is not None:

    # the numpy function numpy.linalg.lstsq() needs x as (M,N) matrix
    if not intercept:
        # .values: positional slicing directly on a pandas Series would fail
        x = bsl_new['1/v'].values[:, np.newaxis]
    else:
        x = np.array([[bsl_new['1/v'][j], 1] for j in range(len(bsl_new))])
    # end if not intercept:
    S, residuals, _, _ = np.linalg.lstsq(x, bsl_new['tt'], rcond=None)
    sigma = np.sqrt(residuals / (len(x) - 1))

    # set column 'bsl_const' for values between starttime and endtime to S and
    # column 'std_dev_tt' to estimated sigma in bsl
    if val_tw is not None:
        starttime = val_tw[0]
        endtime = val_tw[1]
    # end if val_tw is not None:
    if not intercept:
        bsl.loc[starttime:endtime, 'bsl_const'] = S
    else:
        bsl.loc[starttime:endtime, 'bsl_const'] = S[0]
        bsl.loc[starttime:endtime, 'intercept'] = S[1]
    # end if not intercept:
    bsl.loc[starttime:endtime, 'std_dev_tt'] = sigma

    return bsl
906119dcc66f4ab536d4a89c9c9b633bb6835058
3,734
import numpy as np


def SeasonUPdate(temp):
    """
    Update appliance characteristics given the change in season

    Parameters
    ----------
    temp (obj): appliance set object for an individual season

    Returns
    ----------
    app_expected_load (float): expected load power in Watts
    app_expected_dur (float): expected duration in hours
    appliance_set (list of appliance objects): appliance list for a given season
    t_delta_exp_dur (pandas datetime): expected appliance duration
    app_index (array): index for each appliance
    """
    app_expected_load = temp.app_expected_load
    app_expected_dur = temp.app_expected_dur
    appliance_set = temp.appliance_set
    t_delta_exp_dur = temp.t_delta_exp_dur
    app_index = np.arange(0, len(temp.appliance_set))

    return app_expected_load, app_expected_dur, appliance_set, t_delta_exp_dur, app_index
7fdfa932bedf2ac17490df6aaeedb547e1774c4d
3,735
import tensorflow as tf


def pad_and_reshape(instr_spec, frame_length, F):
    """
    :param instr_spec: instrument spectrogram tensor of shape
        (batch, time, F, channels)
    :param frame_length: STFT frame length, used to derive the full bin count
    :param F: number of frequency bins present in the input
    :returns: the spectrogram padded with zero rows up to frame_length // 2 + 1
        bins, with batch and time merged into a single leading axis
    """
    spec_shape = tf.shape(instr_spec)
    extension_row = tf.zeros((spec_shape[0], spec_shape[1], 1, spec_shape[-1]))
    n_extra_row = frame_length // 2 + 1 - F
    extension = tf.tile(extension_row, [1, 1, n_extra_row, 1])
    extended_spec = tf.concat([instr_spec, extension], axis=2)
    old_shape = tf.shape(extended_spec)
    new_shape = tf.concat([[old_shape[0] * old_shape[1]], old_shape[2:]], axis=0)
    processed_instr_spec = tf.reshape(extended_spec, new_shape)
    return processed_instr_spec
097bc2e8f58f1e947b8f69a6163d1c64d2197f9e
3,736
def GetExclusiveStorageForNodes(cfg, node_uuids):
    """Return the exclusive storage flag for all the given nodes.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration
    @type node_uuids: list or tuple
    @param node_uuids: node UUIDs for which to read the flag
    @rtype: dict
    @return: mapping from node uuids to exclusive storage flags
    @raise errors.OpPrereqError: if any given node name has no corresponding
      node

    """
    getflag = lambda n: _GetExclusiveStorageFlag(cfg, n)
    flags = map(getflag, node_uuids)
    return dict(zip(node_uuids, flags))
b93625bc2b865530bef0c648885f5615905e54c1
3,737
import csv

import numpy as np


def get_read_data(file, dic, keys):
    """Assigns reads to labels."""
    r = csv.reader(open(file))
    lines = list(r)
    vecs_forwards = []
    labels_forwards = []
    vecs_reverse = []
    labels_reverse = []
    for key in keys:
        for i in dic[key]:
            for j in lines:
                if i in j[0]:
                    if '_2.fq' in j[0] or '_R2_' in j[0]:
                        vecs_reverse.append(j[2:])
                        labels_reverse.append(key)
                    else:
                        vecs_forwards.append(j[2:])
                        labels_forwards.append(key)
    return (np.array(vecs_forwards), np.array(labels_forwards),
            np.array(vecs_reverse), np.array(labels_reverse))
355c44cbf83ab9506755bda294723bfd1e8a15c1
3,738
def removeDuplicates(listToRemoveFrom: list[str]):
    """Given list, returns list without duplicates"""
    listToReturn: list[str] = []
    for item in listToRemoveFrom:
        if item not in listToReturn:
            listToReturn.append(item)
    return listToReturn
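Usage, plus a common alternative: since dicts preserve insertion order (Python 3.7+), dict.fromkeys gives the same order-preserving deduplication in O(n) instead of the O(n²) membership scans above:

print(removeDuplicates(['a', 'b', 'a', 'c', 'b']))      # -> ['a', 'b', 'c']
print(list(dict.fromkeys(['a', 'b', 'a', 'c', 'b'])))   # same result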
8265e7c560d552bd9e30c0a1140d6668abd1b4d6
3,739
def check_hms_angle(value):
    """Validating function for angle sexagesimal representation in hours.
    Used in the rich_validator."""
    # hms_angle_re, hms_to_angle, VdtAngleError and the validate module are
    # defined elsewhere in this package.
    if isinstance(value, list):
        raise validate.ValidateError("expected value angle, found list")
    match = hms_angle_re.match(value)
    if not match:
        raise VdtAngleError("not a valid hour angle: %s" % value)
    return hms_to_angle(match.groups())
bf1b6ec14cc131263913c331cb1d3cb9a06cdc76
3,740
import logging


def get_logger(module_name):
    """Generates a logger for each module of the project.

    By default, the logger logs debug-level information into a newscrapy.log
    file and info-level information in console.

    Parameters
    ----------
    module_name: str
        The name of the module for which the logger should be generated, in
        snakecase.

    Returns
    -------
    Logger
        A logger for a specific module.
    """
    logger = logging.getLogger('%s_logger' % (module_name))
    file_handler = logging.FileHandler('newscrapy.log')
    console_handler = logging.StreamHandler()
    file_formatter = logging.Formatter('%(asctime)s - %(name)s - '
                                       '%(levelname)s - %(message)s')
    console_formatter = logging.Formatter('%(message)s')
    logger.setLevel(logging.DEBUG)
    file_handler.setLevel(logging.DEBUG)
    console_handler.setLevel(logging.INFO)
    file_handler.setFormatter(file_formatter)
    console_handler.setFormatter(console_formatter)
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
    return logger
df9e07df89c43bc156812834c70b5b007200581e
3,741
def stats():
    """Retrieves the count of each object type.

    Returns:
        JSON object with the number of objects by type."""
    return jsonify({
        "amenities": storage.count("Amenity"),
        "cities": storage.count("City"),
        "places": storage.count("Place"),
        "reviews": storage.count("Review"),
        "states": storage.count("State"),
        "users": storage.count("User")
    })
31ebd630381fe33cdbff507a3d34497423dfd621
3,742
import numpy as np


def addflux2pix(px, py, pixels, fmod):
    """Usage: pixels = addflux2pix(px, py, pixels, fmod)

    Drizzle flux onto pixels using a square PSF of pixel size unity.

    px, py are the pixel position (integers)
    fmod is the flux calculated for the (px, py) pixel and it has the same
    length as px and py
    pixels is the image.
    """
    xmax = pixels.shape[0]  # Size of pixel array
    ymax = pixels.shape[1]

    pxmh = px - 0.5  # location of reference corner of PSF square
    pymh = py - 0.5

    dx = np.floor(px + 0.5) - pxmh
    dy = np.floor(py + 0.5) - pymh

    # Supposing right-left as x axis and up-down as y axis:
    # Lower left pixel
    npx = int(pxmh)  # Numpy arrays start at zero
    npy = int(pymh)
    if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax):
        pixels[npx, npy] = pixels[npx, npy] + fmod * dx * dy

    # Same operations are done for the 3 other neighbouring pixels
    # Lower right pixel
    npx = int(pxmh) + 1
    npy = int(pymh)
    if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax):
        pixels[npx, npy] = pixels[npx, npy] + fmod * (1.0 - dx) * dy

    # Upper left pixel
    npx = int(pxmh)
    npy = int(pymh) + 1
    if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax):
        pixels[npx, npy] = pixels[npx, npy] + fmod * dx * (1.0 - dy)

    # Upper right pixel
    npx = int(pxmh) + 1
    npy = int(pymh) + 1
    if (npx >= 0) & (npx < xmax) & (npy >= 0) & (npy < ymax):
        pixels[npx, npy] = pixels[npx, npy] + fmod * (1.0 - dx) * (1.0 - dy)

    return pixels
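A quick sanity check that the four bilinear weights conserve flux: a unit flux at (2.3, 2.7) spreads over pixels (1,2), (2,2), (1,3) and (2,3) with weights 0.16, 0.64, 0.04 and 0.16, which sum to 1:

img = np.zeros((5, 5))
img = addflux2pix(2.3, 2.7, img, 1.0)
print(img.sum())  # -> 1.0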
808f99dac20cda962146fee8f2b9878a07804f9b
3,743
def get_dea_landsat_vrt_dict(feat_list):
    """
    This func is designed to take all relevant landsat bands on the dea
    public database for each scene in a stac query. It results in a list of
    vrts for each band separately and maps them to a dict where the band name
    is the key and the list is the value pair.
    """
    # notify
    print('Getting landsat vrts for each relevant band.')

    # check features type, length
    if not isinstance(feat_list, list):
        raise TypeError('Features must be a list of xml objects.')
    elif not len(feat_list) > 0:
        raise ValueError('No features provided.')

    # required dea landsat ard band names
    bands = [
        'nbart_blue',
        'nbart_green',
        'nbart_red',
        'nbart_nir',
        'nbart_swir_1',
        'nbart_swir_2',
        'oa_fmask'
    ]

    # iter each band name and build associated vrt list
    band_vrts_dict = {}
    for band in bands:
        print('Building landsat vrt list for band: {}.'.format(band))
        band_vrts_dict[band] = make_vrt_list(feat_list, band=band)

    # notify and return
    print('Got {} landsat vrt band lists successfully.'.format(len(band_vrts_dict)))
    return band_vrts_dict
79009cc9fbcd085c8e95cf15f4271419d598d1ce
3,744
import logging
import json


def load_json() -> tuple[list["Team"], list["User"]]:
    """Load the Json file."""
    # Team, User and the REQUIRED_* field lists are defined elsewhere in the
    # module.
    logging.debug("Starting to load data file.")
    with open(".example.json") as file:
        data = json.load(file)

    if any(field not in data for field in REQUIRED_DATA_FIELDS):
        raise ValueError("Required field is missing.")

    team_mapping = {}
    users = []
    for uid, user_data in data["users"].items():
        if any(field not in user_data for field in REQUIRED_USER_FIELDS):
            raise ValueError("Required field is missing.")
        user = User(uid, **user_data)
        users.append(user)
        if user_data["team"] not in team_mapping:
            team_mapping[user_data["team"]] = []
        team_mapping[user_data["team"]].append(user)

    teams = []
    for tid, team_data in data["teams"].items():
        if any(field not in team_data for field in REQUIRED_TEAM_FIELDS):
            raise ValueError("Required field is missing.")
        team = Team(tid, team_mapping.get(tid, []), None, **team_data)
        teams.append(team)
        for user in users:
            if user.team == tid:
                user.team = team
                if user.leader:
                    if team.leader is not None:
                        raise ValueError(f"Team {tid!r} has more than one leader.")
                    team.leader = user

    for user in users:
        if isinstance(user.team, str):
            raise ValueError(f"Unknown team {user.team!r}")

    logging.debug("Data loaded.")
    return teams, users
602a55faa1f89a6adc7e4246b666b4f098f3b190
3,745
def is_zh_l_bracket(uni_ch):
    """Return True if the given unicode character is a Chinese (full-width)
    left parenthesis ('（', U+FF08)."""
    return uni_ch == u'\uff08'
3ba18418005824a51de380c898726d050d464ec2
3,746
import numpy as np


def petlink32_to_dynamic_projection_mMR(filename, n_packets, n_radial_bins, n_angles,
                                        n_sinograms, time_bins, n_axial, n_azimuthal,
                                        angles_axial, angles_azimuthal, size_u, size_v,
                                        n_u, n_v, span, n_segments, segments_sizes,
                                        michelogram_segments, michelogram_planes,
                                        status_callback):
    """Make dynamic compressed projection from list-mode data."""
    # call_c_function, mMR_c, petlink and ErrorInCFunction are provided by the
    # surrounding package.
    descriptor = [
        {'name': 'filename', 'type': 'string', 'value': filename, 'size': len(filename)},
        {'name': 'n_packets', 'type': 'long', 'value': n_packets},
        {'name': 'n_radial_bins', 'type': 'uint', 'value': n_radial_bins},
        {'name': 'n_angles', 'type': 'uint', 'value': n_angles},
        {'name': 'n_sinograms', 'type': 'uint', 'value': n_sinograms},
        {'name': 'n_time_bins', 'type': 'uint', 'value': len(time_bins) - 1},
        {'name': 'time_bins', 'type': 'array', 'value': np.int32(time_bins)},
        {'name': 'n_axial', 'type': 'uint', 'value': n_axial},
        {'name': 'n_azimuthal', 'type': 'uint', 'value': n_azimuthal},
        {'name': 'angles_axial', 'type': 'array', 'value': angles_axial},
        {'name': 'angles_azimuthal', 'type': 'array', 'value': angles_azimuthal},
        {'name': 'size_u', 'type': 'float', 'value': size_u},
        {'name': 'size_v', 'type': 'float', 'value': size_v},
        {'name': 'n_u', 'type': 'uint', 'value': n_u},
        {'name': 'n_v', 'type': 'uint', 'value': n_v},
        {'name': 'span', 'type': 'uint', 'value': span},
        {'name': 'n_segments', 'type': 'uint', 'value': n_segments},
        {'name': 'segments_sizes', 'type': 'array', 'value': np.int32(segments_sizes)},
        {'name': 'michelogram_segments', 'type': 'array', 'value': np.int32(michelogram_segments)},
        {'name': 'michelogram_planes', 'type': 'array', 'value': np.int32(michelogram_planes)},
        {'name': 'status_callback', 'type': 'function', 'value': status_callback, 'arg_types': ['uint']},
    ]
    r = call_c_function(mMR_c.petlink32_to_dynamic_projection_mMR_michelogram, descriptor)
    if not r.status == petlink.status_success():
        raise ErrorInCFunction(
            "The execution of 'petlink32_to_dynamic_projection_mMR_michelogram' was unsuccessful.",
            r.status,
            'mMR_c.petlink32_to_dynamic_projection_mMR')
    return r.dictionary
9764da2a2fb0c021274133fdd46661a44cf0dc31
3,747
from typing import Dict


def is_core_recipe(component: Dict) -> bool:
    """
    Returns True if a recipe component contains a "Core Recipe" preparation.
    """
    preparations = component.get('recipeItem', {}).get('preparations') or []
    return any(prep.get('id') == PreparationEnum.CORE_RECIPE.value
               for prep in preparations)
451798c6f31297a80ac43db00243fb2dd85ced46
3,748
import tensorflow as tf


def build_estimator(output_dir, first_layer_size, num_layers, dropout,
                    learning_rate, save_checkpoints_steps):
    """Builds and returns a DNN Estimator, defined by input parameters.

    Args:
        output_dir: string, directory to save Estimator.
        first_layer_size: int, size of first hidden layer of DNN.
        num_layers: int, number of hidden layers.
        dropout: float, dropout rate used in training.
        learning_rate: float, learning rate used in training.
        save_checkpoints_steps: int, training steps to save Estimator.

    Returns:
        `Estimator` instance.
    """
    # head_lib, _dnn_model_fn and constants come from the surrounding module
    # (TF 1.x canned-estimator internals).
    # Sets head to default head for DNNClassifier with two classes.
    model_params = {
        'head': head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(),
        'feature_columns': [
            tf.feature_column.numeric_column(c, shape=[])
            for c in constants.FEATURE_COLUMNS
        ],
        'hidden_units': [
            max(int(first_layer_size / (pow(2, i))), 2)
            for i in range(int(num_layers))
        ],
        'dropout': dropout,
        'optimizer': tf.train.AdagradOptimizer(learning_rate)
    }

    def _model_fn(features, labels, mode, params):
        """Build TF graph based on canned DNN classifier."""
        key_column = features.pop(constants.KEY_COLUMN, None)
        if key_column is None:
            raise ValueError('Key is missing from features.')
        spec = _dnn_model_fn(features=features, labels=labels, mode=mode, **params)
        predictions = spec.predictions
        if predictions:
            predictions[constants.KEY_COLUMN] = tf.convert_to_tensor_or_sparse_tensor(
                key_column)
            spec = spec._replace(predictions=predictions)
            spec = spec._replace(export_outputs={
                'classes': tf.estimator.export.PredictOutput(predictions)
            })
        return spec

    config = tf.estimator.RunConfig(save_checkpoints_steps=save_checkpoints_steps)
    return tf.estimator.Estimator(
        model_fn=_model_fn,
        model_dir=output_dir,
        config=config,
        params=model_params)
339e26fd910aa7412b8e2b66845718e440ccada6
3,749
import json


def importConfig():
    """Load the configuration file.

    Returns:
        tuple: str: interface,
               str: alexa_remote_control.sh path,
               list: device list
    """
    with open("config.json", "r", encoding="utf-8") as f:
        config = json.load(f)
    interface = config["interface"]
    if not interface:
        return False
    arc_path = config["arc_path"]
    devices = config["device_list"]
    return (interface, arc_path, devices)
84f8fc0deec4aebfe48209b01d1a35f7373d31e6
3,750
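For reference, a config.json shaped the way importConfig expects; the field names come from the code, the values are placeholders. Since the function returns False when "interface" is empty, callers should guard before unpacking.

# Example config.json (placeholder values):
# {
#     "interface": "wlan0",
#     "arc_path": "/opt/alexa_remote_control.sh",
#     "device_list": ["Echo Dot", "Echo Show"]
# }
config = importConfig()
if config:  # importConfig() returns False when "interface" is empty
    interface, arc_path, devices = config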
import os

from google.api_core.client_options import ClientOptions
from google.cloud import datalabeling_v1beta1 as datalabeling


def create_dataset(project_id):
    """Creates a dataset for the given Google Cloud project."""
    client = datalabeling.DataLabelingServiceClient()
    # [END datalabeling_create_dataset_beta]

    # If provided, use a test endpoint - this prevents tests on this snippet
    # from triggering any action by a real human
    if 'DATALABELING_ENDPOINT' in os.environ:
        opts = ClientOptions(api_endpoint=os.getenv('DATALABELING_ENDPOINT'))
        client = datalabeling.DataLabelingServiceClient(client_options=opts)
    # [START datalabeling_create_dataset_beta]

    formatted_project_name = client.project_path(project_id)

    dataset = datalabeling.types.Dataset(
        display_name='YOUR_DATASET_SET_DISPLAY_NAME',
        description='YOUR_DESCRIPTION'
    )

    response = client.create_dataset(formatted_project_name, dataset)

    # The format of the resource name:
    # projects/{project_id}/datasets/{dataset_id}
    print('The dataset resource name: {}'.format(response.name))
    print('Display name: {}'.format(response.display_name))
    print('Description: {}'.format(response.description))
    print('Create time:')
    print('\tseconds: {}'.format(response.create_time.seconds))
    print('\tnanos: {}\n'.format(response.create_time.nanos))

    return response
f808115479503aae52c7ef1b3863164355b5a091
3,751
from typing import List from typing import Dict from typing import Any def create_local_command(opts: Options, jobs: List[Dict[str, Any]], jobs_metadata: List[Options]) -> str: """Create a terminal command to run the jobs locally.""" cmd = "" for meta, job in zip(jobs_metadata, jobs): input_file = meta.input.absolute().as_posix() workdir = meta.workdir.absolute().as_posix() # Run locally cmd += f'cd {workdir} && {opts.command} {input_file} & ' return cmd
f5d23c1fb2271b44a323d1d17e9dda35df29fcd7
3,752
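A sketch of how create_local_command might be driven. SimpleNamespace objects stand in for the package's real Options type (only the attributes the function reads are provided), so this is illustrative rather than definitive.

from pathlib import Path
from types import SimpleNamespace

opts = SimpleNamespace(command="orca")  # stand-in for the real Options
meta = SimpleNamespace(input=Path("job1.inp"), workdir=Path("runs/job1"))
cmd = create_local_command(opts, jobs=[{}], jobs_metadata=[meta])
# cmd is roughly: "cd /abs/runs/job1 && orca /abs/job1.inp & "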
import time


def time_for_log() -> str:
    """Return the current time, formatted for bot log messages."""
    return time.strftime("%d/%m %H:%M:%S - ")
0f964d5c827782ff8cc433e57bb3e78d0a7c7cba
3,753
import math


def _is_int(n) -> bool:
    """
    Determine whether the given number n is an integer; fractional
    parts of n smaller than _epsilon are ignored.

    :param n: the number to check
    :return: True if n is an integer, False otherwise
    """
    return (n - math.floor(n) < _epsilon) or (math.ceil(n) - n < _epsilon)
076a82d245333890d6790f65a58e5507905ca68f
3,754
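A quick illustration of the tolerance behaviour, assuming the module-level _epsilon is a small value such as 1e-7:

assert _is_int(3.0)
assert _is_int(2.99999999)   # fractional part of 1e-8 falls below _epsilon
assert not _is_int(2.5)      # 0.5 away from both neighbouring integers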
def _cpp_het_stat(A, t_stop, rates, t_start=0. * pq.ms): """ Generate a Compound Poisson Process (CPP) with amplitude distribution A and heterogeneous firing rates r=r[0], r[1], ..., r[-1]. Parameters ---------- A : np.ndarray CPP's amplitude distribution. A[j] represents the probability of a synchronous event of size j among the generated spike trains. The sum over all entries of A must be equal to one. t_stop : pq.Quantity The end time of the output spike trains rates : pq.Quantity Array of firing rates of each spike train generated with t_start : pq.Quantity, optional The start time of the output spike trains Default: 0 pq.ms Returns ------- list of neo.SpikeTrain List of neo.SpikeTrains with different firing rates, forming a CPP with amplitude distribution `A`. """ # Computation of Parameters of the two CPPs that will be merged # (uncorrelated with heterog. rates + correlated with homog. rates) n_spiketrains = len(rates) # number of output spike trains # amplitude expectation expected_amplitude = np.dot(A, np.arange(n_spiketrains + 1)) r_sum = np.sum(rates) # sum of all output firing rates r_min = np.min(rates) # minimum of the firing rates # rate of the uncorrelated CPP r_uncorrelated = r_sum - n_spiketrains * r_min # rate of the correlated CPP r_correlated = r_sum / expected_amplitude - r_uncorrelated # rate of the hidden mother process r_mother = r_uncorrelated + r_correlated # Check the analytical constraint for the amplitude distribution if A[1] < (r_uncorrelated / r_mother).rescale( pq.dimensionless).magnitude: raise ValueError('A[1] too small / A[i], i>1 too high') # Compute the amplitude distribution of the correlated CPP, and generate it A = A * (r_mother / r_correlated).magnitude A[1] = A[1] - r_uncorrelated / r_correlated compound_poisson_spiketrains = _cpp_hom_stat( A, t_stop, r_min, t_start) # Generate the independent heterogeneous Poisson processes poisson_spiketrains = \ [homogeneous_poisson_process(rate - r_min, t_start, t_stop) for rate in rates] # Pool the correlated CPP and the corresponding Poisson processes return [_pool_two_spiketrains(compound_poisson_spiketrain, poisson_spiketrain) for compound_poisson_spiketrain, poisson_spiketrain in zip(compound_poisson_spiketrains, poisson_spiketrains)]
adc00577e9a6cb1ff7f9e0313befe98c81332ab1
3,755
def return_bad_parameter_config() -> CloudSettings:
    """Return a wrongly configured cloud settings instance."""
    CloudSettingsTest = CloudSettings(  # noqa: N806
        settings_order=[
            "init_settings",
            "aws_parameter_setting",
            "file_secret_settings",
            "env_settings",
        ]
    )

    class AWSSettings(CloudSettingsTest):  # type: ignore
        test: str = "Cool"
        prefix_test_store: str = ""

    return AWSSettings()
06f8af87873d571be9c5ae7fd2e563402e57b2d0
3,756
def update(isamAppliance, instance_id, id, filename=None, contents=None, check_mode=False, force=False):
    """
    Update a file in the administration pages root

    :param isamAppliance:
    :param instance_id:
    :param id:
    :param filename:
    :param contents:
    :param check_mode:
    :param force:
    :return:
    """
    if force is True or _check_file(isamAppliance, instance_id, id) is True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True)
        else:
            if filename is not None:
                return isamAppliance.invoke_put_files(
                    "Update a file in the administration page root",
                    "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
                    [
                        {
                            'file_formfield': 'file',
                            'filename': filename,
                            'mimetype': 'application/octet-stream'
                        }
                    ],
                    {
                        'file': filename,
                        'type': 'file'
                    })
            elif contents is not None:
                return isamAppliance.invoke_put_files(
                    "Update a file in the administration page root",
                    "/wga/reverseproxy/{0}/management_root/{1}".format(instance_id, id),
                    {
                        'contents': contents,
                        'type': 'file'
                    })
            else:
                return isamAppliance.create_return_object(
                    warnings=["Either the contents or the filename parameter needs to be provided. Skipping update request."])
af0b95096638fb34af130623b0929c4394a1a845
3,757
def view_deflate_encoded_content():
    """Returns Deflate-encoded data.
    ---
    tags:
      - Response formats
    produces:
      - application/json
    responses:
      200:
        description: Deflate-encoded data.
    """

    return jsonify(get_dict("origin", "headers", method=request.method, deflated=True))
ff8d39f75a6cb526b3a61e85234e71efa174a208
3,758
def predict_from_word_vectors_matrix(tokens, matrix, nlp, POS="NOUN", top_number=constants.DEFAULT_TOP_ASSOCIATIONS):
    """
    Make a prediction based on the word vectors

    :param tokens: input word tokens to find associations for
    :param matrix: word-vector matrix used to score the associations
    :param nlp: loaded language model used to filter results by POS
    :param POS: part-of-speech tag the results are restricted to
    :param top_number: number of top associations to return
    :return: the top-scoring association results
    """
    vector_results = collect_word_vector_associations(tokens, matrix)
    top_results = get_top_results(vector_results, nlp, top_number, POS)

    return top_results
6a491e481238af932994bb8d383baca4da1ebd55
3,759
import os

from dotenv import load_dotenv


def get_luis_keys():
    """Retrieve keys for the LUIS app from environment variables"""
    load_dotenv()
    key = os.getenv("LUIS_KEY")
    region = os.getenv("LUIS_REGION")
    app_id = os.getenv("LUIS_APP_ID")
    return key, region, app_id
94c880c26d08a60ecde6c86952de64d8ea45a46d
3,760
import re
from typing import OrderedDict


def xls_to_dict(path_or_file):
    """
    Return a Python dictionary with a key for each worksheet
    name. For each sheet there is a list of dictionaries, each
    dictionary corresponds to a single row in the worksheet. A
    dictionary has keys taken from the column headers and values
    equal to the cell value for that row and column.
    All the keys and leaf elements are unicode text.
    """
    try:
        if isinstance(path_or_file, basestring):
            workbook = xlrd.open_workbook(filename=path_or_file)
        else:
            workbook = xlrd.open_workbook(file_contents=path_or_file.read())
    except XLRDError as error:
        raise PyXFormError("Error reading .xls file: %s" % error)

    def xls_to_dict_normal_sheet(sheet):
        def iswhitespace(string):
            return isinstance(string, basestring) and len(string.strip()) == 0

        # Check for duplicate column headers
        column_header_list = list()
        for column in range(0, sheet.ncols):
            column_header = sheet.cell_value(0, column)
            if column_header in column_header_list:
                raise PyXFormError("Duplicate column header: %s" % column_header)
            # .xls files often include a few extra trailing columns that are
            # blank by default; skip those during the check
            if column_header is not None:
                if not iswhitespace(column_header):
                    # strip whitespaces from the header
                    clean_header = re.sub(r"( )+", " ", column_header.strip())
                    column_header_list.append(clean_header)

        result = []
        for row in range(1, sheet.nrows):
            row_dict = OrderedDict()
            for column in range(0, sheet.ncols):
                # Changing to cell_value function
                # convert the key to a string, in case it is not one already
                key = "%s" % sheet.cell_value(0, column)
                key = key.strip()
                value = sheet.cell_value(row, column)
                # remove whitespace at the beginning and end of value
                if isinstance(value, basestring):
                    value = value.strip()
                value_type = sheet.cell_type(row, column)
                if value is not None:
                    if not iswhitespace(value):
                        try:
                            row_dict[key] = xls_value_to_unicode(
                                value, value_type, workbook.datemode
                            )
                        except XLDateAmbiguous:
                            raise PyXFormError(
                                XL_DATE_AMBIGOUS_MSG % (sheet.name, column_header, row)
                            )
            # Taking this condition out so I can get accurate row numbers.
            # TODO: Do the same for csvs
            # if row_dict != {}:
            result.append(row_dict)
        return result, _list_to_dict_list(column_header_list)

    def xls_value_from_sheet(sheet, row, column):
        value = sheet.cell_value(row, column)
        value_type = sheet.cell_type(row, column)
        if value is not None and value != "":
            try:
                return xls_value_to_unicode(value, value_type, workbook.datemode)
            except XLDateAmbiguous:
                raise PyXFormError(XL_DATE_AMBIGOUS_MSG % (sheet.name, column, row))
        else:
            raise PyXFormError("Empty Value")

    result = OrderedDict()
    for sheet in workbook.sheets():
        # Record that the sheet exists, but do no further processing here.
        result[sheet.name] = []
        # Do not process sheets that have nothing to do with XLSForm.
        if sheet.name not in constants.SUPPORTED_SHEET_NAMES:
            if len(workbook.sheets()) == 1:
                (
                    result[constants.SURVEY],
                    result["%s_header" % constants.SURVEY],
                ) = xls_to_dict_normal_sheet(sheet)
            else:
                continue
        else:
            (
                result[sheet.name],
                result["%s_header" % sheet.name],
            ) = xls_to_dict_normal_sheet(sheet)
    return result
8ac0ca78dae6bec7025565607ddc174205d0389a
3,761
import cv2


def blendImg(img_a, img_b, α=0.8, β=1., γ=0.):
    """
    The result image is computed as follows:

        img_a * α + img_b * β + γ
    """
    return cv2.addWeighted(img_a, α, img_b, β, γ)
f60918ba424b0d59e9025c088c0f2c9a3f739fde
3,762
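A usage sketch for blendImg with the default weights, so the result is img_a * 0.8 + img_b * 1.0 + 0 (saturated to the uint8 range by OpenCV):

import numpy as np

frame = np.zeros((100, 100, 3), dtype=np.uint8)        # all black
overlay = np.full((100, 100, 3), 255, dtype=np.uint8)  # all white
blended = blendImg(frame, overlay)  # 0*0.8 + 255*1.0 -> saturates to 255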
def setup(app): """Sets up the extension""" app.add_autodocumenter(documenters.FunctionDocumenter) app.add_config_value( "autoclass_content", "class", True, ENUM("both", "class", "init") ) app.add_config_value( "autodoc_member_order", "alphabetical", True, ENUM("alphabetic", "alphabetical", "bysource", "groupwise"), ) app.add_config_value("autodoc_default_options", {}, True) app.add_config_value("autodoc_docstring_signature", True, True) app.add_config_value("autodoc_mock_imports", [], True) app.add_config_value( "autodoc_typehints", "signature", True, ENUM("signature", "description", "none") ) app.add_config_value("autodoc_type_aliases", {}, True) app.add_config_value("autodoc_warningiserror", True, True) app.add_config_value("autodoc_inherit_docstrings", True, True) app.add_event("autodoc-before-process-signature") app.add_event("autodoc-process-docstring") app.add_event("autodoc-process-signature") app.add_event("autodoc-skip-member") app.connect("config-inited", migrate_autodoc_member_order, priority=800) app.setup_extension("sphinx.ext.autodoc.type_comment") app.setup_extension("sphinx.ext.autodoc.typehints") return {"version": sphinx.__display_version__, "parallel_read_safe": True}
b6cf9cfcc59eb83c10be441faebd24e6000673f3
3,763
def genoimc_dup4_loc():
    """Create the genomic dup4 sequence location"""
    return {
        "_id": "ga4gh:VSL.us51izImAQQWr-Hu6Q7HQm-vYvmb-jJo",
        "sequence_id": "ga4gh:SQ.-A1QmD_MatoqxvgVxBLZTONHz9-c7nQo",
        "interval": {
            "type": "SequenceInterval",
            "start": {
                "value": 30417575,
                "comparator": "<=",
                "type": "IndefiniteRange"
            },
            "end": {
                "value": 31394018,
                "comparator": ">=",
                "type": "IndefiniteRange"
            }
        },
        "type": "SequenceLocation"
    }
3ea1b39fed22487bebffc78d45cb493b7d7afa4a
3,764
def compare_versions(a, b): """Return 0 if a == b, 1 if a > b, else -1.""" a, b = version_to_ints(a), version_to_ints(b) for i in range(min(len(a), len(b))): if a[i] > b[i]: return 1 elif a[i] < b[i]: return -1 return 0
0b22589164f7d3731edc34af97d306186e677371
3,765
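compare_versions relies on a version_to_ints helper defined elsewhere; the sketch below assumes it simply splits the version string on dots, which is enough to exercise the comparison logic:

def version_to_ints(version):
    # Assumed behaviour: "1.2.10" -> (1, 2, 10)
    return tuple(int(part) for part in version.split("."))

assert compare_versions("1.2.10", "1.2.9") == 1
assert compare_versions("0.9", "1.0") == -1
assert compare_versions("1.2", "1.2.0") == 0  # only the shared prefix is compared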
def get_machine_action_data(machine_action_response):
    """Take the machine action's raw response and return the machine action info
    in context and human-readable format.
    Notes:
         Machine action is a collection of actions you can apply on the machine, for more info
         https://docs.microsoft.com/en-us/windows/security/threat-protection/microsoft-defender-atp/machineaction

    Returns:
        dict. Machine action's info
    """
    action_data = \
        {
            "ID": machine_action_response.get('id'),
            "Type": machine_action_response.get('type'),
            "Scope": machine_action_response.get('scope'),
            "Requestor": machine_action_response.get('requestor'),
            "RequestorComment": machine_action_response.get('requestorComment'),
            "Status": machine_action_response.get('status'),
            "MachineID": machine_action_response.get('machineId'),
            "ComputerDNSName": machine_action_response.get('computerDnsName'),
            "CreationDateTimeUtc": machine_action_response.get('creationDateTimeUtc'),
            "LastUpdateTimeUtc": machine_action_response.get('lastUpdateTimeUtc'),
            "RelatedFileInfo": {
                "FileIdentifier": machine_action_response.get('fileIdentifier'),
                "FileIdentifierType": machine_action_response.get('fileIdentifierType')
            },
            "Commands": machine_action_response.get('commands')
        }
    return action_data
1e0ffc37d8d3b5662b64ec28cb850c6277b1bad2
3,766
import os def BulkRemove(fname,masterfile=None,edlfile=None): """ Given a file with one IP per line, remove the given IPs from the EDL if they are in there """ global AutoSave success = True removes = list() if os.path.exists(fname): with open(fname,"rt") as ip_list: for ip in ip_list: removes.append(ip.strip()) Remove(removes,masterfile,edlfile) else: success = False return success
c6ffe54e8b20437f6eb48459e775dd0a38b4c5d2
3,767
import torch
import torch.nn as nn


def convolutionalize(modules, input_size):
    """
    Recast `modules` into fully convolutional form.
    The conversion transfers weights and infers kernel sizes from the
    `input_size` and modules' action on it.
    n.b. This only handles the conversion of linear/fully-connected
    modules, although other module types could require conversion for
    correctness.
    """
    fully_conv_modules = []
    x = torch.zeros((1, ) + input_size)
    for m in modules:
        if isinstance(m, nn.Linear):
            n = nn.Conv2d(x.size(1), m.weight.size(0), kernel_size=(x.size(2), x.size(3)))
            n.weight.data.view(-1).copy_(m.weight.data.view(-1))
            n.bias.data.view(-1).copy_(m.bias.data.view(-1))
            m = n
        fully_conv_modules.append(m)
        x = m(x)
    return fully_conv_modules
5693a17bac0f39538bfcada3280ce06ef91230a3
3,768
def is_unique2(s):
    """
    Use a boolean list indexed by the character's code point to tell
    whether that character has already appeared once
    """
    d = [False] * 128  # assumes ASCII input
    for t in s:
        if d[ord(t)]:
            return False
        d[ord(t)] = True
    return True
b1a1bdea8108690a0e227fd0b75f910bd6b99a07
3,769
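Two quick checks of the uniqueness test above:

assert is_unique2("abcdef")      # all characters distinct
assert not is_unique2("hello")   # 'l' appears twice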
import random

from copy import deepcopy


def uncomplete_tree_parallel(x: ATree, mode="full"):
    """ Input is an ATree (the formal-language tree).
        Output is a randomly uncompleted tree, every node annotated whether it's terminated and what actions are good at that node """
    fl = x
    fl.parent = None
    add_descendants_ancestors(fl)

    y = ATree("@START@", [])
    y.align = fl
    y.is_open = True

    i = 0
    y = assign_gold_actions(y, mode=mode)
    choices = [deepcopy(y)]  # !! can't cache because different choices !
    while not all_terminated(y):
        y = mark_for_execution(y, mode=mode)
        y = execute_chosen_actions(y, mode=mode)
        y = assign_gold_actions(y, mode=mode)
        y = adjust_gold(y, mode=mode)
        choices.append(deepcopy(y))
        i += 1

    ret = random.choice(choices[:-1])
    return ret
f59e0f0279c9c439034116f769f51d60a924c4af
3,770
def stations_by_river(stations):
    """Return a dictionary mapping each river name to a sorted list of
    the names of the stations on that river"""
    rivers_name = []
    for i in stations:
        if i.river not in rivers_name:
            rivers_name.append(i.river)

    big_list = []
    for n in rivers_name:
        lists = []
        for y in stations:
            if n == y.river:
                lists.append(y.name)
        lists = sorted(lists)
        big_list.append(lists)

    dictionary = dict(zip(rivers_name, big_list))
    dicti = {}
    for key in sorted(dictionary):
        dicti.update({key: dictionary[key]})
    assert dicti != {}
    return dicti
66fd928415619d175b7069b8c3103a3f7d930aac
3,771
def QA_SU_save_huobi(frequency):
    """
    Save Huobi k-line data, dispatching to the day-level or
    minute-level saver based on the requested frequency
    """
    if (frequency not in ["1d", "1day", "day"]):
        return QA_SU_save_huobi_min(frequency)
    else:
        return QA_SU_save_huobi_day(frequency)
cdea45afe6d7e0b61dea517adb8fc484e8eafa38
3,772
import os def get_cachefile(filename=None): """Resolve cachefile path """ if filename is None: for f in FILENAMES: if os.path.exists(f): return f return IDFILE else: return filename
39046ce95f763720a6ad23584717f4da379cf690
3,773
def inverse(a):
    """
    Compute the modular multiplicative inverse of a modulo 97 by
    brute-force search; this is used to reverse the character mapping
    when decrypting a received message.

    :param a: an int
    :return: x -> an int such that a * x % 97 == 1
    """
    x = 0
    while a * x % 97 != 1:
        x = x + 1
    return x
2893d2abda34e4573eb5d9602edc0f8e14246e09
3,774
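A worked check of the brute-force search: the inverse of 5 modulo 97 is 39, since 5 * 39 = 195 = 2 * 97 + 1:

x = inverse(5)
assert x == 39
assert (5 * x) % 97 == 1  # multiplying by the inverse undoes the encryption step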
from typing import Optional from typing import Union def currency_column_to_numeric( df: pd.DataFrame, column_name: str, cleaning_style: Optional[str] = None, cast_non_numeric: Optional[dict] = None, fill_all_non_numeric: Optional[Union[float, int]] = None, remove_non_numeric: bool = False, ) -> pd.DataFrame: """Convert currency column to numeric. This method does not mutate the original DataFrame. This method allows one to take a column containing currency values, inadvertently imported as a string, and cast it as a float. This is usually the case when reading CSV files that were modified in Excel. Empty strings (i.e. `''`) are retained as `NaN` values. Example: >>> import pandas as pd >>> import janitor >>> df = pd.DataFrame({ ... "a_col": [" 24.56", "-", "(12.12)", "1,000,000"], ... "d_col": ["", "foo", "1.23 dollars", "-1,000 yen"], ... }) >>> df # doctest: +NORMALIZE_WHITESPACE a_col d_col 0 24.56 1 - foo 2 (12.12) 1.23 dollars 3 1,000,000 -1,000 yen The default cleaning style. >>> df.currency_column_to_numeric("d_col") a_col d_col 0 24.56 NaN 1 - NaN 2 (12.12) 1.23 3 1,000,000 -1000.00 The accounting cleaning style. >>> df.currency_column_to_numeric("a_col", cleaning_style="accounting") # doctest: +NORMALIZE_WHITESPACE a_col d_col 0 24.56 1 0.00 foo 2 -12.12 1.23 dollars 3 1000000.00 -1,000 yen Valid cleaning styles are: - `None`: Default cleaning is applied. Empty strings are always retained as `NaN`. Numbers, `-`, `.` are extracted and the resulting string is cast to a float. - `'accounting'`: Replaces numbers in parentheses with negatives, removes commas. :param df: The pandas DataFrame. :param column_name: The column containing currency values to modify. :param cleaning_style: What style of cleaning to perform. :param cast_non_numeric: A dict of how to coerce certain strings to numeric type. For example, if there are values of 'REORDER' in the DataFrame, `{'REORDER': 0}` will cast all instances of 'REORDER' to 0. Only takes effect in the default cleaning style. :param fill_all_non_numeric: Similar to `cast_non_numeric`, but fills all strings to the same value. For example, `fill_all_non_numeric=1`, will make everything that doesn't coerce to a currency `1`. Only takes effect in the default cleaning style. :param remove_non_numeric: If set to True, rows of `df` that contain non-numeric values in the `column_name` column will be removed. Only takes effect in the default cleaning style. :raises ValueError: If `cleaning_style` is not one of the accepted styles. :returns: A pandas DataFrame. """ # noqa: E501 check("column_name", column_name, [str]) check_column(df, column_name) column_series = df[column_name] if cleaning_style == "accounting": df.loc[:, column_name] = df[column_name].apply( _clean_accounting_column ) return df if cleaning_style is not None: raise ValueError( "`cleaning_style` is expected to be one of ('accounting', None). " f"Got {cleaning_style!r} instead." 
        )

    if cast_non_numeric:
        check("cast_non_numeric", cast_non_numeric, [dict])
        _make_cc_partial = partial(
            _currency_column_to_numeric,
            cast_non_numeric=cast_non_numeric,
        )
        column_series = column_series.apply(_make_cc_partial)

    if remove_non_numeric:
        df = df.loc[column_series != "", :]

    # _replace_empty_string_with_none is applied here after the check on
    # remove_non_numeric since "" is our indicator that a string was coerced
    # in the original column
    column_series = _replace_empty_string_with_none(column_series)

    if fill_all_non_numeric is not None:
        check("fill_all_non_numeric", fill_all_non_numeric, [int, float])
        column_series = column_series.fillna(fill_all_non_numeric)

    column_series = _replace_original_empty_string_with_none(column_series)

    df = df.assign(**{column_name: pd.to_numeric(column_series)})

    return df
e382752e5aff389872da69f42a3ec62785df336f
3,775
async def subreddit_type_submissions(sub="wallstreetbets", kind="new"):
    """Fetch submissions of the given kind from a subreddit and return
    the cleaned (articles, comments) lists.
    """
    comments = []
    articles = []
    red = await reddit_instance()
    subreddit = await red.subreddit(sub)
    if kind == "hot":
        submissions = subreddit.hot()
    elif kind == "top":
        submissions = subreddit.top()
    elif kind == "new":
        submissions = subreddit.new()
    elif kind == "random_rising":
        submissions = subreddit.random_rising()
    else:
        submissions = subreddit.random()

    async for submission in submissions:
        article = clean_submission(submission)
        article['subreddit'] = sub
        articles.append(article)
        top_level_comments = await submission.comments()
        print(f"📗 Looking at submission: {article['title'][:40]}...")
        for top_level_comment in top_level_comments:
            if isinstance(top_level_comment, MoreComments):
                continue
            comment = clean_comment(top_level_comment)
            print(f"🗯️ ... {comment['author']} said {comment['body'][:40]}")
            comment['article_id'] = article['id']
            comments.append(comment)
    return (articles, comments)
9cc8655575ca8fd3729e220b0ee3fc8e45e4ed56
3,776
import getopt
import sys


def get_args():
    """Parse and return the command-line arguments"""
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "i:s:t:o:rvh", ["ibam=", "snp=", "tag=", "output=", "rstat", "verbose", "help"])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(-1)
    return opts
2caf9ac202c788da2587e37e0754a7789fdc8142
3,777
import typing import json def _get_bundle_manifest( uuid: str, replica: Replica, version: typing.Optional[str], *, bucket: typing.Optional[str] = None) -> typing.Optional[dict]: """ Return the contents of the bundle manifest file from cloud storage, subject to the rules of tombstoning. If version is None, return the latest version, once again, subject to the rules of tombstoning. If the bundle cannot be found, return None """ uuid = uuid.lower() handle = Config.get_blobstore_handle(replica) default_bucket = replica.bucket # need the ability to use fixture bucket for testing bucket = default_bucket if bucket is None else bucket def tombstone_exists(uuid: str, version: typing.Optional[str]): return test_object_exists(handle, bucket, BundleTombstoneID(uuid=uuid, version=version).to_key()) # handle the following deletion cases # 1. the whole bundle is deleted # 2. the specific version of the bundle is deleted if tombstone_exists(uuid, None) or (version and tombstone_exists(uuid, version)): return None # handle the following deletion case # 3. no version is specified, we want the latest _non-deleted_ version if version is None: # list the files and find the one that is the most recent. prefix = f"bundles/{uuid}." object_names = handle.list(bucket, prefix) version = _latest_version_from_object_names(object_names) if version is None: # no matches! return None bundle_fqid = BundleFQID(uuid=uuid, version=version) # retrieve the bundle metadata. try: bundle_manifest_blob = handle.get(bucket, bundle_fqid.to_key()).decode("utf-8") return json.loads(bundle_manifest_blob) except BlobNotFoundError: return None
7881e1514a9a645c1f7ee6479baa6e74bae4dabb
3,778
def handler400(request, exception): """ This is a Django handler function for 400 Bad Request error :param request: The Django Request object :param exception: The exception caught :return: The 400 error page """ context = get_base_context(request) context.update({ 'message': { 'title': '400 Bad Request', 'description': 'Your client has issued a malformed or illegal request.' } }) return render(request, 'velarium/base.html', context=context, status=400)
0dc1b81ec86d675f348728863dfe07efbd936e8e
3,779
import os import json def object_trajectory_proposal(vid, fstart, fend, gt=False, verbose=False): """ Set gt=True for providing groundtruth bounding box trajectories and predicting classme feature only """ vsig = get_segment_signature(vid, fstart, fend) name = 'traj_cls_gt' if gt else 'traj_cls' path = get_feature_path(name, vid) path = os.path.join(path, '{}-{}.json'.format(vsig, name)) if os.path.exists(path): if verbose: print('loading object {} proposal for video segment {}'.format(name, vsig)) with open(path, 'r') as fin: trajs = json.load(fin) trajs = [Trajectory(**traj) for traj in trajs] else: if verbose: print('no object {} proposal for video segment {}'.format(name, vsig)) trajs = [] return trajs
c8a0fc1dc8109055a16becc40e94e0e15ec8289e
3,780
def _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size): """Gather top beams from nested structure.""" _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size) return _gather_beams(nested, topk_indexes, batch_size, beam_size)
ebdaf391104a3f271a42549708f3e7adfaf8b0b0
3,781
import scipy import numpy def _traceinv_exact(K, B, C, matrix, gram, exponent): """ Finds traceinv directly for the purpose of comparison. """ # Exact solution of traceinv for band matrix if B is not None: if scipy.sparse.isspmatrix(K): K_ = K.toarray() B_ = B.toarray() if C is not None: C_ = C.toarray() else: K_ = K B_ = B if C is not None: C_ = C if exponent == 0: if C is not None: traceinv_exact = numpy.trace(C_ @ B_) else: traceinv_exact = numpy.trace(B_) else: if gram: K_ = numpy.matmul(K_.T, K_) if exponent > 1: K1 = K_.copy() for i in range(1, exponent): K_ = numpy.matmul(K_, K1) Kinv = numpy.linalg.inv(K_) Op = numpy.matmul(Kinv, B_) if C is not None: Op = Kinv @ C_ @ Op traceinv_exact = numpy.trace(Op) elif exponent == 1 and not gram: # B is identity. Using analytic formula. traceinv_exact = band_matrix_traceinv(matrix['a'], matrix['b'], matrix['size'], True) else: # B and C are identity. Compute traceinv directly. if scipy.sparse.isspmatrix(K): K_ = K.toarray() else: K_ = K if exponent == 0: traceinv_exact = K_.shape[0] else: if gram: K_ = numpy.matmul(K_.T, K_) K_temp = K_.copy() for i in range(1, exponent): K_ = numpy.matmul(K_, K_temp) Kinv = numpy.linalg.inv(K_) traceinv_exact = numpy.trace(Kinv) return traceinv_exact
3637a5aa726ef1bf8489783c435c429b59422240
3,782
import librosa
import numpy as np
import pandas as pd
from numpy import ndarray


def create_feature_vector_of_mean_mfcc_for_song(song_file_path: str) -> ndarray:
    """
    Takes in a file path to a song segment and returns a numpy array containing the mean mfcc values
    :param song_file_path: str
    :return: ndarray
    """
    song_segment, sample_rate = librosa.load(song_file_path)
    mfccs = librosa.feature.mfcc(y=song_segment, sr=sample_rate, n_mfcc=NUMBER_OF_MFCC)
    mfccs_processed = np.mean(mfccs.T, axis=0)
    df = pd.DataFrame(mfccs_processed)
    z_score_normalized_mfccs = (df.values - df.values.mean()) / df.values.std()
    z_score_normalized_mfccs = np.array([i[0] for i in z_score_normalized_mfccs])
    return z_score_normalized_mfccs
8992feafd483bfe7b4af5e715ba1455884e1b710
3,783
def stations_highest_rel_level(stations, N): """Returns a list containing the names of the N stations with the highest water level relative to the typical range""" names = [] # create list for names levels = [] # create list for levels for i in range(len(stations)): # iterate through stations if stations[i].relative_water_level() is not None: # ^checks for valid relative water level names.append(stations[i].name) levels.append(stations[i].relative_water_level()) # ^adds names and levels to respective lists combined = list(zip(names, levels)) # combines names and levels combined.sort(key=lambda x: x[1], reverse=1) # sorts in reverse output = [] # create output list for i in range(N): # iterate up to N output.append(combined[i][0]) # add station name to output return output
780a03a424c9b2f0dedee2e93eb9bd27cc1fce36
3,784
def read_image(file_name, format=None): """ Read an image into the given format. Will apply rotation and flipping if the image has such exif information. Args: file_name (str): image file path format (str): one of the supported image modes in PIL, or "BGR" Returns: image (np.ndarray): an HWC image in the given format. """ with PathManager.open(file_name, "rb") as f: image = Image.open(f) # capture and ignore this bug: https://github.com/python-pillow/Pillow/issues/3973 try: image = ImageOps.exif_transpose(image) except Exception: pass if format is not None: # PIL only supports RGB, so convert to RGB and flip channels over below conversion_format = format if format == "BGR": conversion_format = "RGB" image = image.convert(conversion_format) image = np.asarray(image) if format == "BGR": # flip channels if needed image = image[:, :, ::-1] # PIL squeezes out the channel dimension for "L", so make it HWC if format == "L": image = np.expand_dims(image, -1) return image
ffa2cd8cbea750a08fa28942e71c48bbcfbefdaf
3,785
def add_global_nodes_edges(g_nx : nx.Graph,
                           feat_data: np.ndarray,
                           adj_list: np.ndarray,
                           g_feat_data: np.ndarray,
                           g_adj_list: np.ndarray):
    """
    :param g_nx: graph to which the global nodes' edges are added
    :param feat_data: feature matrix of the existing nodes
    :param adj_list: adjacency mapping from node id to its set of neighbours
    :param g_feat_data: feature matrix of the global nodes
    :param g_adj_list: adjacency mapping of the global nodes
    :return: the updated graph, feature matrix and adjacency mapping
    """
    feat_data = np.concatenate([feat_data, g_feat_data], 0)
    # adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(g_adj_list)))
    adj_list.update((k, adj_list[k].union(g_adj_list[k])) for k in range(len(feat_data)))
    g_edge_list = [[[k, v] for v in vs] for k, vs in g_adj_list.items()]
    g_edge_list = [x for sublist in g_edge_list for x in sublist]
    g_nx.add_edges_from(g_edge_list)
    return g_nx, feat_data, adj_list
1097becfe88f05008541aaa6c3c074fcd5c3149a
3,786
import os def _load_readme(file_name: str = "README.md") -> str: """ Load readme from a text file. Args: file_name (str, optional): File name that contains the readme. Defaults to "README.md". Returns: str: Readme text. """ with open(os.path.join(_PATH_ROOT, file_name), "r", encoding="utf-8") as file: readme = file.read() return readme
5815ded89c5b952edc8a8b691afe48f99d121be6
3,787
def get_data_collector_instance(args, config): """Get the instance of the data :param args: arguments of the script :type args: Namespace :raises NotImplementedError: no data collector implemented for given data source :return: instance of the specific data collector :rtype: subclass of BaseDataCollector """ if args.data_source == DATA_SOURCE_RSS: return RssDataCollector(args.base_url, config[CONFIG_RSS_HEADER]) elif args.data_source == DATA_SOURCE_REDDIT: return RedditDataCollector(REDDIT_CLIENT_ID, REDDIT_CLIENT_SECRET) elif args.data_source == DATA_SOURCE_TWITTER: return TwitterDataCollector(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET, TWITTER_BEARER_TOKEN) else: raise NotImplementedError
75fda1231e1489da4b0c10473c9f657b143047c1
3,788
def timeIntegration(params):
    """Sets up the parameters for time integration

    :param params: Parameter dictionary of the model
    :type params: dict
    :return: Integrated activity variables of the model
    :rtype: (numpy.ndarray,)
    """
    dt = params["dt"]  # Time step for the Euler integration (ms)
    duration = params["duration"]  # simulation duration (ms)
    RNGseed = params["seed"]  # seed for RNG

    # ------------------------------------------------------------------------
    # local parameters
    # See Papadopoulos et al., Relations between large-scale brain connectivity and effects of regional stimulation
    # depend on collective dynamical state, arXiv, 2020
    tau_exc = params["tau_exc"]  #
    tau_inh = params["tau_inh"]  #
    c_excexc = params["c_excexc"]  #
    c_excinh = params["c_excinh"]  #
    c_inhexc = params["c_inhexc"]  #
    c_inhinh = params["c_inhinh"]  #
    a_exc = params["a_exc"]  #
    a_inh = params["a_inh"]  #
    mu_exc = params["mu_exc"]  #
    mu_inh = params["mu_inh"]  #

    # external input parameters:
    # Parameter of the Ornstein-Uhlenbeck process for the external input (ms)
    tau_ou = params["tau_ou"]
    # Parameter of the Ornstein-Uhlenbeck (OU) process for the external input ( mV/ms/sqrt(ms) )
    sigma_ou = params["sigma_ou"]
    # Mean external excitatory input (OU process) (mV/ms)
    exc_ou_mean = params["exc_ou_mean"]
    # Mean external inhibitory input (OU process) (mV/ms)
    inh_ou_mean = params["inh_ou_mean"]

    # ------------------------------------------------------------------------
    # global coupling parameters

    # Connectivity matrix
    # Interareal relative coupling strengths (values between 0 and 1), Cmat(i,j) connection from jth to ith
    Cmat = params["Cmat"]
    N = len(Cmat)  # Number of nodes
    K_gl = params["K_gl"]  # global coupling strength
    # Interareal connection delay
    lengthMat = params["lengthMat"]
    signalV = params["signalV"]

    if N == 1:
        Dmat = np.zeros((N, N))
    else:
        # Interareal connection delays, Dmat(i,j) Connection from jth node to ith (ms)
        Dmat = dp.computeDelayMatrix(lengthMat, signalV)
        Dmat[np.eye(len(Dmat)) == 1] = np.zeros(len(Dmat))
    Dmat_ndt = np.around(Dmat / dt).astype(int)  # delay matrix in multiples of dt
    params["Dmat_ndt"] = Dmat_ndt

    # ------------------------------------------------------------------------

    # Initialization
    # Floating point issue in np.arange() workaround: use integers in np.arange()
    t = np.arange(1, round(duration, 6) / dt + 1) * dt  # Time variable (ms)

    sqrt_dt = np.sqrt(dt)

    max_global_delay = np.max(Dmat_ndt)
    startind = int(max_global_delay + 1)  # timestep to start integration at

    exc_ou = params["exc_ou"]
    inh_ou = params["inh_ou"]

    exc_ext = params["exc_ext"]
    inh_ext = params["inh_ext"]

    # state variable arrays, have length of t + startind
    # they store initial conditions AND simulated data
    excs = np.zeros((N, startind + len(t)))
    inhs = np.zeros((N, startind + len(t)))

    # ------------------------------------------------------------------------
    # Set initial values
    # if initial values are just a Nx1 array
    if np.shape(params["exc_init"])[1] == 1:
        exc_init = np.dot(params["exc_init"], np.ones((1, startind)))
        inh_init = np.dot(params["inh_init"], np.ones((1, startind)))
    # if initial values are a Nxt array
    else:
        exc_init = params["exc_init"][:, -startind:]
        inh_init = params["inh_init"][:, -startind:]

    # xsd = np.zeros((N,N))  # delayed activity
    exc_input_d = np.zeros(N)  # delayed input to x
    inh_input_d = np.zeros(N)  # delayed input to y

    np.random.seed(RNGseed)

    # Save the noise in the activity array to save memory
    excs[:, startind:] = np.random.standard_normal((N, len(t)))
    inhs[:, startind:] = np.random.standard_normal((N, len(t)))
excs[:, :startind] = exc_init inhs[:, :startind] = inh_init noise_exc = np.zeros((N,)) noise_inh = np.zeros((N,)) # ------------------------------------------------------------------------ return timeIntegration_njit_elementwise( startind, t, dt, sqrt_dt, N, Cmat, K_gl, Dmat_ndt, excs, inhs, exc_input_d, inh_input_d, exc_ext, inh_ext, tau_exc, tau_inh, a_exc, a_inh, mu_exc, mu_inh, c_excexc, c_excinh, c_inhexc, c_inhinh, noise_exc, noise_inh, exc_ou, inh_ou, exc_ou_mean, inh_ou_mean, tau_ou, sigma_ou, )
24d6702a92f82c6cc7fc1a337cd351b54c567e8b
3,789
def is_role_user(session, user=None, group=None): # type: (Session, User, Group) -> bool """ Takes in a User or a Group and returns a boolean indicating whether that User/Group is a component of a service account. Args: session: the database session user: a User object to check group: a Group object to check Throws: AssertionError if neither a user nor a group is provided Returns: whether the User/Group is a component of a service account """ if user is not None: return user.role_user assert group is not None user = User.get(session, name=group.groupname) if not user: return False return user.role_user
3d6b62b1708882b734031d737fa00f29ba9a9f95
3,790
import numpy as np


def argCOM(y):
    """argCOM(y) returns the index of the centre of mass (COM) of y."""
    idx = np.round(np.sum(y/np.sum(y)*np.arange(len(y))))
    return int(idx)
197ac25043b10575efb7405dba12c0d2e6f9976f
3,791
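A worked example for argCOM: for a symmetric profile the centre of mass lands on the middle index, since (0*0 + 1*1 + 2*2 + 3*1 + 4*0) / 4 = 2:

import numpy as np

y = np.array([0.0, 1.0, 2.0, 1.0, 0.0])
assert argCOM(y) == 2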
def fringe(z, z1, z2, rad, a1):
    """
    Approximation to the longitudinal profile of a multipole from a
    permanent magnet assembly; see Wan et al. 2018 for the definition
    and Enge 1964 for the underlying Enge functions.
    """
    zz1 = (z - z1) / (2 * rad / pc.pi)
    zz2 = (z - z2) / (2 * rad / pc.pi)
    fout = (
        (1 / (2 * np.tanh((z2 - z1) / (4 * rad / pc.pi))))
        * (np.tanh(zz1 + a1 * zz1**2) - np.tanh(zz2 - a1 * zz2**2))
    )
    return fout
b1d0138937d1c622809d6f559f17430e89259fed
3,792
import random def random_param_shift(vals, sigmas): """Add a random (normal) shift to a parameter set, for testing""" assert len(vals) == len(sigmas) shifts = [random.gauss(0, sd) for sd in sigmas] newvals = [(x + y) for x, y in zip(vals, shifts)] return newvals
07430572c5051b7142499bcbdbc90de5abfcbd4d
3,793
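A usage sketch for random_param_shift; seeding the RNG makes the perturbation reproducible in tests:

import random

random.seed(0)
vals = [1.0, 2.0, 3.0]
sigmas = [0.1, 0.1, 0.1]
shifted = random_param_shift(vals, sigmas)
assert len(shifted) == len(vals)  # shape preserved, each value shifted by N(0, sigma)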
def compute_encrypted_request_hash(caller):
    """
    Compute the encrypted request hash.
    :return: encrypted request hash
    """
    first_string = get_parameter(caller.params_obj, "requesterNonce") or ""
    worker_order_id = get_parameter(caller.params_obj, "workOrderId") or ""
    worker_id = get_parameter(caller.params_obj, "workerId") or ""
    workload_id = get_parameter(caller.params_obj, "workloadId") or ""
    requester_id = get_parameter(caller.params_obj, "requesterId") or ""
    requester_id = str(requester_id)
    first_string += \
        worker_order_id + worker_id + workload_id + requester_id
    concat_hash = first_string.encode("UTF-8")
    hash_1 = crypto_utils.compute_message_hash(concat_hash)
    in_data = get_parameter(caller.params_obj, "inData")
    out_data = get_parameter(caller.params_obj, "outData")
    hash_2 = bytearray()
    if in_data is not None:
        hash_2 = compute_hash_string(in_data)
    hash_3 = bytearray()
    if out_data is not None:
        hash_3 = compute_hash_string(out_data)
    final_string = hash_1 + hash_2 + hash_3
    caller.final_hash = crypto_utils.compute_message_hash(final_string)
    encrypted_request_hash = crypto_utils.byte_array_to_hex(
        crypto_utils.encrypt_data(
            caller.final_hash, caller.session_key,
            caller.session_iv))
    return encrypted_request_hash
cf87c354df550b142030781e8b84ec1cb385489f
3,794
def translate_line_test(string): """ Translates raw log line into sequence of integer representations for word tokens with sos and eos tokens. :param string: Raw log line from auth_h.txt :return: (list) Sequence of integer representations for word tokens with sos and eos tokens. """ data = string.split(",") time = int(data[0]) # could be used to make categorical variables for day of week and time of day. src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc = split_line(string) src_user = lookup(src_user, word_token_inds, None) src_domain = lookup(src_domain, word_token_inds, domain_counts) if dst_user.startswith('U'): dst_user = lookup(dst_user, word_token_inds, None) else: dst_user = lookup(dst_user, word_token_inds, pc_counts) dst_domain = lookup(dst_domain, word_token_inds, domain_counts) src_pc = lookup(src_pc, word_token_inds, pc_counts) dst_pc = lookup(dst_pc, word_token_inds, pc_counts) if data[5].startswith("MICROSOFT_AUTH"): # Deals with file corruption for this value. data[5] = "MICROSOFT_AUTH" auth_type = lookup(data[5], word_token_inds, None) logon_type = lookup(data[6], word_token_inds, None) auth_orient = lookup(data[7], word_token_inds, None) success = lookup(data[8].strip(), word_token_inds, None) return "%s %s %s %s %s %s %s %s %s %s %s %s\n" % (str(sos), src_user, src_domain, dst_user, dst_domain, src_pc, dst_pc, auth_type, logon_type, auth_orient, success, str(eos))
d311eb9c6b398391724e868071d89f2f6c442912
3,795
def preprocess_signal(signal, sample_rate): """ Preprocess a signal for input into a model Inputs: signal: Numpy 1D array containing waveform to process sample_rate: Sampling rate of the input signal Returns: spectrogram: STFT of the signal after resampling to 10kHz and adding preemphasis. X_in: Scaled STFT input feature for the model """ # Compute the spectrogram of the signal spectrogram = make_stft_features(signal, sample_rate) # Get the magnitude spectrogram mag_spec = np.abs(spectrogram) # Scale the magnitude spectrogram with a square root squashing, and percent # normalization X_in = np.sqrt(mag_spec) m = X_in.min() M = X_in.max() X_in = (X_in - m)/(M - m) return spectrogram, X_in
d2b6c5cb700ae877f7bf8bd4b5a772471e69a75d
3,796
def get_frameheight(): """return fixed height for extra panel """ return 120
3bd810eea77af15527d3c1df7ab0b788cfe90000
3,797
def default_heart_beat_interval() -> int: """ :return: in seconds """ return 60
58171c8fb5632aa2aa46de8138828cce2eaa4d33
3,798
import re


def email_valid(email):
    """test for valid email address
    >>> email_valid('[email protected]')
    True
    >>> email_valid('test@@testco.com')
    False
    >>> email_valid('test@testco')
    False
    """
    if email == '':
        return True
    email_re = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
        r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
        r'\\[\001-\011\013\014\016-\177])*"'  # quoted-string
        r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
        r'+[A-Z]{2,6}\.?$', re.IGNORECASE)  # domain
    return bool(email_re.match(email))
c76a621647595c741b1da71734a34372919e800f
3,799