def my_model_builder(my_model: MyModel) -> KerasModel:
    """Build the siamese network model."""
    input_1 = layers.Input(my_model.input_shape)
    input_2 = layers.Input(my_model.input_shape)

    # As mentioned above, siamese networks share weights between
    # tower networks (sister networks). To allow this, we use the
    # same embedding network for both tower networks.
    embedding_network = build_model_tower(my_model)
    tower_1 = embedding_network(input_1)
    tower_2 = embedding_network(input_2)

    merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
    normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
    output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
    keras_model = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
    keras_model.compile(
        loss=my_model.loss, optimizer=my_model.optimizer, metrics=my_model.metrics
    )
    return keras_model
53b32468469e7fc8cbc8f2776a1711181363bf60
6,600
from datetime import datetime

# NOTE: Embed is assumed to come from discord.py.
from discord import Embed


def create_embed(
    title,
    description,
    fields=None,
    colour=None,
    timestamp=None,
    author=None,
    author_icon=None,
    thumbnail=None,
    image=None,
    footer=None,
):
    """Create an Embed

    Args:
        title (str): Set title
        description (str): Set description
        fields (list of tuples): Set fields
        colour (int, optional): Set color. Defaults to None.
        timestamp (datetime, optional): Set timestamp. Defaults to current time.
        author (str, optional): Set author. Defaults to None.
        author_icon (str, optional): Set author icon using image url. Defaults to None.
        thumbnail (str, optional): Set thumbnail using image url. Defaults to None.
        image (str, optional): Set image using image url. Defaults to None.
        footer (str, optional): Set footer. Defaults to None.

    Returns:
        embed: returns an embed
    """
    # Evaluate the default timestamp at call time; a `datetime.utcnow()`
    # default argument would be evaluated only once, at import time.
    if timestamp is None:
        timestamp = datetime.utcnow()

    embed = Embed(
        title=title, description=description, colour=colour, timestamp=timestamp
    )
    if fields is not None:
        for name, value, inline in fields:
            embed.add_field(name=name, value=value, inline=inline)
    embed.set_author(name=author, icon_url=author_icon)
    embed.set_footer(text=footer)
    embed.set_thumbnail(url=thumbnail)
    embed.set_image(url=image)
    return embed
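A quick usage sketch (illustrative values; assumes `create_embed` and a discord.py `Embed` are available as above). Fields are passed as (name, value, inline) tuples:

embed = create_embed(
    title="Server stats",
    description="Daily summary",
    fields=[("Members", "1024", True), ("Online", "87", True)],
    footer="updated hourly",
)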
741238fc50e2eda98a5cbfeab0c5c4d5cb3adbf2
6,601
import cv2
import numpy as np


def autoCalibration(I):
    """Returns horizontal and vertical factors by which every distance in
    pixels should be multiplied in order to obtain the equivalent distance in
    millimeters.
    This program assumes that the scale presents clear axis ticks and that the
    distance between the two biggest ticks is equal to 10 mm. It also assumes
    that both horizontal and vertical scales are present in the upper right
    quarter of image I.

    Args:
        I (array): single-channel image. If I is an RGB image, it is
        transformed to a grayscale image.

    Returns:
        calibFactorX (double) and calibFactorY (double) are respectively the
        horizontal and vertical calibration factors
    """

    # Check if I is a single-channel image
    if len(I.shape) > 2:
        I = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)

    length, width = I.shape[0], I.shape[1]

    # Cropping with empirical percentages and binarization of the selection
    # !!! EMPIRICAL
    TCP = 0.1   # Top cropping percentage - empirical percentage
    LCP = 0.5   # Left cropping percentage
    BCP = 0.65  # Bottom cropping percentage
    RCP = 0.1   # Right cropping percentage
    Scale_image = I[int(TCP * length):length - int(BCP * length),
                    int(LCP * width):width - int(RCP * width)]
    Binar_I = cv2.threshold(Scale_image, 220., 255, cv2.THRESH_BINARY)[1]

    # Selection of the biggest axis ticks: contours of white objects are found
    # as well as minimal rectangles encapsulating each object. Conditions on
    # the size of these contours/bounding rectangles enable the removal of
    # objects that are not the biggest ticks
    contours = cv2.findContours(Binar_I, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_NONE)[0]
    contours_size = [contours[i].size for i in range(len(contours))]
    BoundingRectangles = []
    for i in range(len(contours)):
        # condition to stop considering the objects corresponding to figures
        if contours_size[i] <= 1.7 * np.mean(contours_size):
            p1, p2, l1, l2 = cv2.boundingRect(contours[i])
            # rectangles identified with point (p1,p2) and vectors (l2,0), (0,l1)
            BoundingRectangles.append([i, (p1, p2, l1, l2), 2. * l1 + 2. * l2])
    MeanPerim = np.mean([BoundingRectangles[i][2]
                         for i in range(len(BoundingRectangles))])
    # removal of points and small dashes
    Dashes = [BoundingRectangles[i] for i in range(len(BoundingRectangles))
              if BoundingRectangles[i][2] > MeanPerim]

    # Calculation of the minimal distances between two horizontal ticks and
    # two vertical ticks; browse all detected axis ticks
    horiz = 10000000.
    vertic = 10000000.
    for i in range(0, len(Dashes) - 1):
        ref_Dash = Dashes[i][1]
        for j in range(i + 1, len(Dashes)):
            if len(set(list(range(Dashes[j][1][0], Dashes[j][1][0] + Dashes[j][1][2])))
                   .intersection(list(range(ref_Dash[0], ref_Dash[0] + ref_Dash[2])))) > 2:
                h = abs(ref_Dash[1] + ref_Dash[3] - Dashes[j][1][1] - Dashes[j][1][3])
                if h < vertic:
                    vertic = h
            if len(set(list(range(Dashes[j][1][1], Dashes[j][1][1] + Dashes[j][1][3])))
                   .intersection(list(range(ref_Dash[1], ref_Dash[1] + ref_Dash[3])))) > 2:
                h = abs(ref_Dash[0] - Dashes[j][1][0])
                if h < horiz:
                    horiz = h

    # Factors to convert distance in pixels into distance in millimeters
    if horiz == 10000000. or horiz == 0:
        calibFactorX = None
    else:
        calibFactorX = 10. / horiz
    if vertic == 10000000. or vertic == 0:
        calibFactorY = None
    else:
        calibFactorY = 10. / vertic

    '''visual check
    for d in range(len(Dashes)):
        p1 = Dashes[d][1][0]
        p2 = Dashes[d][1][1]
        l1 = Dashes[d][1][2]
        l2 = Dashes[d][1][3]
        for l in range(p1, p1 + l1 + 1):
            Binar_I[p2, l] = 150
            Binar_I[p2 + l2, l] = 150
        for c in range(p2, p2 + l2 + 1):
            Binar_I[c, p1] = 150
            Binar_I[c, p1 + l1] = 150
    cv2.imshow('Binary image', Binar_I)
    cv2.waitKey(0) & 0xFF
    cv2.destroyAllWindows()
    '''

    return calibFactorX, calibFactorY
30016c41a8b21531cfd277a669a6b16b01322387
6,602
def reverse_handler(handler_input):
    """Check if a verb is provided in slot values. If provided, look up the
    paradigm in the irregular_verbs file. If not, ask the user to provide
    the verb again.
    """

    # iterate over the dictionaries in irregular_verbs.py and look for the
    # verb in the slot. If it finds it, return the dictionary.
    # (The original body read the enclosing scope's names instead of its own
    # parameters; using the parameters keeps the same behaviour given how it
    # is called below.)
    def get_verb(irregular_verbs, filled_verboconiugato_slot):
        for dictionary in irregular_verbs["verbs"]:
            if (dictionary["PS"] == filled_verboconiugato_slot
                    or dictionary["PP"] == filled_verboconiugato_slot):
                return dictionary

    # type: (HandlerInput) -> Response
    attribute_manager = handler_input.attributes_manager
    session_attr = attribute_manager.session_attributes
    slots = handler_input.request_envelope.request.intent.slots

    if verboconiugato_slot in slots:
        # if slot is filled
        verboconiugato = slots[verboconiugato_slot].value
        handler_input.attributes_manager.session_attributes[
            verboconiugato_slot_key] = verboconiugato
        # verbo is equal to what I said, e.g. "know"

        # execute the function based on the verb the user asks for. askedVerb
        # becomes equal to the dictionary returned by the function
        askedVerb = get_verb(irregular_verbs.IRREGULAR_VERBS, verboconiugato)

        if verboconiugato == "read" and askedVerb:
            baseVerb = askedVerb["Base"]
            pastSimple = askedVerb["PS"]
            pastPart = askedVerb["PP"]
            traduzione = askedVerb["Italiano"]
            speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo "
                      "<voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. "
                      "Il suo paradigma è <voice name='Emma'><lang xml:lang='en-GB'>to {}, "
                      "<phoneme alphabet='ipa' ph='rɛd'>{}</phoneme>, "
                      "<phoneme alphabet='ipa' ph='rɛd'>{}</phoneme></lang></voice>. "
                      "Significa <phoneme alphabet='ipa' ph='ˈlɛdʤere'>{}</phoneme>."
                      .format(verboconiugato, baseVerb, baseVerb, pastSimple,
                              pastPart, traduzione))
            reprompt = "Cosa vuoi chiedermi?"
            handler_input.response_builder.set_should_end_session(True)
        elif askedVerb:
            baseVerb = askedVerb["Base"]
            pastSimple = askedVerb["PS"]
            pastPart = askedVerb["PP"]
            traduzione = askedVerb["Italiano"]
            speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo "
                      "<voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. "
                      "Il suo paradigma è "
                      "<voice name='Emma'><lang xml:lang='en-GB'>to {}, {}, {}</lang></voice>. "
                      "Significa {}.".format(verboconiugato, baseVerb, baseVerb,
                                             pastSimple, pastPart, traduzione))
            reprompt = "Cosa vuoi chiedermi?"
            handler_input.response_builder.set_should_end_session(True)
        else:
            speech = ("Non trovo il verbo <lang xml:lang='en-GB'>{}</lang>. "
                      "Se è corretto, allora la sua coniugazione è regolare"
                      .format(verboconiugato))
            reprompt = "Cosa vuoi chiedermi?"
            handler_input.response_builder.set_should_end_session(True)
    # if slot isn't filled, repeat helptext
    else:
        speech = "Non ho capito." + help_text
        # define reprompt here too, so the .ask(reprompt) call below
        # does not hit an undefined name
        reprompt = help_text
        handler_input.response_builder.ask(help_text)

    handler_input.response_builder.speak(speech).ask(
        reprompt).set_should_end_session(True)
    return handler_input.response_builder.response
f1b49b88314f3218af03c6910d72729f888f2a11
6,603
def load_wrf_data(filename):
    """Load required data from the WRF output file: filename."""
    base_data = load_vars(filename, wrfvars)
    skin_t = load_tskin(filename, tsvar, landmask)
    base_data.append(skin_t)
    atts = mygis.read_atts(filename, global_atts=True)
    return Bunch(data=base_data, global_atts=atts)
da6439d3d4adfc8b84d5bf5911aa5e4b9d628baa
6,604
import numpy as np
import cvxopt


def DensityRatio_QP(X_den, X_num, kernel, g, v_matrix, ridge=1e-3):
    """
    The function computes a model of the density ratio.
    The function is in the form $A^T K$.
    It returns the coefficients $\alpha_i$ and the bias term b.
    """
    l_den, d = X_den.shape
    l_num, d_num = X_num.shape
    # TODO: Check d == d_num
    ones_num = np.matrix(np.ones(shape=(l_num, 1)))
    zeros_den = np.matrix(np.zeros(shape=(l_den, 1)))

    gram = kernel(X_den)
    K = np.matrix(gram + ridge * np.eye(l_den))
    # K = np.matrix(gram)  # No ridge
    print("K max, min: %e, %e" % (np.max(K), np.min(K)))

    data = np.concatenate((X_den, X_num))
    if callable(v_matrix):
        V = np.matrix(v_matrix(X_den, X_den, data))
        V_star = np.matrix(v_matrix(X_den, X_num, data))  # l_den by l_num
    else:
        return -1
    print("V max,min: %e, %e" % (np.max(V), np.min(V)))
    print("V_star max,min: %e, %e" % (np.max(V_star), np.min(V_star)))

    tgt1 = K * V * K
    print("K*V*K max, min: %e, %e" % (np.max(tgt1), np.min(tgt1)))
    tgt2 = g * K
    print("g*K max, min: %e, %e" % (np.max(tgt2), np.min(tgt2)))
    P = cvxopt.matrix(2 * (tgt1 + tgt2))
    q_ = -2 * (l_den / l_num) * (K * V_star * ones_num)
    print("q max, min: %e, %e" % (np.max(q_), np.min(q_)))
    q = cvxopt.matrix(q_)

    # Let's construct the inequality constraints: G and h
    G = cvxopt.matrix(-K)
    h = cvxopt.matrix(zeros_den)
    # G = cvxopt.matrix(np.vstack((-K, -np.eye(l_den))))
    # h = cvxopt.matrix(np.vstack((zeros_den, zeros_den)))

    # Let's construct the equality constraints
    A = cvxopt.matrix((1 / l_den) * K * V_star * ones_num).T
    b = cvxopt.matrix(np.ones(1))

    return cvxopt.solvers.qp(P, q, G, h, A, b, options=dict(maxiters=50))
945f0fe26a1cea10a85e20944a5fe0d0f9c6427b
6,605
def sanitize_date(date_dict: dict):
    """
    Function to take the date values entered by the user and check their
    validity. If valid it returns True; otherwise it sets the values to None
    and returns False.

    :param date_dict:
    :return:
    """
    month = date_dict["month"]
    day = date_dict["day"]
    year = date_dict["year"]

    date = [month, day, year]
    date_is_valid = not any([component is None for component in date])
    if date_is_valid:
        date_is_valid &= not (month == 2 and day > 29)
        date_is_valid &= not (month in [4, 6, 9, 11] and day > 30)

        is_leap_year = (year % 4) == 0
        is_leap_year &= ((year % 100) != 0 or (year % 400) == 0)
        date_is_valid &= not (month == 2 and day == 29 and not is_leap_year)

    if not date_is_valid:
        date_dict["month"] = date_dict["day"] = date_dict["year"] = None
        return False
    return True
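A usage sketch showing the in-place reset on invalid input (values are illustrative):

d = {"month": 2, "day": 29, "year": 2021}  # 2021 is not a leap year
print(sanitize_date(d))  # False
print(d)                 # {'month': None, 'day': None, 'year': None}
d = {"month": 2, "day": 29, "year": 2020}  # 2020 is a leap year
print(sanitize_date(d))  # True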
c8cc01c8c1259ab8c4b263e36ae9f85a95356017
6,606
def create_scale(tonic, pattern, octave=1):
    """
    Create an octave-repeating scale from a tonic note
    and a pattern of intervals.

    Args:
        tonic: root note (midi note number)
        pattern: pattern of intervals (list of numbers representing
            intervals in semitones)
        octave: span of scale (in octaves)

    Returns:
        list of midi notes in the scale
    """
    assert sum(pattern) == 12
    scale = [tonic]
    note = tonic
    for o in range(octave):
        for i in pattern:
            note += i
            if note <= 127:
                scale.append(note)
    return scale
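For example, a major-scale interval pattern (which sums to 12 semitones, satisfying the assert) starting at middle C (MIDI 60):

MAJOR = [2, 2, 1, 2, 2, 2, 1]
print(create_scale(60, MAJOR))
# [60, 62, 64, 65, 67, 69, 71, 72]
print(create_scale(60, MAJOR, octave=2))  # two octaves, ending at 84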
f9337289fda2e1b08cd371d3e91cc5a23c9c9822
6,607
def _qfloat_append(qf, values, axis=None):
    """Implement np.append for qfloats."""
    # First, convert to the same unit.
    qf1, qf2 = same_unit(qf, values, func=np.append)
    nominal = np.append(qf1.nominal, qf2.nominal, axis)
    std = np.append(qf1.uncertainty, qf2.uncertainty, axis)
    return QFloat(nominal, std, qf1.unit)
46049a2ba43997578ae502acd395cfa767e623ca
6,608
from typing import List

import numpy as np


def filter_by_mean_color(img: np.ndarray, circles: List[Circle], threshold=170) -> List[Circle]:
    """Filter circles, keeping only those covering an area whose mean pixel
    value is above threshold."""
    filtered = []
    for circle in circles:
        box = Box(circle=circle)
        area = box.get_region(img)
        if np.mean(area) > threshold:
            filtered.append(circle)
    return filtered
d23f92d363cd4df70ba0d0d01450865546d7f289
6,609
def ParseSortByArg(sort_by=None):
    """Parses and creates the sort by object from parsed arguments.

    Args:
        sort_by: list of strings, passed in from the --sort-by flag.

    Returns:
        A parsed sort by string ending in asc or desc.
    """
    if not sort_by:
        return None
    fields = []
    for field in sort_by:
        if field.startswith('~'):
            field = field.lstrip('~') + ' desc'
        else:
            field += ' asc'
        fields.append(field)
    return ','.join(fields)
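A quick check of the flag parsing (illustrative field names):

print(ParseSortByArg(['name', '~creationTimestamp']))
# name asc,creationTimestamp desc
print(ParseSortByArg())  # None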
cc2c40d8d810396420e5c3ede0d65159ed21d6bc
6,610
def dense_to_text(decoded, originals):
    """
    Convert a dense, integer encoded `tf.Tensor` into a readable string.
    Create a summary comparing the decoded plaintext with a given original string.

    Args:
        decoded (np.ndarray):
            Integer array, containing the decoded sequences.
        originals (np.ndarray):
            String tensor, containing the original input string for comparison.
            `originals` can be an empty tensor.

    Returns:
        np.ndarray:
            1D string Tensor containing only the decoded text outputs:
                [decoded_string_0, ..., decoded_string_N]
        np.ndarray:
            2D string Tensor with layout:
                [[decoded_string_0, original_string_0], ...
                 [decoded_string_N, original_string_N]]
    """
    decoded_strings = []
    original_strings = []

    for d in decoded:
        decoded_strings.append(''.join([itoc(i) for i in d]))

    if len(originals) > 0:
        for o in originals:
            original_strings.append(''.join([c for c in o.decode('utf-8')]))
    else:
        original_strings = ['n/a'] * len(decoded_strings)

    # `np.object` is deprecated; the plain `object` dtype behaves the same.
    decoded_strings = np.array(decoded_strings, dtype=object)
    original_strings = np.array(original_strings, dtype=object)

    summary = np.vstack([decoded_strings, original_strings])
    return np.array(decoded_strings), summary
d7d4ec6ef2653a4e9665711201cef807a6c9830b
6,611
def admin_view_all_working_curriculums(request):
    """Views all the working curriculums offered by the institute."""
    user_details = ExtraInfo.objects.get(user=request.user)
    des = HoldsDesignation.objects.all().filter(user=request.user).first()
    if str(des.designation) in ("student", "Associate Professor",
                                "Professor", "Assistant Professor"):
        return HttpResponseRedirect('/programme_curriculum/mainpage/')
    elif str(request.user) == "acadadmin":
        pass

    curriculums = Curriculum.objects.filter(working_curriculum=1)
    return render(request,
                  'programme_curriculum/acad_admin/admin_view_all_working_curriculums.html',
                  {'curriculums': curriculums})
8ba99fe5712c8a93b62e2ab0c9e22594a442d9bd
6,612
def getEmuAtVa(vw, va, maxhit=None):
    """
    Build and run an emulator to the given virtual address
    from the function entry point.

    (most useful for state analysis. kinda heavy though...)
    """
    fva = vw.getFunction(va)
    if fva is None:
        return None

    cbva, cbsize, cbfva = vw.getCodeBlock(va)
    fgraph = v_graphutil.buildFunctionGraph(vw, fva)

    # Just take the first one off the iterator...
    for path in v_graphutil.getCodePathsTo(fgraph, cbva):
        emu = vw.getEmulator()
        opcodes = v_graphutil.getOpsFromPath(vw, fgraph, path)
        for op in opcodes:
            if op.va == va:
                break
            emu.executeOpcode(op)
        return emu
bea812a1d74b39e9ba83fde56bf90e4055425b89
6,613
def _create_test_validity_conditional(metric):
    """Creates BigQuery SQL clauses to specify validity rules for an NDT test.

    Args:
        metric: (string) The metric for which to add the conditional.

    Returns:
        (string) A set of SQL clauses that specify conditions an NDT test must
        meet to be considered a valid, completed test.
    """
    # NDT test is supposed to last 10 seconds, give some buffer for tests that
    # ended slightly before 10 seconds.
    MIN_DURATION = _seconds_to_microseconds(9)

    # Tests that last > 1 hour are likely erroneous.
    MAX_DURATION = _seconds_to_microseconds(3600)

    # A test that did not exchange at least 8,192 bytes is likely erroneous.
    MIN_BYTES = 8192

    # web100 state variable constants from
    # http://www.web100.org/download/kernel/tcp-kis.txt
    STATE_CLOSED = 1
    STATE_ESTABLISHED = 5
    STATE_TIME_WAIT = 11

    # For RTT metrics, exclude results of tests with 10 or fewer round trip
    # time samples, because there are not enough samples to accurately
    # estimate the RTT.
    MIN_RTT_SAMPLES = 10

    conditions = []

    # Must have completed the TCP three-way handshake.
    conditions.append((
        '(web100_log_entry.snap.State = {state_closed}\n\t'
        '\tOR (web100_log_entry.snap.State >= {state_established}\n\t'
        '\t\tAND web100_log_entry.snap.State <= {state_time_wait}))').format(
            state_closed=STATE_CLOSED,
            state_established=STATE_ESTABLISHED,
            state_time_wait=STATE_TIME_WAIT))

    # Must have been determined to be unaffected by platform error.
    conditions.append('blacklist_flags == 0')

    if _is_server_to_client_metric(metric):
        # Must leave slow start phase of TCP, indicated by reaching
        # congestion at least once.
        conditions.append('web100_log_entry.snap.CongSignals > 0')

        # Must send at least the minimum number of bytes.
        conditions.append('web100_log_entry.snap.HCThruOctetsAcked >= %d' %
                          MIN_BYTES)

        # Must last for at least the minimum test duration.
        conditions.append(
            ('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
             '\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
             '\tweb100_log_entry.snap.SndLimTimeSnd) >= %u') % MIN_DURATION)

        # Must not exceed the maximum test duration.
        conditions.append(
            ('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
             '\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
             '\tweb100_log_entry.snap.SndLimTimeSnd) < %u') % MAX_DURATION)

        # Exclude results of tests with fewer than 10 round trip time samples,
        # because there are not enough samples to accurately estimate the RTT.
        if metric == 'minimum_rtt' or metric == 'average_rtt':
            conditions.append('web100_log_entry.snap.CountRTT > %u' %
                              MIN_RTT_SAMPLES)
    else:
        # Must receive at least the minimum number of bytes.
        conditions.append('web100_log_entry.snap.HCThruOctetsReceived >= %u' %
                          MIN_BYTES)

        # Must last for at least the minimum test duration.
        conditions.append('web100_log_entry.snap.Duration >= %u' %
                          MIN_DURATION)

        # Must not exceed the maximum test duration.
        conditions.append('web100_log_entry.snap.Duration < %u' % MAX_DURATION)

    return '\n\tAND '.join(conditions)
8c65150bdbed3ba75546fc64d8b322d9950339c1
6,614
from typing import Union
from typing import Sequence

import tensorflow as tf


def tfds_train_test_split(
    tfds: tf.data.Dataset,
    test_frac: float,
    dataset_size: Union[int, str],
    buffer_size: int = 256,
    seed: int = 123,
) -> Sequence[Union[tf.data.Dataset, tf.data.Dataset, int, int]]:
    """
    !!! Does not work reliably: the split seems to depend on the hardware.
    Open an issue on github/tensorflow?

    Split a tf-dataset into a train and a test dataset.
    https://stackoverflow.com/questions/48213766/split-a-dataset-created-by-tensorflow-dataset-api-in-to-train-and-test

    Args:
        tfds (tf.data.Dataset): Tf-dataset, which will be split into a train-
            and a testset.
        test_frac (float): Fraction of the data to hold out for testing.

    Returns:
        [tf.data.Dataset, tf.data.Dataset, int, int]: Returns train and test
        datasets as well as the absolute sizes of the full and the train
        dataset.
    """
    logger.warning(
        "This method of data splitting does not guarantee the same split on "
        "every machine.")
    full_ds_size = None
    if dataset_size == "auto":
        logger.warning(
            "dataset_size='auto': In order to calculate the size of the "
            "dataset, all samples will be loaded.")
        full_ds_size = get_tfds_size(tfds)
    elif isinstance(dataset_size, int):
        full_ds_size = dataset_size

    logger.info(f"Using following seed to shuffle data: {seed}")
    tfds = tfds.shuffle(buffer_size, reshuffle_each_iteration=False, seed=seed)

    train_ds_fraction = 1.0 - test_frac
    train_ds_size = int(train_ds_fraction * full_ds_size)
    # Both lines need the f-prefix; in the original the second string was a
    # plain literal, so its placeholder was printed verbatim.
    logger.info(f"train dataset size: {train_ds_size}, val dataset size: "
                f"{full_ds_size - train_ds_size}")
    train_dataset = tfds.take(train_ds_size)
    test_dataset = tfds.skip(train_ds_size)
    return train_dataset, test_dataset, full_ds_size, train_ds_size
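A usage sketch (assumes a configured `logger` and, for dataset_size='auto', a `get_tfds_size` helper in scope):

import tensorflow as tf

ds = tf.data.Dataset.range(100)
train_ds, test_ds, full_size, train_size = tfds_train_test_split(
    ds, test_frac=0.2, dataset_size=100)
# full_size == 100, train_size == 80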
8bbb554eca8a09716279a5d818e1cc3e7bd5ad16
6,615
from datetime import timedelta


def seconds_to_hms(seconds):
    """
    Convert seconds to H:M:S format.
    Works for periods over 24H also.
    """
    # The original imported `datetime` from datetime and then referenced
    # `datetime.timedelta`, which raises AttributeError; import timedelta
    # directly instead.
    return timedelta(seconds=seconds)
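For example:

print(seconds_to_hms(3661))   # 1:01:01
print(seconds_to_hms(93784))  # 1 day, 2:03:04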
e862be76c6ef6b76f8e4f6351e033193ddefd5b8
6,616
import argparse
import multiprocessing


def parse_args(args):
    """
    Takes in the command-line arguments list (args), and returns a nice
    argparse result with fields for all the options.

    Borrows heavily from the argparse documentation examples:
    <http://docs.python.org/library/argparse.html>
    """
    # The command line arguments start with the program name, which we don't
    # want to treat as an argument for argparse. So we remove it.
    args = args[1:]

    # Construct the parser (which is stored in parser)
    # Module docstring lives in __doc__
    # See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847
    # And a formatter class so our examples in the docstring look good. Isn't
    # it convenient how we already wrapped it to 80 characters?
    # See http://docs.python.org/library/argparse.html#formatter-class
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Now add all the options to it
    parser.add_argument("fasta", type=str,
                        help="name of the FASTA to read; must be indexable")
    parser.add_argument("-n", type=int, default=10000,
                        help="the number of k-mers to count")
    parser.add_argument("-k", type=int, default=150,
                        help="the length of each k-mer")
    parser.add_argument("--thread_count", type=int,
                        default=multiprocessing.cpu_count(),
                        help="number of k-mer counting threads to use")
    parser.add_argument("--batch_size", type=int, default=10000,
                        help="number of forward-strand k-mer candidates to "
                             "count in each batch")
    parser.add_argument("--bloom_error", type=float, default=1E-4,
                        help="error rate on the bloom filter")

    return parser.parse_args(args)
9ab9bad96af383a8d7441d7ccb0da10ae68dafb5
6,617
def _add_spot_2d(image, ground_truth, voxel_size_yx, precomputed_gaussian):
    """Add a 2-d gaussian spot in an image.

    Parameters
    ----------
    image : np.ndarray, np.uint
        A 2-d image with shape (y, x).
    ground_truth : np.ndarray
        Ground truth array with shape (nb_spots, 4).
        - coordinate_y
        - coordinate_x
        - sigma_yx
        - amplitude
    voxel_size_yx : int or float
        Size of a voxel on the yx plan, in nanometer.
    precomputed_gaussian : Tuple[np.ndarray]
        Tuple with one tables of precomputed values for the erf, with shape
        (nb_value, 2). One table per dimension.

    Returns
    -------
    new_image : np.ndarray, np.uint
        A 2-d image with simulated spots and shape (y, x).
    """
    # cast image
    original_dtype = image.dtype
    image = image.astype(np.float64)

    # compute reference spot shape
    max_sigma = max(ground_truth[:, 2])
    radius_yx, _ = stack.get_radius(
        voxel_size_z=None, voxel_size_yx=voxel_size_yx,
        psf_z=None, psf_yx=max_sigma)
    radius_yx = np.ceil(radius_yx).astype(np.int64)
    yx_shape = radius_yx * 2 + 1

    # build a grid to represent a spot image
    image_spot = np.zeros((yx_shape, yx_shape), dtype=np.uint8)
    grid = detection.initialize_grid(
        image_spot=image_spot,
        voxel_size_z=None,
        voxel_size_yx=voxel_size_yx,
        return_centroid=False)

    # pad image
    image_padded = np.pad(image, ((radius_yx, radius_yx),
                                  (radius_yx, radius_yx)), mode="constant")

    # loop over every spot
    for (coord_y, coord_x, sigma_yx, amp) in ground_truth:

        # simulate spot signal
        position_spot = np.asarray((radius_yx, radius_yx), dtype=np.int64)
        position_spot = np.ravel_multi_index(
            position_spot, dims=image_spot.shape)
        position_spot = list(grid[:, position_spot])
        simulated_spot = detection.gaussian_2d(
            grid=grid,
            mu_y=position_spot[0],
            mu_x=position_spot[1],
            sigma_yx=sigma_yx,
            voxel_size_yx=voxel_size_yx,
            psf_amplitude=amp,
            psf_background=0,
            precomputed=precomputed_gaussian)
        simulated_spot = np.reshape(simulated_spot, image_spot.shape)

        # add spot
        coord_y_min = int(coord_y)
        coord_y_max = int(coord_y + 2 * radius_yx + 1)
        coord_x_min = int(coord_x)
        coord_x_max = int(coord_x + 2 * radius_yx + 1)
        image_padded[coord_y_min:coord_y_max,
                     coord_x_min:coord_x_max] += simulated_spot

    # unpad image
    image = image_padded[radius_yx:-radius_yx, radius_yx:-radius_yx]
    image_raw = np.reshape(image, image.size)

    # sample Poisson distribution from gaussian values
    image_raw = np.random.poisson(lam=image_raw, size=image_raw.size)

    # reshape and cast image
    new_image = np.reshape(image_raw, image.shape)
    new_image = np.clip(new_image, 0, np.iinfo(original_dtype).max)
    new_image = new_image.astype(original_dtype)

    return new_image
45c32a181df1d0239b0ad872d7c0ad83862338ed
6,618
import requests


def bing(text, bot):
    """<query> - returns the first bing search result for <query>"""
    api_key = bot.config.get("api_keys", {}).get("bing_azure")

    # handle NSFW
    show_nsfw = text.endswith(" nsfw")
    # remove "nsfw" from the input string after checking for it
    if show_nsfw:
        text = text[:-5].strip().lower()

    rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER

    if not api_key:
        return "Error: No Bing Azure API details."

    # why are these all differing formats and why does format have a $?
    # ask microsoft
    params = {
        "Sources": bingify("web"),
        "Query": bingify(text),
        "Adult": bingify(rating),
        "$format": "json"
    }

    request = requests.get(API_URL, params=params, auth=(api_key, api_key))

    # I'm not even going to pretend to know why results are in ['d']['results'][0]
    j = request.json()['d']['results'][0]

    if not j["Web"]:
        return "No results."

    result = j["Web"][0]

    # not entirely sure this even needs un-escaping, but it wont hurt to
    # leave it in
    title = formatting.truncate(unescape(result["Title"]), 60)
    desc = formatting.truncate(unescape(result["Description"]), 150)
    url = unescape(result["Url"])

    return colors.parse('{} -- $(b){}$(b): "{}"'.format(url, title, desc))
5aa5fe7acdc64c815d4a8727b06c13f1e3e3b2ce
6,619
from typing import List

import numpy as np


def single_length_RB(
    RB_number: int, RB_length: int, target: int = 0
) -> List[List[str]]:
    """Given a length and number of repetitions it compiles Randomized
    Benchmarking sequences.

    Parameters
    ----------
    RB_number : int
        The number of sequences to construct.
    RB_length : int
        The number of Cliffords in each individual sequence.
    target : int
        Index of the target qubit

    Returns
    -------
    list
        List of RB sequences.
    """
    S = []
    for _ in range(RB_number):
        seq = np.random.choice(24, size=RB_length - 1) + 1
        seq = np.append(seq, inverseC(seq))
        seq_gates = []
        for cliff_num in seq:
            g = [f"{c}[{target}]" for c in cliffords_decomp[cliff_num - 1]]
            seq_gates.extend(g)
        S.append(seq_gates)
    return S
dda3f5a191c666460fc4c791c33530940986b623
6,620
def decode(text_file_abs_path, threshold=10):
    """
    Decodes a text into a ciphertext.

    Parameters
    ---------
    text_file_abs_path: str

    Returns
    -------
    ciphertext: str
    """
    try:
        with open(text_file_abs_path, "rb") as f:
            text = f.read()
    except Exception:
        return None
    freq_limit = limit_freq_threshold(threshold)
    renamed_ciphertext = ''.join(
        MarkovToolbox.derive_first_letter_of_every_sentence(text))
    ciphertext = revert_renamed_number(renamed_ciphertext, freq_limit)
    if threshold != 10:
        ciphertext = NumericalToolbox.change_base(ciphertext, threshold, 10,
                                                  standard_formatting=False)
    return ciphertext
c0c8c96438baedda43940e2373edc4714511b507
6,621
def templatetag(parser, token):
    """
    Outputs one of the bits used to compose template tags.

    Since the template system has no concept of "escaping", to display one of
    the bits used in template tags, you must use the ``{% templatetag %}`` tag.

    The argument tells which template bit to output:

        ==================  =======
        Argument            Outputs
        ==================  =======
        ``openblock``       ``{%``
        ``closeblock``      ``%}``
        ``openvariable``    ``{{``
        ``closevariable``   ``}}``
        ``openbrace``       ``{``
        ``closebrace``      ``}``
        ``opencomment``     ``{#``
        ``closecomment``    ``#}``
        ==================  =======
    """
    # token.split_contents() isn't useful here because this tag doesn't
    # accept variable as arguments.
    bits = token.contents.split()
    if len(bits) != 2:
        raise TemplateSyntaxError("'templatetag' statement takes one argument")
    tag = bits[1]
    if tag not in TemplateTagNode.mapping:
        raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
                                  " Must be one of: %s" %
                                  (tag, list(TemplateTagNode.mapping)))
    return TemplateTagNode(tag)
3b441ec3035f8efde9fd2507ab83c83ec5940a7a
6,622
import numpy as np
import jax.numpy as jnp


def reflected_phase_curve(phases, omega, g, a_rp):
    """
    Reflected light phase curve for a homogeneous sphere by
    Heng, Morris & Kitzmann (2021).

    Parameters
    ----------
    phases : `~np.ndarray`
        Orbital phases of each observation defined on (0, 1)
    omega : tensor-like
        Single-scattering albedo as defined in
    g : tensor-like
        Scattering asymmetry factor, ranges from (-1, 1).
    a_rp : float, tensor-like
        Semimajor axis scaled by the planetary radius

    Returns
    -------
    flux_ratio_ppm : tensor-like
        Flux ratio between the reflected planetary flux and the stellar flux
        in units of ppm.
    A_g : tensor-like
        Geometric albedo derived for the planet given {omega, g}.
    q : tensor-like
        Integral phase function
    """
    # Convert orbital phase on (0, 1) to "alpha" on (0, np.pi)
    alpha = jnp.asarray(2 * np.pi * phases - np.pi)
    abs_alpha = jnp.abs(alpha)
    alpha_sort_order = jnp.argsort(alpha)
    sin_abs_sort_alpha = jnp.sin(abs_alpha[alpha_sort_order])
    sort_alpha = alpha[alpha_sort_order]

    gamma = jnp.sqrt(1 - omega)
    eps = (1 - gamma) / (1 + gamma)

    # Equation 34 for Henyey-Greenstein
    P_star = (1 - g ** 2) / (1 + g ** 2 + 2 * g * jnp.cos(alpha)) ** 1.5
    # Equation 36
    P_0 = (1 - g) / (1 + g) ** 2

    # Equation 10:
    Rho_S = P_star - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
    Rho_S_0 = P_0 - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
    Rho_L = 0.5 * eps * (2 - eps) * (1 + eps) ** 2
    Rho_C = eps ** 2 * (1 + eps) ** 2

    alpha_plus = jnp.sin(abs_alpha / 2) + jnp.cos(abs_alpha / 2)
    alpha_minus = jnp.sin(abs_alpha / 2) - jnp.cos(abs_alpha / 2)

    # Equation 11:
    Psi_0 = jnp.where(
        (alpha_plus > -1) & (alpha_minus < 1),
        jnp.log((1 + alpha_minus) * (alpha_plus - 1) /
                (1 + alpha_plus) / (1 - alpha_minus)),
        0
    )

    Psi_S = 1 - 0.5 * (jnp.cos(abs_alpha / 2) -
                       1.0 / jnp.cos(abs_alpha / 2)) * Psi_0
    Psi_L = (jnp.sin(abs_alpha) +
             (np.pi - abs_alpha) * jnp.cos(abs_alpha)) / np.pi
    Psi_C = (-1 + 5 / 3 * jnp.cos(abs_alpha / 2) ** 2 -
             0.5 * jnp.tan(abs_alpha / 2) * jnp.sin(abs_alpha / 2) ** 3 * Psi_0)

    # Equation 8:
    A_g = omega / 8 * (P_0 - 1) + eps / 2 + eps ** 2 / 6 + eps ** 3 / 24

    # Equation 9:
    Psi = ((12 * Rho_S * Psi_S + 16 * Rho_L * Psi_L + 9 * Rho_C * Psi_C) /
           (12 * Rho_S_0 + 16 * Rho_L + 6 * Rho_C))

    flux_ratio_ppm = 1e6 * (a_rp ** -2 * A_g * Psi)

    q = _integral_phase_function(
        Psi, sin_abs_sort_alpha, sort_alpha, alpha_sort_order
    )

    return flux_ratio_ppm, A_g, q
e6f9fadaec4614b5ea0058d13956cb5ef13d57b5
6,623
# `pos` here is assumed to be the positions module that defines CostSpec
# (e.g. beancount.core.position); the original `from operator import pos`
# looks like a mis-inferred import.
from beancount.core import position as pos


def costspec(
    currencies: list[str] = ["USD"],
) -> s.SearchStrategy[pos.CostSpec]:
    """Generates a random CostSpec.

    Args:
        currencies: An optional list of currencies to select from.

    Returns:
        A new search strategy.
    """
    return s.builds(pos.CostSpec, currency=common.currency(currencies))
4147a7046e5d4b16a6f919d77683a849ceb2ce54
6,624
import gzip
import os
import json


def get_gene_names_conversion():
    """Get the compressed file containing two-way mappings of gene_id
    to gene_symbol."""
    with gzip.open(
        os.path.join(
            current_app.config["FIVEX_DATA_DIR"],
            "gene.id.symbol.map.json.gz",
        ),
        "rt",
    ) as f:
        return json.loads(f.read())
d04aef6993c3f5c2309c518798978e4fc225c8e9
6,625
import json
import base64
from io import BytesIO

import numpy as np
import tensorflow as tf
from PIL import Image
from google.protobuf.json_format import MessageToDict


def _process_input(data, context):
    """Pre-process request input before it is sent to TensorFlow Serving
    REST API.

    Args:
        data (obj): the request data stream
        context (Context): an object containing request and configuration
            details

    Returns:
        (dict): a JSON-serializable dict that contains request body and headers
    """
    read_data = data.read()

    # endpoint API
    if context.request_content_type == 'application/json':
        # read as numpy array
        image_np = np.asarray(json.loads(read_data)).astype(np.dtype('uint8'))
        image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT, INPUT_WIDTH)))

    # batch transform of jpegs
    elif context.request_content_type == 'application/x-image':
        # load image from bytes and resize
        image_from_bytes = Image.open(BytesIO(read_data)).convert('RGB')
        image_from_bytes = image_from_bytes.resize((INPUT_HEIGHT, INPUT_WIDTH))
        image_np = np.array(image_from_bytes)

    # batch transform of tfrecord
    elif context.request_content_type == 'application/x-tfexample':
        example = tf.train.Example()
        example.ParseFromString(read_data)
        example_feature = MessageToDict(example.features)
        image_encoded = str.encode(example_feature['feature']['image']['bytesList']['value'][0])
        image_b64 = base64.decodebytes(image_encoded)
        image_np = np.frombuffer(image_b64, dtype=np.dtype('uint8')).reshape(32, 32, 3)
        image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT, INPUT_WIDTH)))

    # raise error if content type is not supported
    else:
        _return_error(415, 'Unsupported content type "{}"'.format(
            context.request_content_type or 'Unknown'))

    # preprocess for resnet50
    image_np = tf.keras.applications.resnet_v2.preprocess_input(image_np)

    # json serialize
    data_np_json = {"instances": [image_np.tolist()]}
    data_np_json_serialized = json.dumps(data_np_json)

    return data_np_json_serialized
542e9d04a8e93cb835f049ebd3c9105e24e3b5ac
6,626
def similarity_matrix_2d(X, Y, metric='cos'):
    """
    Calculate similarity matrix

    Parameters:
        X: ndarray
            input matrix 1
        Y: ndarray
            input matrix 2
        metric: string
            distance metric ('cos', 'euclid' or 'mahal')

    Returns:
        result: ndarray
            similarity matrix
    """
    n_X = len(X)
    n_Y = len(Y)

    if metric == 'cos':
        dist_func = cos_dist_2d
    elif metric == 'euclid':
        dist_func = euclid_dist_2d
    elif metric == 'mahal':
        dist_func = mahal_dist_2d
    else:
        dist_func = cos_dist_2d

    # SM = sp.zeros((n_X, n_Y))
    # `xrange` and lazy `map` were Python 2 idioms; use range/list(map).
    SM = [list(map(dist_func, n_X * [X[i]], Y)) for i in range(n_X)]
    # for i in range(n_X):
    #     SM.append(list(map(dist_func, n_X * [X[i]], Y)))
    SM = sp.array(SM)

    return SM
02d78531347c3acb90049505297c009c845e27d2
6,627
def issue_list_with_tag(request, tag=None, sortorder=None):
    """For a tag, display the list of issues."""
    if tag:
        stag = "\"%s\"" % tag
        issues = Issue.tagged.with_any(stag)

        tag_cloud = []
        if issues:
            tag_cloud = get_tagcloud_issues(issues)

        issues = issues.filter(is_draft=False)
        return issue_list(
            request,
            issues=issues,
            sortorder=sortorder,
            min_tv=1,
            subset=True,
            extra_context={
                'selected_tag': tag,
                'issue_tags': tag_cloud,
                'sort_url': reverse('issue_with_tag', args=[tag, ]),
            })
    else:
        return issue_list(request)
1f51db85a0b5008819fda73d3d86fa184b56b327
6,628
def update_deal(id, deal_dict):
    """Runs local validation on the given dict and gives passing ones to
    the API to update."""
    if utils.validate_deal_dict(utils.UPDATE, deal_dict, skip_id=True):
        resp = utils.request(utils.UPDATE, 'deals', {'id': id}, data=deal_dict)
        return utils.parse(resp)
    else:
        # validation failed but the exception was suppressed
        pass
82355c1a0204128f30b66a91fc22e3650b99f74d
6,629
import numpy as np
import matplotlib.pyplot as plt


def plot_tilt_hist(series, ntile: str, group_name: str, extra_space: bool = True):
    """
    Plots the histogram of group tilts for a single ntile.

    :param series: frame containing the avg tilts, columns: group, index: pd.Period
    :param ntile: the Ntile we are plotting for
    :param group_name: the name of the group
    :return: None
    """
    if extra_space:
        fig, ax = plt.subplots(1, 2, figsize=LARGE_FIGSIZE)
    else:
        # plt.subplots(1, 1) returns a single Axes, but the code below
        # indexes ax[0]; wrap it in a list so both branches behave the same.
        _, ax_single = plt.subplots(1, 1, figsize=(4.5, 4.5))
        ax = [ax_single]

    title = 'Weight Relative to Universe' if 'Ntile' in group_name else 'Group Exposure'

    plotter_frame = series.to_frame('weight')
    plotter_frame['colors'] = [TILTS_COLOR_MAP(i) for i in np.linspace(0, 1, len(series))]
    plotter_frame = plotter_frame.sort_values('weight')

    ax[0].barh(plotter_frame.index.tolist(), plotter_frame['weight'].tolist(),
               align='center', color=plotter_frame['colors'].tolist())
    ax[0].set(title=f'{ntile}, {group_name}'.title(), ylabel='Group', xlabel=title)
    ax[0].axvline(0, linestyle='-', color='black', lw=1)

    if extra_space:
        return ax[1]

    plt.show()
8f3077831cd11092e2a14bc60152ba693c0da6a6
6,630
def get_constraints_for_x(cell, board):
    """
    Get the constraints for a given cell.

    @param cell Class instance of Variable; a cell of the Sudoku board
    @param board
    @return Number of constraints
    """
    nconstraints = 0
    # Row
    for cellj in board[cell.row][:cell.col]:
        if cellj.get_domain_size() > 1:
            nconstraints += 1
    for cellj in board[cell.row][cell.col + 1:]:
        if cellj.get_domain_size() > 1:
            nconstraints += 1
    # Col
    for irow in range(cell.row):
        if board[irow][cell.col].get_domain_size() > 1:
            nconstraints += 1
    for irow in range(cell.row + 1, cell.max_domain_val):
        if board[irow][cell.col].get_domain_size() > 1:
            nconstraints += 1
    # Box
    # .. This would not generalize to a new board, but leave for now
    ibox_row = int(cell.row / 3) * 3
    ibox_col = int(cell.col / 3) * 3
    if board[ibox_row + 1][ibox_col + 1].get_domain_size() > 1 \
            or board[ibox_row + 1][ibox_col + 2].get_domain_size() > 1 \
            or board[ibox_row + 2][ibox_col + 1].get_domain_size() > 1 \
            or board[ibox_row + 2][ibox_col + 2].get_domain_size() > 1:
        nconstraints += 1
    return nconstraints
a46cda54569a12e80b9d52896f07335480799cb1
6,631
def average(values):
    """Computes the arithmetic mean of a list of numbers.

    >>> print(average([20, 30, 70]))
    40.0
    """
    try:
        return stats.mean(values)
    except ZeroDivisionError:
        return None
85d02529404301891e0ecd1f2a9b76695a357504
6,632
def get_subgraph_pos(G, pos):
    """
    Returns the filtered positions for subgraph G. If subgraph == original
    graph then pos will be returned.

    Parameters
    ----------
    G : nx.Graph
        A graph object.
    pos : dict
        A dictionary with nodes as keys and positions as values.

    Example
    -------
    >>> pos = nx.spring_layout(G)
    >>> subgraph_nodes = ['1', '2', '3']
    >>> subgraph = G.subgraph(subgraph_nodes)
    >>> subgraph_positions = get_subgraph_pos(subgraph, pos)

    Returns
    -------
    dict
        Assuming positions were generated earlier for a larger graph with
        some layout algorithm, this function returns the positions filtered
        by the subgraph.
    """
    return {k: v for k, v in pos.items() if k in G.nodes()}
ca7fc389cc51aaace7a751f2107fe5cfbfd22e6c
6,633
def _calculateWindowPosition(screenGeometry, iconGeometry, windowWidth, windowHeight):
    """
    Calculate window position near-by the system tray using geometry of a
    system tray and window geometry.

    @param screenGeometry: geometry of the screen where system tray is located
    @type screenGeometry: QRect
    @param iconGeometry: geometry of the system tray icon in screen coordinates
    @type iconGeometry: QRect
    @param windowWidth: width of the main window
    @type windowWidth: int
    @param windowHeight: height of the main window including header
    @type windowHeight: int
    @return: coordinates for main window positioning
    @rtype: QPoint
    """
    possibleWindowPositions = {
        LEFT: {
            'x': iconGeometry.x() + iconGeometry.width(),
            'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
        },
        BOTTOM: {
            'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
            'y': iconGeometry.y() - windowHeight
        },
        RIGHT: {
            'x': iconGeometry.x() - windowWidth,
            'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
        },
        TOP: {
            'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
            'y': iconGeometry.y() + iconGeometry.height()
        },
    }
    position = possibleWindowPositions[_guessTrayPosition(screenGeometry, iconGeometry)]
    return QPoint(position['x'], position['y'])
112011828dcfd0a6a54b6fe2c3d8acd92baf6c64
6,634
def build_from_config(config: dict, name: str) -> HomingMotor:
    """Build the named HomingMotor from data found in config"""

    def check_for_key(key, cfg):
        if key not in cfg:
            raise RuntimeError('Key "{}" for HomingMotor "{}" not found.'.format(key, name))
        else:
            return cfg[key]

    if name not in config:
        raise RuntimeError('Config for HomingMotor "{}" not found.'.format(name))
    my_config = config[name]
    inverted = check_for_key('inverted', my_config)
    max_steps = check_for_key('max_steps', my_config)
    name = check_for_key('name', my_config)
    pulse_delay = float(check_for_key('pulse_delay', my_config))
    sensor = check_for_key('sensor', my_config)
    stepper = check_for_key('stepper', my_config)
    dir_pin = int(check_for_key("dir_pin", stepper))
    ms1_pin = int(check_for_key("ms1_pin", stepper))
    ms2_pin = int(check_for_key("ms2_pin", stepper))
    ms3_pin = int(check_for_key("ms3_pin", stepper))
    step_pin = int(check_for_key("step_pin", stepper))
    step_size = int(check_for_key("step_size", stepper))
    input_pin = int(check_for_key('input_pin', sensor))
    m = build(name, dir_pin, step_pin, ms1_pin, ms2_pin, ms3_pin, input_pin,
              max_steps, inverted, pulse_delay)
    m.set_step_size(step_size)
    # print('{} built from config OK'.format(m.get_name()))
    return m
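A sketch of the config shape this function expects, with hypothetical pin numbers (every key below is read via check_for_key):

config = {
    "z-axis": {
        "inverted": False,
        "max_steps": 4000,
        "name": "z-axis",
        "pulse_delay": 0.002,
        "sensor": {"input_pin": 17},
        "stepper": {
            "dir_pin": 20, "ms1_pin": 5, "ms2_pin": 6,
            "ms3_pin": 13, "step_pin": 21, "step_size": 8,
        },
    }
}
motor = build_from_config(config, "z-axis")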
ce39fc8db48da8145b9221120c3ec02f3bdda40f
6,635
def arg_return_greetings(name):
    """
    Greeting function that takes an argument and returns a greeting message.

    :param name:
    :return:
    """
    message = f"hello {name}"
    return message
23bab521832358692c3aa653c6138ffee13c4e7a
6,636
from typing import Any


def basic_usage(card_id: str, parent: Any = None):
    """Basic usage of the application, minus the card recognition bits"""
    data = pull_card_data(card_id)

    qt_window = Card(parent, data)
    qt_window.setWindowTitle("YGO Scanner")
    qt_window.show()

    return qt_window
6960697ef12959a7aaaf47607c3026d0925b0a88
6,637
from hashlib import sha1
from pathlib import Path
from typing import Dict
from typing import Optional
import json


def get_abi(
    contract_sources: Dict[str, str],
    allow_paths: Optional[str] = None,
    remappings: Optional[list] = None,
    silent: bool = True,
) -> Dict:
    """
    Generate ABIs from contract interfaces.

    Arguments
    ---------
    contract_sources : dict
        a dictionary in the form of {'path': "source code"}
    allow_paths : str, optional
        Compiler allowed filesystem import path
    remappings : list, optional
        List of solidity path remappings
    silent : bool, optional
        Disable verbose reporting

    Returns
    -------
    dict
        Compiled ABIs in the format `{'contractName': [ABI]}`
    """
    final_output = {
        Path(k).stem: {
            "abi": json.loads(v),
            "contractName": Path(k).stem,
            "type": "interface",
            "sha1": sha1(v.encode()).hexdigest(),
        }
        for k, v in contract_sources.items()
        if Path(k).suffix == ".json"
    }

    for path, source in [(k, v) for k, v in contract_sources.items() if Path(k).suffix == ".vy"]:
        input_json = generate_input_json({path: source}, language="Vyper")
        input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]}
        try:
            output_json = compile_from_input_json(input_json, silent, allow_paths)
        except Exception:
            # vyper interfaces do not convert to ABIs
            # https://github.com/vyperlang/vyper/issues/1944
            continue
        name = Path(path).stem
        final_output[name] = {
            "abi": output_json["contracts"][path][name]["abi"],
            "contractName": name,
            "type": "interface",
            "sha1": sha1(contract_sources[path].encode()).hexdigest(),
        }

    solc_sources = {k: v for k, v in contract_sources.items() if Path(k).suffix == ".sol"}

    if solc_sources:
        compiler_targets = find_solc_versions(solc_sources, install_needed=True, silent=silent)

        for version, path_list in compiler_targets.items():
            to_compile = {k: v for k, v in contract_sources.items() if k in path_list}

            set_solc_version(version)
            input_json = generate_input_json(
                to_compile,
                language="Vyper" if version == "vyper" else "Solidity",
                remappings=remappings,
            )
            input_json["settings"]["outputSelection"]["*"] = {"*": ["abi"]}
            output_json = compile_from_input_json(input_json, silent, allow_paths)
            output_json = {k: v for k, v in output_json["contracts"].items() if k in path_list}
            final_output.update(
                {
                    name: {
                        "abi": data["abi"],
                        "contractName": name,
                        "type": "interface",
                        "sha1": sha1(contract_sources[path].encode()).hexdigest(),
                    }
                    for path, v in output_json.items()
                    for name, data in v.items()
                }
            )

    return final_output
bd9eb4959796d549950f8dd0372ee42c95cd0dd6
6,638
import numpy as np


def predict(w, b, X):
    """
    Predict whether the label is 0 or 1 using the learned logistic
    regression parameters (w, b).

    Parameters:
        w - weights, an array of shape (num_px * num_px * 3, 1)
        b - bias, a scalar
        X - data of shape (num_px * num_px * 3, number of examples)

    Returns:
        Y_prediction - a numpy array (vector) containing all
        predictions (0 | 1) for the examples in X
    """
    m = X.shape[1]  # number of examples
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    # Compute the probability that a cat appears in the picture
    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # Convert the probability A[0, i] into an actual prediction p[0, i]
        Y_prediction[0, i] = 1 if A[0, i] > 0.5 else 0
    # Sanity check with an assertion
    assert Y_prediction.shape == (1, m)

    return Y_prediction
4e258d7de1788d6da5c8a832ff11a5e8718b5d84
6,639
import numpy as np


def delta_C(parcels_old, parcels_new, normed=False):
    """
    Compute the number of vertices that change connected component from the
    old parcellation to the new parcellation.

    Parameters:
    - - - - -
    parcels_old : dictionary
        old connected component sample assignments
    parcels_new : dictionary
        new connected component sample assignments

    Returns:
    - - - -
    deltaC : int
        number of vertices that changed label
    """
    new = set(map(len, parcels_new.values()))
    old = set(map(len, parcels_old.values()))

    deltaC = np.int32(list(new.difference(old))).sum()
    if normed:
        deltaC = deltaC / np.sum(list(new))

    return deltaC
07f48d30fbaa4b0278871b199d7768c6f2d49508
6,640
def increment(number):
    """Increases a given number by 1."""
    return number + 1
ad10a887ee571182247e76fe41fddd6d53b2dc6a
6,641
def get_recent_added_companies(parser, token):
    """
    Gets any number of the recently added companies.

    Syntax::

        {% get_recent_added_companies [limit] as [var_name] %}
    """
    return base_tag(parser, token, RecentCreatedCompanies)
73c7c0f12951ba9d6b6a3220c2f88055ea027624
6,642
import glob

import numpy as np


def search_data(templates, pols, matched_pols=False, reverse_nesting=False, flatten=False):
    """
    Glob-parse data templates to search for data files.

    Parameters
    ----------
    templates : str or list
        A glob-parsable search string, or list of such strings, with a {pol}
        spot for string formatting. Ex. ["zen.even.{pol}.LST.*.HH.uv"]
    pols : str or list
        A polarization string, or list of polarization strings, to search
        for. Ex. ["xx", "yy"]
    matched_pols : boolean
        If True, only use datafiles that are present for all polarizations.
    reverse_nesting : boolean
        If True, flip the nesting of datafiles to be datafile-polarization.
        By default, the output is polarization-datafile.
    flatten : boolean
        If True, flatten the nested output datafiles to a single hierarchy.

    Returns
    -------
    datafiles : list
        A nested list of paths to datafiles. By default, the structure is
        polarization-datafile nesting. If reverse_nesting, then the structure
        is flipped to datafile-polarization structure.
    datapols : list
        List of polarizations for each file in datafile
    """
    # type check (np.str was removed from recent numpy; plain str suffices)
    if isinstance(templates, str):
        templates = [templates]
    if isinstance(pols, (str, np.integer, int)):
        pols = [pols]
    # search for datafiles
    datafiles = []
    datapols = []
    for pol in pols:
        dps = []
        dfs = []
        for template in templates:
            _dfs = glob.glob(template.format(pol=pol))
            if len(_dfs) > 0:
                dfs.extend(_dfs)
                dps.extend([pol for df in _dfs])
        if len(dfs) > 0:
            datafiles.append(sorted(dfs))
            datapols.append(dps)
    # get unique files
    allfiles = [item for sublist in datafiles for item in sublist]
    allpols = [item for sublist in datapols for item in sublist]
    unique_files = set()
    for _file in allfiles:
        for pol in pols:
            if ".{pol}.".format(pol=pol) in _file:
                unique_files.update(
                    set([_file.replace(".{pol}.".format(pol=pol), ".{pol}.")]))
                break
    unique_files = sorted(unique_files)
    # check for unique files with all pols
    if matched_pols:
        Npols = len(pols)
        _templates = []
        for _file in unique_files:
            goodfile = True
            for pol in pols:
                if _file.format(pol=pol) not in allfiles:
                    goodfile = False
            if goodfile:
                _templates.append(_file)
        # achieve goal by calling search_data with new _templates that are
        # polarization matched
        datafiles, datapols = search_data(_templates, pols, matched_pols=False,
                                          reverse_nesting=False)
    # reverse nesting if desired
    if reverse_nesting:
        datafiles = []
        datapols = []
        for _file in unique_files:
            dfs = []
            dps = []
            for pol in pols:
                df = _file.format(pol=pol)
                if df in allfiles:
                    dfs.append(df)
                    dps.append(pol)
            datafiles.append(dfs)
            datapols.append(dps)
    # flatten
    if flatten:
        datafiles = [item for sublist in datafiles for item in sublist]
        datapols = [item for sublist in datapols for item in sublist]

    return datafiles, datapols
9f8018de15db0659928e28779ebf4acda0ddba74
6,643
import re


def normalize_word(word):
    """
    :type word: str
    :rtype: str
    """
    acronym_pattern = r'^(?:[A-Z]\.)+$'
    if re.match(pattern=acronym_pattern, string=word):
        word = word.replace('.', '')

    if word.lower() in _REPLACE_WORDS:
        replacement = _REPLACE_WORDS[word.lower()]
        if word.islower():
            return replacement.lower()
        elif word.isupper():
            return replacement.upper()
        elif word[0].isupper() and word[1:].islower():
            return replacement.capitalize()
        else:
            return replacement
    else:
        return word
e2c96d456cc8b555b68f2c7498a6d2898ce5990e
6,644
def _ggm_qsize_prob_gt_0_whitt_5_2(arr_rate, svc_rate, c, ca2, cs2):
    """
    Return the approximate P(Q>0) in a G/G/m queue using Whitt's simple
    approximation involving rho and P(W>0).

    This approximation is exact for M/M/m and has strong theoretical support
    for GI/M/m. It's described by Whitt as "crude" but is "a useful quick
    approximation".

    See Section 5 of Whitt, Ward. "Approximations for the GI/G/m queue"
    Production and Operations Management 2, 2 (Spring 1993): 114-161.
    In particular, this is Equation 5.2.

    Parameters
    ----------
    arr_rate : float
        average arrival rate to queueing system
    svc_rate : float
        average service rate (each server). 1/svc_rate is mean service time.
    c : int
        number of servers
    ca2 : float
        squared coefficient of variation for the interarrival time distribution
    cs2 : float
        squared coefficient of variation for the service time distribution

    Returns
    -------
    float
        ~ P(Q > 0)
    """
    rho = arr_rate / (svc_rate * float(c))
    pdelay = ggm_prob_wait_whitt(arr_rate, svc_rate, c, ca2, cs2)

    prob_gt_0 = rho * pdelay

    return prob_gt_0
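Since P(Q>0) = rho * P(W>0) holds exactly for M/M/m, the identity can be sanity-checked against an Erlang-C computation. A self-contained sketch, not part of the original module:

from math import factorial

def erlang_c(arr_rate, svc_rate, c):
    # Exact M/M/c probability of delay, P(W > 0)
    a = arr_rate / svc_rate      # offered load
    rho = a / c
    series = sum(a ** k / factorial(k) for k in range(c))
    tail = a ** c / (factorial(c) * (1 - rho))
    return tail / (series + tail)

arr_rate, svc_rate, c = 8.0, 1.0, 10
rho = arr_rate / (svc_rate * c)
print(rho * erlang_c(arr_rate, svc_rate, c))  # exact P(Q > 0) for M/M/10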
3eded5597dc199e61c4d79187369e1a84531ac3d
6,645
def pds3_label_gen_date(file):
    """Returns the creation date of a given PDS3 label.

    :param file: File path
    :type file: str
    :return: Creation date
    :rtype: str
    """
    generation_date = "N/A"
    with open(file, "r") as f:
        for line in f:
            if "PRODUCT_CREATION_TIME" in line:
                generation_date = line.split("=")[1].strip()
    return generation_date
c2877fa9246dd0c12c6ea47635ab248dc038b179
6,646
def harmony(*args):
    """
    Takes an arbitrary number of numbers and returns their harmonic mean.
    Calculation is done with the formula:
        number_of_args / (1/item1 + 1/item2 + ...)

    Args:
        *args (tuple): number of arguments with a type: float, integer

    Returns:
        float: harmonic mean value
    """
    result = 0
    if 0 in args:
        return 0.0
    for item in args:
        result += 1 / item
    return len(args) / result
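For example, the harmonic mean of 1, 2 and 4 is 3 / (1/1 + 1/2 + 1/4):

print(harmony(1, 2, 4))    # 1.7142857142857142
print(harmony(20, 30, 0))  # 0.0 (the zero guard avoids a ZeroDivisionError)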
bc66276b3ef27ef0bfd059afa8ca7afd5d9cbb82
6,647
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point


def node_gdf_from_graph(G, crs='epsg:4326', attr_list=None, geometry_tag='geometry', xCol='x', yCol='y'):
    """
    Function for generating GeoDataFrame from Graph

    :param G: a graph object G
    :param crs: projection of format {'init' :'epsg:4326'}. Defaults to WGS84.
        note: here we are defining the crs of the input geometry - we do NOT
        reproject to this crs. To reproject, consider using geopandas'
        to_crs method on the returned gdf.
    :param attr_list: list of the keys which you want to be moved over to the
        GeoDataFrame, if not all. Defaults to None, which will move all.
    :param geometry_tag: specify geometry attribute of graph, default 'geometry'
    :param xCol: if no shapely geometry but Longitude present, assign here
    :param yCol: if no shapely geometry but Latitude present, assign here
    :returns: a geodataframe of the node objects in the graph
    """
    nodes = []
    keys = []

    # finds all of the attributes
    if attr_list is None:
        for u, data in G.nodes(data=True):
            keys.append(list(data.keys()))
        flatten = lambda l: [item for sublist in l for item in sublist]
        attr_list = list(set(flatten(keys)))

    if geometry_tag in attr_list:
        non_geom_attr_list = attr_list
        non_geom_attr_list.remove(geometry_tag)
    else:
        non_geom_attr_list = attr_list

    if 'node_ID' in attr_list:
        non_geom_attr_list = attr_list
        non_geom_attr_list.remove('node_ID')

    z = 0

    for u, data in G.nodes(data=True):
        if geometry_tag not in attr_list and xCol in attr_list and yCol in attr_list:
            try:
                new_column_info = {
                    'node_ID': u,
                    'geometry': Point(data[xCol], data[yCol]),
                    'x': data[xCol],
                    'y': data[yCol]}
            except:
                print('Skipped due to missing geometry data:', (u, data))
        else:
            try:
                new_column_info = {
                    'node_ID': u,
                    'geometry': data[geometry_tag],
                    'x': data[geometry_tag].x,
                    'y': data[geometry_tag].y}
            except:
                print((u, data))

        for i in non_geom_attr_list:
            try:
                new_column_info[i] = data[i]
            except:
                pass

        nodes.append(new_column_info)
        z += 1

    nodes_df = pd.DataFrame(nodes)
    nodes_df = nodes_df[['node_ID', *non_geom_attr_list, geometry_tag]]
    nodes_df = nodes_df.drop_duplicates(subset=['node_ID'], keep='first')

    nodes_gdf = gpd.GeoDataFrame(nodes_df, geometry=nodes_df.geometry, crs=crs)

    return nodes_gdf
cf5849c672877010aae7b1fb841a6993a53d232f
6,648
def views():
    """
    Used for the creation of Orientation objects with
    `Orientations.from_view_up`
    """
    return [[1, 0, 0], [2, 0, 0], [-1, 0, 0]]
21ffce8e8a56cf31e2d03a6384d584bcb4bfb2c8
6,649
# NOTE: the original imported `path` from sys, but `path.splitext` lives in
# os.path.
from os import path


def savePlot(widget, default_file_type, old_file_path=None):
    """Saves a plot in the specified file format.

    :param widget: graphics widget.
    :param default_file_type: default save file type.
    :param old_file_path: file path from a previous save operation.
    :return: returns file path, returns empty string or old file path when
        user cancels save.
    """
    gr_file_types = {**gr.PRINT_TYPE, **gr.GRAPHIC_TYPE}
    save_types = ";;".join(sorted(set(gr_file_types.values())))

    default_file = 'untitled'
    if old_file_path:
        default_file = path.splitext(old_file_path)[0]

    file_path, _ = QFileDialog.getSaveFileName(None, 'Save as...', default_file,
                                               filter=save_types,
                                               initialFilter=default_file_type)
    if not file_path:
        return "" if not old_file_path else old_file_path

    file_ext = path.splitext(file_path)[1]
    if file_ext.lower()[1:] in gr_file_types:
        widget.save(file_path)
    else:
        raise TypeError("Unsupported file format {}".format(file_ext))

    return file_path
5f1a9ab9f8bf9854716737fe59bf8f95710ab2be
6,650
def check_closed(f):
    """Decorator that checks if connection/cursor is closed."""

    def g(self, *args, **kwargs):
        if self.closed:
            raise exceptions.Error(f'{self.__class__.__name__} already closed')
        return f(self, *args, **kwargs)

    return g
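A usage sketch of the decorator on a minimal cursor class (the `exceptions` namespace below is a stand-in for the module's own):

class exceptions:  # stand-in
    class Error(Exception):
        pass

class Cursor:
    def __init__(self):
        self.closed = False

    @check_closed
    def fetchall(self):
        return []

cur = Cursor()
cur.fetchall()    # returns []
cur.closed = True
cur.fetchall()    # raises exceptions.Error: Cursor already closed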
4772de94c28022266ee01f0c900e8937859cc58c
6,651
import os
import subprocess
import example
import pickle


def generate_glove_vecs(revs):
    """
    This function generates GloVe vectors based on the training data.
    This function can be more optimized in future.

    :return: A dictionary containing words as keys and their GloVe vectors as
        the corresponding values.
    :rtype: dict
    """
    os.chdir("GloVe")
    subprocess.call(['python3', 'setup.py', 'cythonize'])
    os.system("pip3 install -e .")
    os.chdir("..")
    example.main_ex(revs)
    word_vectors = pickle.load(open("glove.model", "rb"))["words_and_vectors"]
    return word_vectors
4f252bd2208d55b71b59c249b2c83e7bda12b325
6,652
def get_diff_comparison_score(git_commit, review_url, git_checkout_path,
                              cc):  # pragma: no cover
    """Reads the diff for a specified commit

    Args:
        git_commit(str): a commit hash
        review_url(str): a rietveld review url
        git_checkout_path(str): path to a local git checkout
        cc: a cursor for the Cloud SQL connection

    Return:
        score(float): a score in [0,1] where 0 is no similarity
                      and 1 is a perfect match
    """
    git_diff = get_git_diff(git_commit, git_checkout_path)
    comparable_git_diff = [x for x in git_diff
                           if x.startswith('+') or x.startswith('-')]
    rietveld_diff = get_rietveld_diff(review_url, cc, git_checkout_path)
    comparable_rietveld_diff = [x for x in rietveld_diff
                                if x.startswith('+') or x.startswith('-')]

    # lines present in the git diff but not in the rietveld diff
    matching = list(set(comparable_git_diff) - set(comparable_rietveld_diff))
    total = max(len(comparable_git_diff), len(comparable_rietveld_diff))
    score = 1 - float(len(matching)) / total if total != 0 else 0
    return score
b68904a62d1e42b8e147705984c9455ff0f5d6fc
6,653
def pack(pieces=()):
    """
    Join a sequence of strings together.

    :param list pieces: list of strings
    :rtype: bytes
    """
    return b''.join(pieces)
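For example:

pack([b'\x00\x01', b'payload'])  # b'\x00\x01payload'
pack()                           # b''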
ffd0852a16c6292f921e5cf205301171e3a96fd3
6,654
from argparse import ArgumentParser


def options():
    """pylbm command line options"""
    parser = ArgumentParser()

    logging = parser.add_argument_group('log')
    logging.add_argument("--log", dest="loglevel", default="WARNING",
                         choices=['WARNING', 'INFO', 'DEBUG', 'ERROR'],
                         help="Set the log level")

    monitoring = parser.add_argument_group('monitoring')
    monitoring.add_argument("--monitoring", action="store_true",
                            help="Set the monitoring")

    mpi = parser.add_argument_group('mpi splitting')
    mpi.add_argument("-npx", dest="npx", default=1, type=int,
                     help="Set the number of processes in x direction")
    mpi.add_argument("-npy", dest="npy", default=1, type=int,
                     help="Set the number of processes in y direction")
    mpi.add_argument("-npz", dest="npz", default=1, type=int,
                     help="Set the number of processes in z direction")

    args, _ = parser.parse_known_args()
    return args
e3df9ee1128dccf09eae9b6cde53aab24e639e8b
6,655
def compte_var(f, var):
    """Count the number of occurrences of the variable var in the formula f."""
    n = f.nb_operandes()
    if n == 0:
        v = f.get_val()
        if v == var:
            return 1
        else:
            return 0
    elif n == 1:
        f2 = (f.decompose())[0]
        return compte_var(f2, var)
    else:
        [f2, f3] = f.decompose()
        return compte_var(f2, var) + compte_var(f3, var)
002051e3bf723cfcc1a2cb3d094b58980591adc5
6,656
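A hypothetical minimal Formula class matching the interface used above (nb_operandes, get_val, decompose are inferred from the call sites, not the original API), plus a worked call:

class Formula:
    def __init__(self, val, operands=()):
        self.val = val
        self.operands = list(operands)

    def nb_operandes(self):
        return len(self.operands)

    def get_val(self):
        return self.val

    def decompose(self):
        return self.operands

# x + (x * 2) contains 'x' twice
f = Formula('+', [Formula('x'), Formula('*', [Formula('x'), Formula('2')])])
print(compte_var(f, 'x'))  # 2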
from watchlist.models import User

def inject_vars():  # the function name can be anything
    """Template context processor: injects shared variables into templates."""
    user = User.query.first()  # user object
    if not user:
        user = User()
        user.name = 'BL00D'
    return locals()
edf9126fb919cb825acac3f951f481575fe2ef57
6,657
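A sketch of registering the context processor on a Flask app (the app object is hypothetical); every rendered template then sees the user variable:

from flask import Flask

app = Flask(__name__)
app.context_processor(inject_vars)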
def try_parse_section(text: str, section_name: str) -> str: """ Parse a section. Return an empty string if section not found. Args: text (str): text section_name (str): section's name Returns: (str): section """ try: return parse_section(text, section_name) except Exception: return ""
26c8d6d3f8475954fcf742e662981ad5f1223e53
6,658
from bs4 import BeautifulSoup

def get_location_based_lifers(web_page):
    """
    a method that takes in a web page and returns location frequency
    for lifers and lifer details.
    """
    # html_parser and myebird_species_li_class are presumably module-level constants
    bs4_object = BeautifulSoup(web_page, html_parser)
    table_list = bs4_object.find_all('li', class_=myebird_species_li_class)
    lifer_data_list = []
    for item in table_list:
        bird_name = item.find_all('div')[1].find('a').find_all('span')[0].contents[0].strip()
        location = item.find_all('div')[2].find_all('div')[1].find_all('a')[0].contents[0].strip()
        date = item.find_all('div')[2].find_all('div')[0].find('a').contents[0].strip()
        lifer_data_list.append([bird_name, location, date])

    location_frequency = dict()
    for _, location, _ in lifer_data_list:
        location_frequency[location] = location_frequency.get(location, 0) + 1

    sorted_location_frequency = sorted(location_frequency.items(), key=lambda x: x[1], reverse=True)
    return sorted_location_frequency, lifer_data_list
1c6b85962f6c142ab816255a1fe5c98f272dfebb
6,659
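The frequency count above can also be expressed with collections.Counter; an equivalent sketch (tie order aside), assuming rows of [bird_name, location, date]:

from collections import Counter

location_frequency = Counter(row[1] for row in lifer_data_list)
sorted_location_frequency = location_frequency.most_common()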
def parse_show_qos_queue_profile(raw_result): """ Parse the show command raw output. :param str raw_result: vtysh raw result string. :rtype: dict :return: The parsed result of the 'show qos queue-profile' command in a \ dictionary: for 'show qos queue-profile': :: { 'default': {'profile_name': 'default', 'profile_status': 'applied'}, 'factory-default': {'profile_name': 'factory-default', 'profile_status': 'complete'} } for 'show qos queue-profile <name>': :: { '0': {'queue_num': '0', 'local_priorities': '0', 'name': 'Scavenger_and_backup_data'}, '1': {'queue_num': '1', 'local_priorities': '1', 'name': ''}, ... } """ hyphen_line = raw_result.splitlines()[1] columns = [pos for pos, char in enumerate(hyphen_line) if char == ' '] result = {} if len(columns) + 1 == 2: # All profiles. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: profile_name = line[columns[0]:len(line)].strip() result[profile_name] = {} result[profile_name]['profile_status'] = \ line[0:columns[0]].strip() result[profile_name]['profile_name'] = \ line[columns[0]:len(line)].strip() elif len(columns) + 1 == 3: # Single profile. # Skip the first two banner lines. for line in raw_result.splitlines()[2:]: queue_num = line[0:columns[0]].strip() result[queue_num] = {} result[queue_num]['queue_num'] = \ line[0:columns[0]].strip() result[queue_num]['local_priorities'] = \ line[columns[0]:columns[1]].strip() result[queue_num]['name'] = \ line[columns[1]:len(line)].strip() else: # Error. raise ValueError("Unexpected number of columns.") return result
2a883a50663607356e0edadeb2d4cf17d34ab028
6,660
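A self-contained check of the single-profile branch; the queue names and column widths below are invented to satisfy the hyphen-line layout the parser keys on (the spaces in the second line mark the column boundaries):

raw = "\n".join([
    "queue_num local_priorities name",
    "-" * 9 + " " + "-" * 16 + " " + "-" * 9,
    "0" + " " * 9 + "0" + " " * 16 + "Scavenger",
    "1" + " " * 9 + "1" + " " * 16 + "Bulk",
])
print(parse_show_qos_queue_profile(raw))
# {'0': {'queue_num': '0', 'local_priorities': '0', 'name': 'Scavenger'},
#  '1': {'queue_num': '1', 'local_priorities': '1', 'name': 'Bulk'}}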
import io
import random

# Response is assumed to come from Flask, matching the mimetype usage below.
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure

def plot_png(num_x_points=50):
    """ renders the plot on the fly. """
    fig = Figure()
    axis = fig.add_subplot(1, 1, 1)
    x_points = range(num_x_points)
    axis.plot(x_points, [random.randint(1, 30) for x in x_points])

    output = io.BytesIO()
    FigureCanvasAgg(fig).print_png(output)
    return Response(output.getvalue(), mimetype="image/png")
bac5e9146bf0b60d943e5d58376a84eddebd21ec
6,661
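A sketch of serving the function from a Flask route (the app wiring is assumed, not part of the original):

from flask import Flask

app = Flask(__name__)

@app.route("/plot.png")
def plot_route():
    return plot_png(num_x_points=50)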
def render_message(session, window, msg, x, y): """Render a message glyph. Clears the area beneath the message first and assumes the display will be paused afterwards. """ # create message box msg = GlyphCoordinate(session, msg, x, y) # clear existing glyphs which intersect for gly in ( session.query(GlyphCoordinate) .join(GlyphCoordinate.glyph) .filter(GlyphCoordinate.intersects(msg)) ): gly.blank(window) # render msg.render(window, {}) window.refresh() return msg
2f0362dfa1f884571339456b0a610e0f6cdd75a6
6,662
from pathlib import Path

def part_one(filename='input.txt', target=2020):
    """Satisfies part one of day one by first sorting the input rows so we
    can avoid the worst case O(n**2). We incur O(n log n) to do the sort
    followed by a brute force search with short-circuiting if the sum exceeds
    our target. This is possible since we know in sorted order, only larger
    values will follow. Note, we assume only one valid solution in the given
    file. If more than one, there is no guarantee which will be returned.

    Parameters
    ----------
    filename : str, optional
        The file to parse as input will contain one integer per line, by
        default 'input.txt'
    target : int, optional
        The target sum we want to reach, by default 2020

    Returns
    -------
    int
        The product of the two integers that sum to the target value

    Raises
    ------
    Exception
        Probably overkill, but I wanted to know if my code was failing to
        find a solution. Also, I could have looked for a more appropriate
        exception than the base one.
    """
    items = sorted(map(int, Path(filename).read_text().split()))
    count = len(items)
    for i in range(count):
        for j in range(i+1, count):
            summand = items[i] + items[j]
            if summand > target:
                break
            elif summand == target:
                return items[i]*items[j]
    raise Exception('No solution!')
d9c6790f9c5b5de7fbd9555a8483f2cb0e156b3b
6,663
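A quick self-check with the classic example pair (1721, 299), written to a temporary file; the function should return 514579:

import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
    fh.write("1721\n979\n366\n299\n675\n1456\n")

print(part_one(fh.name))  # 1721 * 299 == 514579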
def get_urls(name, version=None, platform=None): """ Return a mapping of standard URLs """ dnlu = rubygems_download_url(name, version, platform) return dict( repository_homepage_url=rubygems_homepage_url(name, version), repository_download_url=dnlu, api_data_url=rubygems_api_url(name, version), download_url=dnlu, )
d11666a72771187166a6b9f620237639fd8422f3
6,664
import numpy as np
from scipy.constants import mu_0, epsilon_0 as eps_0

def getEHfields(m1d, sigma, freq, zd, scaleUD=True, scaleValue=1):
    """Analytic solution for MT 1D layered earth. Returns E and H fields.

    :param discretize.base.BaseMesh, object m1d: Mesh object with the 1D spatial information.
    :param numpy.ndarray, vector sigma: Physical property of conductivity corresponding with the mesh.
    :param float, freq: Frequency to calculate data at.
    :param numpy.ndarray, vector zd: location to calculate EH fields at
    :param bool, scaleUD: scales the output to be scaleValue at the top,
        increases numerical stability. Assumes a halfspace with the same
        conductivity as the deepest cell.
    """
    # Note: add an error check that the mesh and sigma are the same size.

    # Constants: Assume constant
    mu = mu_0 * np.ones((m1d.nC + 1))
    eps = eps_0 * np.ones((m1d.nC + 1))
    # Angular freq
    w = 2 * np.pi * freq

    # Add the halfspace value to the property
    sig = np.concatenate((np.array([sigma[0]]), sigma))

    # Calculate the wave number
    k = np.sqrt(eps * mu * w ** 2 - 1j * mu * sig * w)

    # Initiate the propagation matrix, in the order (up, down).
    UDp = np.zeros((2, m1d.nC + 1), dtype=complex)
    # Set the downgoing wave amplitude to scaleValue in the half-space at the
    # bottom of the mesh.
    UDp[1, 0] = scaleValue

    # Loop over all the layers, starting at the bottom layer
    for lnr, h in enumerate(m1d.hx):  # lnr-number of layer, h-thickness of the layer
        # Calculate
        yp1 = k[lnr] / (w * mu[lnr])  # Admittance of the layer below the current layer
        zp = (w * mu[lnr + 1]) / k[lnr + 1]  # Impedance in the current layer

        # Build the propagation matrix
        # Convert fields to down/up going components in layer below current layer
        Pj1 = np.array([[1, 1], [yp1, -yp1]], dtype=complex)
        # Convert fields to down/up going components in current layer
        Pjinv = 1.0 / 2 * np.array([[1, zp], [1, -zp]], dtype=complex)

        # Propagate down and up components through the current layer
        elamh = np.array(
            [[np.exp(-1j * k[lnr + 1] * h), 0], [0, np.exp(1j * k[lnr + 1] * h)]]
        )

        # The down and up component in current layer.
        UDp[:, lnr + 1] = elamh.dot(Pjinv.dot(Pj1)).dot(UDp[:, lnr])

        if scaleUD:
            # Scale the values so the downgoing amplitude is 1 at the top
            scaleVal = UDp[:, lnr + 1 :: -1] / UDp[1, lnr + 1]
            if np.any(~np.isfinite(scaleVal)):
                # If there is a nan (thickness very great), rebuild the move up cell
                scaleVal = np.zeros_like(UDp[:, lnr + 1 :: -1], dtype=complex)
                scaleVal[1, 0] = scaleValue
            UDp[:, lnr + 1 :: -1] = scaleVal

    # Calculate the fields
    Ed = np.empty((zd.size,), dtype=complex)
    Eu = np.empty((zd.size,), dtype=complex)
    Hd = np.empty((zd.size,), dtype=complex)
    Hu = np.empty((zd.size,), dtype=complex)
    # Loop over the layers and calculate the fields
    # In the halfspace below the mesh
    dup = m1d.vectorNx[0]
    dind = dup >= zd
    Ed[dind] = UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind]))
    Eu[dind] = UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind]))
    Hd[dind] = (k[0] / (w * mu[0])) * UDp[1, 0] * np.exp(-1j * k[0] * (dup - zd[dind]))
    Hu[dind] = -(k[0] / (w * mu[0])) * UDp[0, 0] * np.exp(1j * k[0] * (dup - zd[dind]))
    for ki, mui, epsi, dlow, dup, Up, Dp in zip(
        k[1::],
        mu[1::],
        eps[1::],
        m1d.vectorNx[:-1],
        m1d.vectorNx[1::],
        UDp[0, 1::],
        UDp[1, 1::],
    ):
        dind = np.logical_and(dup >= zd, zd > dlow)
        Ed[dind] = Dp * np.exp(-1j * ki * (dup - zd[dind]))
        Eu[dind] = Up * np.exp(1j * ki * (dup - zd[dind]))
        Hd[dind] = (ki / (w * mui)) * Dp * np.exp(-1j * ki * (dup - zd[dind]))
        Hu[dind] = -(ki / (w * mui)) * Up * np.exp(1j * ki * (dup - zd[dind]))

    # Return the fields
    return Ed, Eu, Hd, Hu
e850762955ff513adef7099b61eb285d059c1ffe
6,665
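For reference, the layer recursion assembled in the loop above corresponds to the following matrices, transcribed directly from the code with $y_j = k_j/(\omega\mu_j)$ and $z_{j+1} = \omega\mu_{j+1}/k_{j+1}$:

$$
k_j = \sqrt{\varepsilon_j \mu_j \omega^2 - i\,\mu_j \sigma_j \omega},
\qquad
\mathbf{u}_{j+1} =
\begin{pmatrix} e^{-ik_{j+1}h} & 0 \\ 0 & e^{ik_{j+1}h} \end{pmatrix}
\cdot
\frac{1}{2}\begin{pmatrix} 1 & z_{j+1} \\ 1 & -z_{j+1} \end{pmatrix}
\cdot
\begin{pmatrix} 1 & 1 \\ y_j & -y_j \end{pmatrix}
\mathbf{u}_j ,
$$

where $\mathbf{u}_j$ holds the up- and down-going amplitudes in layer $j$ and $h$ is the layer thickness.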
def repeated(f, n): """Returns a function that takes in an integer and computes the nth application of f on that integer. Implement using recursion! >>> add_three = repeated(lambda x: x + 1, 3) >>> add_three(5) 8 >>> square = lambda x: x ** 2 >>> repeated(square, 2)(5) # square(square(5)) 625 >>> repeated(square, 4)(5) # square(square(square(square(5)))) 152587890625 >>> repeated(square, 0)(5) 5 >>> from construct_check import check >>> # ban iteration >>> check(HW_SOURCE_FILE, 'repeated', ... ['For', 'While']) True """ if n == 0: return identity else: return compose1(f, repeated(f, n - 1))
dd2024ffa7c5abbcfb43b1a6a8d6ea00c3fb42c4
6,666
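The snippet relies on identity and compose1; these are the usual course helpers, reproduced here as an assumption:

def identity(x):
    return x

def compose1(f, g):
    """Return a function h such that h(x) = f(g(x))."""
    return lambda x: f(g(x))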
def method_functions(): """ Returns a dictionary containing the valid method keys and their corresponding dispersion measure functions. """ return _available
6d9ea23e4c0449b4b2d0a27b5117be30400a7d43
6,667
import os

def get_file_names(maindir, sessid, expid, segid, date, mouseid,
                   runtype="prod", mouse_dir=True, check=True):
    """
    get_file_names(maindir, sessionid, expid, date, mouseid)

    Returns the full path names of all of the expected data files in the
    main directory for the specified session and experiment on the given date
    that can be used for the Credit Assignment analysis.

    Required args:
        - maindir (str): name of the main data directory
        - sessid (int) : session ID (9 digits)
        - expid (str)  : experiment ID (9 digits)
        - segid (str)  : segmentation ID (9 digits)
        - date (str)   : date for the session in YYYYMMDD, e.g. "20160802"
        - mouseid (str): mouse 6-digit ID string used for session files

    Optional args:
        - runtype (str)   : "prod" (production) or "pilot" data
                            default: "prod"
        - mouse_dir (bool): if True, session information is in a "mouse_*"
                            subdirectory
                            default: True
        - check (bool)    : if True, checks whether the files and directories
                            in the output dictionaries exist (with a few
                            exceptions)
                            default: True

    Returns:
        - dirpaths (dict): dictionary of directory paths
            ["expdir"] (str)  : full path name of the experiment directory
            ["procdir"] (str) : full path name of the processed directory
            ["demixdir"] (str): full path name of the demixed directory
            ["segdir"] (str)  : full path name of the segmentation directory
        - filepaths (dict): dictionary of file paths
            ["behav_video_h5"] (str)    : full path name of the behavioral hdf5
                                          video file
            ["pupil_video_h5"] (str)    : full path name of the pupil hdf5
                                          video file
            ["roi_extract_json"] (str)  : full path name of the ROI extraction
                                          json
            ["roi_objectlist_txt"] (str): full path to ROI object list txt
            ["stim_pkl"] (str)          : full path name of the stimulus
                                          pickle file
            ["stim_sync_h5"] (str)      : full path name of the stimulus
                                          synchronization hdf5 file
            ["time_sync_h5"] (str)      : full path name of the time
                                          synchronization hdf5 file

            Existence not checked:
            ["align_pkl"] (str)       : full path name of the stimulus
                                        alignment pickle file
            ["correct_data_h5"] (str) : full path name of the motion corrected
                                        2p data hdf5 file
            ["roi_trace_h5"] (str)    : full path name of the ROI raw processed
                                        fluorescence trace hdf5 file
                                        (allen version)
            ["roi_trace_dff_h5"] (str): full path name of the ROI dF/F trace
                                        hdf5 file (allen version)
            ["zstack_h5"] (str)       : full path name of the zstack 2p hdf5
                                        file
    """

    sessdir, expdir, procdir, demixdir, segdir = get_sess_dirs(
        maindir, sessid, expid, segid, mouseid, runtype, mouse_dir, check)

    roi_trace_paths = get_roi_trace_paths(
        maindir, sessid, expid, segid, mouseid, runtype, mouse_dir,
        dendritic=False, check=False)  # will check below, if required

    # set the file names
    sess_m_d = f"{sessid}_{mouseid}_{date}"

    dirpaths = {"expdir"  : expdir,
                "procdir" : procdir,
                "segdir"  : segdir,
                "demixdir": demixdir,
                }

    filepaths = {"align_pkl"         : os.path.join(sessdir, f"{sess_m_d}_df.pkl"),
                 "behav_video_h5"    : os.path.join(sessdir, f"{sess_m_d}_video-0.h5"),
                 "correct_data_h5"   : os.path.join(procdir, "concat_31Hz_0.h5"),
                 "pupil_video_h5"    : os.path.join(sessdir, f"{sess_m_d}_video-1.h5"),
                 "roi_extract_json"  : os.path.join(procdir, f"{expid}_input_extract_traces.json"),
                 "roi_trace_h5"      : roi_trace_paths["roi_trace_h5"],
                 "roi_trace_dff_h5"  : roi_trace_paths["roi_trace_dff_h5"],
                 "roi_objectlist_txt": os.path.join(segdir, "objectlist.txt"),
                 "stim_pkl"          : os.path.join(sessdir, f"{sess_m_d}_stim.pkl"),
                 "stim_sync_h5"      : os.path.join(sessdir, f"{sess_m_d}_sync.h5"),
                 "time_sync_h5"      : os.path.join(expdir, f"{expid}_time_synchronization.h5"),
                 "zstack_h5"         : os.path.join(sessdir, f"{sessid}_zstack_column.h5"),
                 }

    if check:
        # files not to check for (are created if needed or should be checked
        # when needed, due to size)
        no_check = ["align_pkl", "correct_data_h5", "zstack_h5",
                    "roi_trace_h5", "roi_trace_dff_h5"]
        for key in filepaths.keys():
            if key not in no_check:
                file_util.checkfile(filepaths[key])

    return dirpaths, filepaths
7dee7e6b801f4b9270f87b9eaaf256a64cffaf58
6,668
import jwt

def generate_auth_token():
    """Generate a token using jwt.

    Returns:
        token.
    """
    key = PRIVATE_KEY
    data = {'appId': APPLICATION_ID}
    token = jwt.encode(data, key, algorithm='RS256')
    return token
a2e9307f392a8a6c0d83e9f1064475c11fc4eeec
6,669
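The verifying counterpart with PyJWT; PUBLIC_KEY is hypothetical (the RSA public key matching PRIVATE_KEY):

import jwt

decoded = jwt.decode(generate_auth_token(), PUBLIC_KEY, algorithms=['RS256'])
assert decoded['appId'] == APPLICATION_ID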
from typing import Any, Dict, List

def dict_expand(d: Dict[Any, Any]) -> List[Dict[Any, Any]]:
    """Converts a dictionary of lists to a list of dictionaries.

    The resulting list will be of the same length as the longest dictionary
    value. If any values are not lists then they will be repeated to the
    required length.

    Args:
        d: The dictionary of values to expand.

    Returns:
        The resulting list of dictionaries.
    """
    size = max(_len_arg(arg) for arg in d.values())
    d = {k: _expand_arg(v, size) for k, v in d.items()}
    return [{k: v[i] for k, v in d.items()} for i in range(size)]
6ca2c25318a3b6bc0b2a45bf3aeec7187ad78e5c
6,670
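Plausible versions of the private helpers (assumptions, not the originals) plus a worked call:

def _len_arg(arg):
    return len(arg) if isinstance(arg, list) else 1

def _expand_arg(arg, size):
    return arg if isinstance(arg, list) else [arg] * size

print(dict_expand({'a': [1, 2, 3], 'b': 7}))
# [{'a': 1, 'b': 7}, {'a': 2, 'b': 7}, {'a': 3, 'b': 7}]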
def get_asexual_lineage_num_discrete_state_changes(lineage, attribute_list):
    """Get the number of discrete state changes from an asexual lineage.

    State is described by the aggregation of all attributes given by
    attribute_list.

    Args:
        lineage (networkx.DiGraph): an asexual lineage
        attribute_list (list): list of attributes (strings) to use when
            defining a state

    Returns:
        The number of discrete states along the lineage.
    """
    # Check that lineage is an asexual lineage.
    if not utils.is_asexual_lineage(lineage):
        raise Exception("the given lineage is not an asexual lineage")
    # Check that all nodes have all given attributes in the attribute list
    if not utils.all_taxa_have_attributes(lineage, attribute_list):
        raise Exception("given attributes are not universal among all taxa along the lineage")
    # get the first state (root node)
    lineage_id = utils.get_root_ids(lineage)[0]
    num_states = 1
    cur_state = [lineage.nodes[lineage_id][attr] for attr in attribute_list]
    # count the number of state changes moving down the lineage
    while True:
        successor_ids = list(lineage.successors(lineage_id))
        if len(successor_ids) == 0:
            break  # We've hit the last thing!
        lineage_id = successor_ids[0]
        state = [lineage.nodes[lineage_id][attr] for attr in attribute_list]
        if cur_state != state:
            cur_state = state
            num_states += 1
    return num_states
9c0d4badc7b4fea70c56ce69727e48eb991a96e1
6,671
def check_downloaded(dataset: str, directory: str = None) -> bool:
    """ Check whether dataset is downloaded

    Args:
        dataset (str): String of dataset's name, e.g. ml-100k, bx
        directory (str, optional): String of directory of downloaded data. Defaults to None.

    Returns:
        bool: True if the dataset's name is in the list of subdirectories in the input directory.
    """
    return dataset in get_downloaded_data(directory=directory)
bf3342e7da11b34918bc2cb9939c95145d2f4feb
6,672
from pathlib import Path

def enterprise_1_9_installer() -> Path:
    """
    Return the path to an installer for DC/OS Enterprise 1.9.
    """
    return Path('/tmp/dcos_generate_config_1_9.ee.sh')
857b5d339e05cbb225189d7ee47d0415fc539c54
6,673
from scipy import sparse

def Laplacian(n):
    """
    Create Laplacian on 2-dimensional grid with n*n nodes
    """
    # forward_diff_matrix is assumed to be defined elsewhere in the module.
    B = forward_diff_matrix(n)
    D = -B.T @ B

    Dx = sparse.kron(sparse.eye(n), D).tocsr()
    Dy = sparse.kron(D, sparse.eye(n)).tocsr()
    return Dx + Dy
47d70e635dc8e7d722e069435d17214a6ea3c6de
6,674
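To run the snippet standalone you need a forward_diff_matrix; the one-sided difference below is an assumption about that helper, not its original definition:

import numpy as np
from scipy import sparse

def forward_diff_matrix(n):
    # n x n forward difference: (Bu)_i = u_{i+1} - u_i (last row one-sided)
    return sparse.diags([-1.0, 1.0], [0, 1], shape=(n, n), format='csr')

L = Laplacian(3)
print(np.round(L.toarray()))  # 9x9 operator; interior rows carry the (1, 1, -4, 1, 1) stencil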
import re

def LookupGitSVNRevision(directory, depth):
    """
    Fetch the Git-SVN identifier for the local tree.
    Parses first |depth| commit messages.
    Errors are swallowed.
    """
    if not IsGitSVN(directory):
        return None
    git_re = re.compile(r'^\s*git-svn-id:\s+(\S+)@(\d+)')
    proc = RunGitCommand(directory, ['log', '-' + str(depth)])
    if proc:
        for line in proc.stdout:
            match = git_re.match(line)
            if match:
                id = match.group(2)
                if id:
                    proc.stdout.close()  # Cut pipe for fast exit.
                    return id
    return None
664da44ee6057a62eb8ece161ade5cabac15bc7b
6,675
def collect_jars( dep_targets, dependency_analyzer_is_off = True, unused_dependency_checker_is_off = True, plus_one_deps_is_off = True): """Compute the runtime and compile-time dependencies from the given targets""" # noqa if dependency_analyzer_is_off: return _collect_jars_when_dependency_analyzer_is_off( dep_targets, unused_dependency_checker_is_off, plus_one_deps_is_off, ) else: return _collect_jars_when_dependency_analyzer_is_on(dep_targets)
10203c31bb2d1b5df9336d606355b497f7dd755a
6,676
def _cred1_adapter(user=None, password=None): """Just a sample adapter from one user/pw type to another""" return dict(user=user + "_1", password=password + "_2")
9e7c218d2dc01793cba232ba1f6d69a54bf21fee
6,677
# Assumes the standalone Keras backend; use tensorflow.keras.backend for tf.keras.
from keras import backend as K

def acc_metric(y_true, y_pred):
    """
    Scaled mean absolute error: mean of |y_pred - y_true| * 5000
    (kept under its original name, where it was labelled "Accuracy").
    """
    diff = K.abs(y_pred - y_true) * 5000
    return K.mean(diff, axis=-1)
0722791db5546f16648f74b8927590de8696e3d5
6,678
async def confirm(message: discord.Message, fallback: str = None) -> bool:
    """
    Helper function to send a checkmark reaction on a message.

    This would be used for responding to a user that an action completed successfully,
    without sending a whole other message.

    If a checkmark reaction cannot be added, the optional `fallback` message will be
    sent instead.

    :param discord.Message message: The message to add the reaction to.
    :param str fallback: The fallback message to be sent to the channel, if the
        reaction could not be added.
    :return: Whether confirming the message succeeded.
    """
    try:
        await message.add_reaction("☑")
    except discord.HTTPException:
        pass
    else:
        return True

    if fallback is None:
        return False

    # now still executing only if the reaction could not be added
    try:
        await message.channel.send(fallback)
    except discord.HTTPException:
        return False  # we weren't able to send any feedback to the user at all
    else:
        return True
2567957d4239605072bd4f707c12e2b265b8cfbe
6,679
def get_layer_version( lambda_client: BaseClient, layer_name: str, version: int, ) -> "definitions.LambdaLayer": """Retrieve the configuration for the specified lambda layer.""" return definitions.LambdaLayer( lambda_client.get_layer_version( LayerName=layer_name, VersionNumber=version, ) )
cfa2121ac757ae24b67bb25f7fd3046f017df85d
6,680
import requests
from lxml import etree

def get_detail_msg(detail_url):
    """
    2. Fetch the detailed data for a single position.
    :param detail_url: URL of the position detail page
    :return: position data
    """
    # print('The requested detail URL is: ' + detail_url)
    response = requests.get(detail_url, headers=HEADERS)
    html_element = etree.HTML(response.text)
    position = {}
    # [Field] position title
    title = html_element.xpath('//tr[@class="h"]/td/text()')[0]
    position['title'] = title
    # [Field] work location / position category
    # (the Chinese literals below match labels in the scraped page, so they
    # must stay as-is)
    top_infos = html_element.xpath('//tr[@class="c bottomline"]//text()')
    position['location'] = top_infos[top_infos.index('工作地点:') + 1]
    position['category'] = top_infos[top_infos.index('职位类别:') + 1]

    content_infos = html_element.xpath('//ul[@class="squareli"]')
    # [Field] job duties
    work_do_info = content_infos[0]
    position['duty'] = work_do_info.xpath("./li/text()")
    # [Field] job requirements
    work_ask_info = content_infos[1]
    position['ask'] = work_ask_info.xpath('./li/text()')
    return position
2fc5b316abed9eb9aeff99ae87cdd8e5e59a5e70
6,681
def wizard_process_received_form(form):
    """
    Processing of the form received during the time measure.
    Expected result example:
    {1: '00:43.42', 2: '00:41.35', 3: '00:39.14', 4: '00:27.54'}
    """
    lines = {key.split('_')[1]: value.split('_')[1]
             for key, value in form.items() if key.startswith("line")}
    times = {key.split('_')[1]: value
             for key, value in form.items() if key.startswith("time")}
    return {int(value): times[key] for key, value in lines.items()}
54b10589cab7ce689b64f5373d2f0a998044db82
6,682
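Worked example; the exact field/value naming ("lane_2") is an assumption, since any "prefix_<n>" value satisfies the split('_')[1] parsing above:

form = {
    'line_0': 'lane_2', 'time_0': '00:43.42',
    'line_1': 'lane_1', 'time_1': '00:41.35',
}
print(wizard_process_received_form(form))
# {2: '00:43.42', 1: '00:41.35'}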
import inspect

def getsource(obj, is_binary=False):
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Inputs:

    - obj: an object whose source code we will attempt to extract.

    Optional inputs:

    - is_binary: whether the object is known to come from a binary source.
      This implementation will skip returning any output for binary objects,
      but custom extractors may know how to meaningfully process them."""

    if is_binary:
        return None
    else:
        return inspect.getsource(obj)
9e97a030c695b9ea50d27abc5253e47be7d4c06a
6,683
import re

def extract_sector_id(room):
    """Given a room identifier of the form:

        'aaa-bbb-cc-d-e-123[abcde]'

    Return the sector id: '123'
    """
    m = re.search(r'(?P<sector_id>\d+)', room)
    return m.group('sector_id') if m else None
f5bfb64d32769cd4b6c2b7309d41450fa807d7a2
6,684
def splitext_all(_filename):
    """Split all extensions (everything after the first '.') from the filename.

    Works similarly to os.path.splitext, but splits at the first dot rather
    than the last, and returns an empty extension when there is no dot.
    """
    _name, _, _extensions = _filename.partition('.')
    if not _extensions:
        return _name, ''
    return _name, '.' + _extensions
bf9e4ee06eb30dfeb7898ce6e34607bef20b290b
6,685
def catch(func, *args, **kw):
    """Catch most exceptions in 'func' and print them.

    Use to wrap top-level functions and commands only.
    """
    try:
        return func(*args, **kw)
    except Exception as e:
        print(e)
        # TODO consider:
        # if e.message:
        #     print(e.message)
        # else:
        #     print(e)
bca64becacb6121afff6d5ecaa14b1a2ef2ef3dc
6,686
def tag_in_tags(entity, attribute, value): """ Return true if the provided entity has a tag of value in its tag list. """ return value in entity.tags
ad88be5f8848b387f2a261ce5506dffde285a1d8
6,687
def generate_finding_title(title):
    """
    Generate a consistent title for a finding in AWS Security Hub
    * Set up as a function for consistency
    """
    return "Trend Micro: {}".format(title)
0cf390c2579e06c2166b086332035b864d3db1e3
6,688
def makeHexagon(x, y, w, h):
    """Return hexagonal QPolygonF. (x, y) is the top left corner."""
    points = []
    cos = [1., 0.5, -0.5, -1., -0.5, 0.5]
    sin = [0., 0.866025, 0.866025, 0., -0.866025, -0.866025]
    # QPointF keeps the fractional coordinates; QPoint would truncate them.
    for c, s in zip(cos, sin):
        points.append(QPointF(x + w * c, y + h * s))
    return QPolygonF(points)
7310e0313130f54b125c81f332a541b2b2b9b9a9
6,689
import torch.nn.functional as F

def save_conv_output(activations, name):
    """
    Saves layer output in activations dict with name key
    """
    def get_activation(m, i, o):
        # m: module, i: input tuple, o: output tensor
        activations[name] = F.relu(o).data.cpu().numpy()
    return get_activation
13034128234ea6a9633ae144ac02788f2d49986a
6,690
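The closure above is meant to be passed to register_forward_hook; a runnable sketch (the layer and sizes are arbitrary):

import torch
import torch.nn as nn

activations = {}
conv = nn.Conv2d(3, 8, kernel_size=3)
handle = conv.register_forward_hook(save_conv_output(activations, 'conv1'))
conv(torch.randn(1, 3, 16, 16))
print(activations['conv1'].shape)  # (1, 8, 14, 14)
handle.remove()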
async def get_profile_xp(user_id: int):
    """
    Get a user's profile xp.

    :param user_id: Discord User ID
    """
    # NOTE: this coroutine references `self.conn`, so it was evidently lifted
    # from a class that owns the database connection; it is not standalone.
    return (await self.conn.fetchrow("SELECT profilexp FROM currency.levels WHERE userid = $1", user_id))[0]
d029bb335442aa3ba5ef02b143351e8ccc6b6434
6,691
import os
import shutil

import click

def tryrmcache(dir_name, verbose=False):
    """
    removes all __pycache__ starting from directory dir_name
    all the way to leaf directory

    Args:
        dir_name(string) : path from where to start removing pycache
    """
    is_removed = False
    for root, dirs, _ in os.walk(dir_name, topdown=False):
        for name in dirs:
            if name == "__pycache__":
                shutil.rmtree(os.path.join(root, name))
                is_removed = True

    if verbose:
        if is_removed:
            click.echo("[x] __pycache__ successfully deleted")
        else:
            click.echo("[ ] __pycache__ doesn't exist", err=True)

    return is_removed
d4453352b30a8d3683b928f864536bcd1d6fda9f
6,692
from typing import Optional

def validate_raw_data(data: Optional[UserPackage]) -> bool:
    """Returns False if invalid data"""
    # NOTE: add more validation as more fields are required
    if data is None or data.contribs is None:
        return False

    if (
        data.contribs.total_stats.commits_count > 0
        and len(data.contribs.total_stats.languages) == 0
    ):
        return False

    return True
22185bc2691b6a5fce98749c119ea14649c0d676
6,693
def extractTheSunIsColdTranslations(item): """ Parser for 'The Sun Is Cold Translations' """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None if '108 maidens' in item['tags']: return buildReleaseMessageWithType(item, '108 Maidens of Destiny', vol, chp, frag=frag, postfix=postfix) if 'Back to the Apocalypse' in item['tags']: return buildReleaseMessageWithType(item, 'Back to the Apocalypse', vol, chp, frag=frag, postfix=postfix) return False
94ef69f42dd183a2155a02c8035c12da30eb34e2
6,694
import torch

def to_device(x, device):
    """Cast a hierarchical object to pytorch device"""
    if isinstance(x, torch.Tensor):
        return x.to(device)
    elif isinstance(x, dict):
        for k in list(x.keys()):
            x[k] = to_device(x[k], device)
        return x
    elif isinstance(x, (list, tuple)):
        return type(x)(to_device(t, device) for t in x)
    else:
        raise ValueError('Unsupported type!')
a315905fb0cf6d6720103c0d22440418ebd41bf1
6,695
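Typical use, moving a nested batch structure in one call (shapes are arbitrary):

import torch

batch = {'x': torch.zeros(2, 3), 'pair': (torch.ones(2), torch.ones(2))}
batch = to_device(batch, torch.device('cpu'))
print(type(batch['pair']))  # the tuple container type is preserved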
def git_show_oneline(obj): """Returns: One-line description of a git object `obj`, which is typically a commit. https://git-scm.com/docs/git-show """ return exec_headline(['git', 'show', '--oneline', '--quiet', obj])
77427786a8d1b9e3b01d5194387f047c1c9ce505
6,696
import logging

# and_ here is the SQLAlchemy conjunction used in the .filter() call below
# (the original imported operator.and_, which only works via the overloaded
# `&` operator).
from sqlalchemy import and_

def like_post():
    """ Like a post """
    try:
        # This will prevent old code from adding invalid post_ids
        post_id = int(request.args.get('post_id', '-1'))
        if post_id < 0:
            return "No Post Found to like!"

        vote = (db_session.query(Vote)
                .filter(and_(Vote.object_id == post_id,
                             Vote.user_id == current_user.id))
                .first())

        if not vote:
            vote = Vote(user_id=current_user.id, object_id=post_id)
            db_session.add(vote)
            db_session.commit()
    except Exception as e:
        logging.warning(f'ERROR processing request {e}')

    return ""
8cdde2ec6f71104178c49661a9e3fdf5c62bb67d
6,697
def login_post():
    """Process the submitted login form"""
    # Username the user typed into the form
    username = bottle.request.forms.user
    # Compute the MD5 hash of the password that we will store
    password = password_md5(bottle.request.forms.psw)
    # Check whether the user supplied correct credentials
    c = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    c.execute("SELECT 1 FROM uporabnik WHERE username=%s AND geslo=%s",
              [username, password])
    if c.fetchone() is None:
        # Username and password do not match
        return bottle.template("login.html",
                               napaka="Nepravilna prijava",  # warning shown by the login template
                               username=username)  # keep the same username
    else:
        # Everything is fine: set the cookie and redirect to the main page
        bottle.response.set_cookie('username', username, path='/', secret=secret)
        bottle.redirect("/")
9b2243f7e618833d59a7d07454ed8fe86d4b18fc
6,698
def parse_symbol_file(filepath, fapi=None): """Read in stock symbol list from a text file. Args: filepath: Path to file containing stock symbols, one per line. fapi: If this is supplied, the symbols read will be conformed to a financial API; currently 'google' or 'yahoo'. Returns: List of stock symbols; list may be empty if file could not be parsed. """ try: with open(filepath, 'r') as file_handle: symbols = [line.strip() for line in list(file_handle) if '#' not in line] if fapi: symbols = conform_symbols(symbols, fapi) except IOError: symbols = [] return symbols
af0f085fd5424045c71dee6a290a07645f242ff8
6,699