content : string (lengths 35 to 762k)
sha1 : string (length 40)
id : int64 (0 to 3.66M)
def plot_histogram(ax, values, bins, colors='r', log=False, xminmax=None):
    """ plot 1 histogram """
    #print(type(values))
    ax.hist(values, histtype="bar", bins=bins, color=colors, log=log,
            alpha=0.8, density=False, range=xminmax)
    # Add a small annotation.
    # ax.annotate('Annotation', xy=(0.25, 4.25),
    #             xytext=(0.9, 0.9), textcoords=ax.transAxes,
    #             va="top", ha="right",
    #             bbox=dict(boxstyle="round", alpha=0.2),
    #             arrowprops=dict(
    #                 arrowstyle="->",
    #                 connectionstyle="angle,angleA=-95,angleB=35,rad=10"),
    #             )
    return ax
d11e89c005275a176fd00d0e2ac5173ee8f490b1
21,358
import tensorflow as tf


def build_model():
    """Build the model.

    Returns
    -------
    tensorflow.keras.Model
        The model.
    """
    input_x = tf.keras.Input(
        shape=(30,), name='input_x'
    )  # shape does not include the batch size.
    layer1 = tf.keras.layers.Dense(5, activation=tf.keras.activations.tanh)
    layer2 = tf.keras.layers.Dense(
        1, activation=tf.keras.activations.sigmoid, name='output_layer'
    )
    h = layer1(input_x)
    output = layer2(h)
    return tf.keras.Model(inputs=[input_x], outputs=[output])
81e2ee2533903beaa4a087613e63ea383d8a746b
21,359
import torch
from scipy import stats


def evaluate_generator(generator, backbone_pool, lookup_table, CONFIG, device, val=True):
    """Evaluate the Kendall tau and hardware-constraint loss of the generator."""
    total_loss = 0
    evaluate_metric = {"gen_macs": [], "true_macs": []}

    for mac in range(CONFIG.low_macs, CONFIG.high_macs, 10):
        hardware_constraint = torch.tensor(mac, dtype=torch.float32)
        hardware_constraint = hardware_constraint.view(-1, 1)
        hardware_constraint = hardware_constraint.to(device)

        backbone = backbone_pool.get_backbone(hardware_constraint.item())
        backbone = backbone.to(device)

        normalize_hardware_constraint = min_max_normalize(CONFIG.high_macs, CONFIG.low_macs, hardware_constraint)

        noise = torch.randn(*backbone.shape)
        noise = noise.to(device)
        noise *= 0

        arch_param = generator(backbone, normalize_hardware_constraint, noise)
        arch_param = lookup_table.get_validation_arch_param(arch_param)

        layers_config = lookup_table.decode_arch_param(arch_param)
        print(layers_config)

        gen_mac = lookup_table.get_model_macs(arch_param)
        hc_loss = cal_hc_loss(gen_mac.cuda(), hardware_constraint.item(), CONFIG.alpha, CONFIG.loss_penalty)

        evaluate_metric["gen_macs"].append(gen_mac.item())
        evaluate_metric["true_macs"].append(mac)

        total_loss += hc_loss.item()

    tau, _ = stats.kendalltau(evaluate_metric["gen_macs"], evaluate_metric["true_macs"])
    return evaluate_metric, total_loss, tau
53941e29cae9a89c46ce598291638d7df28db4ff
21,360
def getCameras():
    """Return a list of cameras in the current maya scene."""
    return cmds.listRelatives(cmds.ls(type='camera'), p=True)
a3a6f202250f92c1cab46df92c78b53b15fd5cae
21,362
import re def convert_quotes(text): """ Convert quotes in *text* into HTML curly quote entities. >>> print(convert_quotes('"Isn\\'t this fun?"')) &#8220;Isn&#8217;t this fun?&#8221; """ punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" # Special case if the very first character is a quote # followed by punctuation at a non-word-break. Close the quotes by brute # force: text = re.sub(r"""^'(?=%s\\B)""" % (punct_class,), '&#8217;', text) text = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), '&#8221;', text) # Special case for double sets of quotes, e.g.: # <p>He said, "'Quoted' words in a larger quote."</p> text = re.sub(r""""'(?=\w)""", '&#8220;&#8216;', text) text = re.sub(r"""'"(?=\w)""", '&#8216;&#8220;', text) # Special case for decade abbreviations (the '80s): text = re.sub(r"""\b'(?=\d{2}s)""", '&#8217;', text) close_class = r'[^\ \t\r\n\[\{\(\-]' dec_dashes = '&#8211;|&#8212;' # Get most opening single quotes: opening_single_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) ' # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_single_quotes_regex.sub(r'\1&#8216;', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (?!\s | s\b | \d) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1&#8217;', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (\s | s\b) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1&#8217;\2', text) # Any remaining single quotes should be opening ones: text = re.sub("'", '&#8216;', text) # Get most opening double quotes: opening_double_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) " # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_double_quotes_regex.sub(r'\1&#8220;', text) # Double closing quotes: closing_double_quotes_regex = re.compile(r""" #(%s)? # character that indicates the quote should be closing " (?=\s) """ % (close_class,), re.VERBOSE) text = closing_double_quotes_regex.sub('&#8221;', text) closing_double_quotes_regex = re.compile(r""" (%s) # character that indicates the quote should be closing " """ % (close_class,), re.VERBOSE) text = closing_double_quotes_regex.sub(r'\1&#8221;', text) # Any remaining quotes should be opening ones. text = re.sub('"', '&#8220;', text) return text
82b5cabc2f4b77f5c39ab785c02e04ff4ca4f517
21,363
import warnings

import numpy as np


def time_shift(signal, n_samples_shift, circular_shift=True, keepdims=False):
    """Shift a signal in the time domain by n samples.

    This function performs a circular shift by default, inherently assuming
    that the signal is periodic. Use the option ``circular_shift=False`` to
    pad with nan values instead.

    Notes
    -----
    This function is primarily intended to be used when processing impulse
    responses.

    Parameters
    ----------
    signal : ndarray, float
        Signal to be shifted
    n_samples_shift : integer
        Number of samples by which the signal should be shifted. A negative
        number of samples will result in a left-shift, while a positive
        number of samples will result in a right shift of the signal.
    circular_shift : bool, True
        Perform a circular or non-circular shift. If a non-circular shift is
        performed, the data will be padded with nan values at the respective
        beginning or ending of the data, corresponding to the number of
        samples the data is shifted.
    keepdims : bool, False
        Do not squeeze the data before returning.

    Returns
    -------
    shifted_signal : ndarray, float
        Shifted input signal
    """
    n_samples_shift = np.asarray(n_samples_shift, dtype=int)

    if np.any(signal.shape[-1] < n_samples_shift):
        msg = "Shifting by more samples than length of the signal."
        if circular_shift:
            warnings.warn(msg, UserWarning)
        else:
            raise ValueError(msg)

    signal = np.atleast_2d(signal)
    n_samples = signal.shape[-1]
    signal_shape = signal.shape
    signal = np.reshape(signal, (-1, n_samples))
    n_channels = np.prod(signal.shape[:-1])

    if n_samples_shift.size == 1:
        n_samples_shift = np.broadcast_to(n_samples_shift, n_channels)
    elif n_samples_shift.size == n_channels:
        n_samples_shift = np.reshape(n_samples_shift, n_channels)
    else:
        raise ValueError("The number of shift samples has to match the number "
                         "of signal channels.")

    shifted_signal = signal.copy()
    for channel in range(n_channels):
        shifted_signal[channel, :] = np.roll(
            shifted_signal[channel, :],
            n_samples_shift[channel],
            axis=-1)
        if not circular_shift:
            if n_samples_shift[channel] < 0:
                # index is negative, so it references from the end of the array
                shifted_signal[channel, n_samples_shift[channel]:] = np.nan
            else:
                # index is positive, so it references from the start of the array
                shifted_signal[channel, :n_samples_shift[channel]] = np.nan

    shifted_signal = np.reshape(shifted_signal, signal_shape)
    if not keepdims:
        shifted_signal = np.squeeze(shifted_signal)

    return shifted_signal
f5017a5b9988ff5dc10e49b1f2d4127293564607
21,364
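A minimal usage sketch for the `time_shift` entry above (assuming the function and NumPy are importable); it shifts a short impulse circularly and then with NaN padding:

import numpy as np

x = np.array([1.0, 0, 0, 0, 0, 0])             # impulse at sample 0

print(time_shift(x, 2))                        # circular: [0. 0. 1. 0. 0. 0.]
print(time_shift(x, 2, circular_shift=False))  # padded:   [nan nan 1. 0. 0. 0.]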
def get_avgerr(l1_cols_train,l2_cols_train,own_cols_xgb,own_cols_svm,own_cols_bay,own_cols_adab,own_cols_lass,df_train,df_test,experiment,fold_num=0): """ Use mae as an evaluation metric and extract the appropiate columns to calculate the metric Parameters ---------- l1_cols_train : list list with names for the Layer 1 training columns l2_cols_train : list list with names for the Layer 2 training columns own_cols_xgb : list list with names for the Layer 1 xgb columns own_cols_svm : list list with names for the Layer 1 svm columns own_cols_bay : list list with names for the Layer 1 brr columns own_cols_adab : list list with names for the Layer 1 adaboost columns own_cols_lass : list list with names for the Layer 1 lasso columns df_train : pd.DataFrame dataframe for training predictions df_test : pd.DataFrame dataframe for testing predictions experiment : str dataset name fold_num : int number for the fold Returns ------- float best mae for Layer 1 float best mae for Layer 2 float best mae for Layer 3 float best mae for all layers float mae for xgb float mae for svm float mae for brr float mae for adaboost float mae for lasso list selected predictions Layer 2 list error for the selected predictions Layer 2 float train mae for Layer 3 """ # Get the mae l1_scores = [x/float(len(df_train["time"])) for x in list(df_train[l1_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] l2_scores = [x/float(len(df_train["time"])) for x in list(df_train[l2_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_xgb = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_xgb].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_svm = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_svm].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_bay = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_bay].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_lass = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_lass].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_adab = [x/float(len(df_train["time"])) for x in list(df_train[own_cols_adab].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] own_scores_l2 = [x/float(len(df_train["time"])) for x in list(df_train[l2_cols_train].sub(df_train["time"].squeeze(),axis=0).apply(abs).apply(sum,axis="rows"))] selected_col_l1 = l1_cols_train[l1_scores.index(min(l1_scores))] selected_col_l2 = l2_cols_train[l2_scores.index(min(l2_scores))] # Set mae to 0.0 if not able to get column try: selected_col_own_xgb = own_cols_xgb[own_scores_xgb.index(min(own_scores_xgb))] except KeyError: selected_col_own_xgb = 0.0 try: selected_col_own_svm = own_cols_svm[own_scores_svm.index(min(own_scores_svm))] except KeyError: selected_col_own_svm = 0.0 try: selected_col_own_bay = own_cols_bay[own_scores_bay.index(min(own_scores_bay))] except KeyError: selected_col_own_bay = 0.0 try: selected_col_own_lass = own_cols_lass[own_scores_lass.index(min(own_scores_lass))] except KeyError: selected_col_own_lass = 0.0 try: selected_col_own_adab = own_cols_adab[own_scores_adab.index(min(own_scores_adab))] except KeyError: selected_col_own_adab = 0.0 # Remove problems with seemingly duplicate columns getting selected try: cor_l1 = sum(map(abs,df_test["time"]-df_test[selected_col_l1]))/len(df_test["time"]) except KeyError: 
selected_col_l1 = selected_col_l1.split(".")[0] cor_l1 = sum(map(abs,df_test["time"]-df_test[selected_col_l1]))/len(df_test["time"]) try: cor_l2 = sum(map(abs,df_test["time"]-df_test[selected_col_l2]))/len(df_test["time"]) except KeyError: selected_col_l2 = selected_col_l2.split(".")[0] cor_l2 = sum(map(abs,df_test["time"]-df_test[selected_col_l2]))/len(df_test["time"]) try: cor_own_xgb = sum(map(abs,df_test["time"]-df_test[selected_col_own_xgb]))/len(df_test["time"]) except KeyError: selected_col_own_xgb = selected_col_own_xgb.split(".")[0] cor_own_xgb = sum(map(abs,df_test["time"]-df_test[selected_col_own_xgb]))/len(df_test["time"]) try: cor_own_svm = sum(map(abs,df_test["time"]-df_test[selected_col_own_svm]))/len(df_test["time"]) except KeyError: selected_col_own_svm = selected_col_own_svm.split(".")[0] cor_own_svm = sum(map(abs,df_test["time"]-df_test[selected_col_own_svm]))/len(df_test["time"]) try: cor_own_bay = sum(map(abs,df_test["time"]-df_test[selected_col_own_bay]))/len(df_test["time"]) except KeyError: selected_col_own_bay = selected_col_own_bay.split(".")[0] cor_own_bay = sum(map(abs,df_test["time"]-df_test[selected_col_own_bay]))/len(df_test["time"]) try: cor_own_lass = sum(map(abs,df_test["time"]-df_test[selected_col_own_lass]))/len(df_test["time"]) except KeyError: selected_col_own_lass = selected_col_own_lass.split(".")[0] cor_own_lass = sum(map(abs,df_test["time"]-df_test[selected_col_own_lass]))/len(df_test["time"]) try: cor_own_adab = sum(map(abs,df_test["time"]-df_test[selected_col_own_adab]))/len(df_test["time"]) except KeyError: selected_col_own_adab = selected_col_own_adab.split(".")[0] cor_own_adab = sum(map(abs,df_test["time"]-df_test[selected_col_own_adab]))/len(df_test["time"]) cor_l3 = sum(map(abs,df_test["time"]-df_test["preds"]))/len(df_test["time"]) # Variables holding all predictions across experiments all_preds_l1.extend(zip(df_test["time"],df_test[selected_col_l1],[experiment]*len(df_test[selected_col_l1]),[len(df_train.index)]*len(df_test[selected_col_l1]),[fold_num]*len(df_test[selected_col_l1]),df_test[selected_col_own_xgb],df_test[selected_col_own_bay],df_test[selected_col_own_lass],df_test[selected_col_own_adab])) all_preds_l2.extend(zip(df_test["time"],df_test[selected_col_l2],[experiment]*len(df_test[selected_col_l2]),[len(df_train.index)]*len(df_test[selected_col_l2]),[fold_num]*len(df_test[selected_col_l2]))) all_preds_l3.extend(zip(df_test["time"],df_test["preds"],[experiment]*len(df_test["preds"]),[len(df_train.index)]*len(df_test["preds"]),[fold_num]*len(df_test["preds"]))) # Also get the mae for the training models train_cor_l1 = sum(map(abs,df_train["time"]-df_train[selected_col_l1]))/len(df_train["time"]) train_cor_l2 = sum(map(abs,df_train["time"]-df_train[selected_col_l2]))/len(df_train["time"]) train_cor_l3 = sum(map(abs,df_train["time"]-df_train["preds"]))/len(df_train["time"]) print() print("Error l1: %s,%s" % (train_cor_l1,cor_l1)) print("Error l2: %s,%s" % (train_cor_l2,cor_l2)) print("Error l3: %s,%s" % (train_cor_l3,cor_l3)) print(selected_col_l1,selected_col_l2,selected_col_own_xgb) print() print() print("-------------") # Try to select the best Layer, this becomes Layer 4 cor_l4 = 0.0 if (train_cor_l1 < train_cor_l2) and (train_cor_l1 < train_cor_l3): cor_l4 = cor_l1 elif (train_cor_l2 < train_cor_l1) and (train_cor_l2 < train_cor_l3): cor_l4 = cor_l2 else: cor_l4 = cor_l3 
return(cor_l1,cor_l2,cor_l3,cor_l4,cor_own_xgb,cor_own_svm,cor_own_bay,cor_own_adab,cor_own_lass,list(df_test[selected_col_l2]),list(df_test["time"]-df_test[selected_col_l2]),train_cor_l3)
74bfe9ce91f04a2ce3955098f9d1145c5c60ef4a
21,365
from operator import and_


def get_repository_metadata_by_changeset_revision( trans, id, changeset_revision ):
    """Get metadata for a specified repository change set from the database."""
    # Make sure there are no duplicate records, and return the single unique record for the
    # changeset_revision.  Duplicate records were somehow created in the past.  The cause of this
    # issue has been resolved, but we'll leave this method as is for a while longer to ensure all
    # duplicate records are removed.
    all_metadata_records = trans.sa_session.query( trans.model.RepositoryMetadata ) \
                                           .filter( and_( trans.model.RepositoryMetadata.table.c.repository_id == trans.security.decode_id( id ),
                                                          trans.model.RepositoryMetadata.table.c.changeset_revision == changeset_revision ) ) \
                                           .order_by( trans.model.RepositoryMetadata.table.c.update_time.desc() ) \
                                           .all()
    if len( all_metadata_records ) > 1:
        # Delete all records older than the last one updated.
        for repository_metadata in all_metadata_records[ 1: ]:
            trans.sa_session.delete( repository_metadata )
            trans.sa_session.flush()
        return all_metadata_records[ 0 ]
    elif all_metadata_records:
        return all_metadata_records[ 0 ]
    return None
33f5da869f8fde08e2f83d7a60a708e4848664a1
21,366
import pickle import gc def generate_encounter_time(t_impact=0.495*u.Gyr, graph=False): """Generate fiducial model at t_impact after the impact""" # impact parameters M = 5e6*u.Msun rs = 10*u.pc # impact parameters Tenc = 0.01*u.Gyr dt = 0.05*u.Myr # potential parameters potential = 3 Vh = 225*u.km/u.s q = 1*u.Unit(1) rhalo = 0*u.pc par_pot = np.array([Vh.to(u.m/u.s).value, q.value, rhalo.to(u.m).value]) pkl = pickle.load(open('../data/fiducial_at_encounter.pkl', 'rb')) model = pkl['model'] xsub = pkl['xsub'] vsub = pkl['vsub'] # generate perturbed stream model potential_perturb = 2 par_perturb = np.array([M.to(u.kg).value, rs.to(u.m).value, 0, 0, 0]) #print(vsub.si, par_perturb) x1, x2, x3, v1, v2, v3 = interact.general_interact(par_perturb, xsub.to(u.m).value, vsub.to(u.m/u.s).value, Tenc.to(u.s).value, t_impact.to(u.s).value, dt.to(u.s).value, par_pot, potential, potential_perturb, model.x.to(u.m).value, model.y.to(u.m).value, model.z.to(u.m).value, model.v_x.to(u.m/u.s).value, model.v_y.to(u.m/u.s).value, model.v_z.to(u.m/u.s).value) stream = {} stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc) stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s) c = coord.Galactocentric(x=stream['x'][0], y=stream['x'][1], z=stream['x'][2], v_x=stream['v'][0], v_y=stream['v'][1], v_z=stream['v'][2], **gc_frame_dict) cg = c.transform_to(gc.GD1) wangle = 180*u.deg if graph: plt.close() plt.figure(figsize=(10,5)) plt.plot(cg.phi1.wrap_at(180*u.deg), cg.phi2, 'k.', ms=1) plt.xlim(-80,0) plt.ylim(-10,10) plt.tight_layout() return cg
0871e3b6f09e9bf1154182a3d0a24713a90f2fbb
21,367
def get_census_centroid(census_tract_id):
    """
    Gets a pair of decimal coordinates representing the geographic center
    (centroid) of the requested census tract.
    :param census_tract_id:
    :return:
    """
    global _cached_centroids
    if census_tract_id in _cached_centroids:
        return _cached_centroids[census_tract_id]
    tracts = census_tracts_db.as_dictionary()
    for tract in tracts:
        if tract_id_equals(census_tract_id, tract[census_tracts_db.ROW_GEOID]):
            _cached_centroids[census_tract_id] = (
                float(tract[census_tracts_db.ROW_LATITUDE]),
                float(tract[census_tracts_db.ROW_LONGITUDE]),
            )
            return _cached_centroids[census_tract_id]
ba3dde30ce9bd3eab96f8419580edfda051c5564
21,368
def configure_context(args: Namespace, layout: Layout, stop_event: Event) -> Context:
    """Creates the application context, manages state"""
    context = Context(args.file)
    context.layout = layout
    sensors = Sensors(context, stop_event)
    context.sensors = sensors
    listener = KeyListener(context.on_key, stop_event, sensors.get_lock())
    context.listener = listener
    context.change_state("normal")
    context.load_config()
    return context
b24ee704939cf3f02774b6fe3c9399042247500a
21,369
def offsetEndpoint(points, distance, beginning=True):
    """ Pull back end point of way in order to create VISSIM intersection.
        Input: list of nodes, distance, beginning or end of link
        Output: transformed list of nodes
    """
    if beginning:
        a = np.array(points[1], dtype='float')
        b = np.array(points[0], dtype='float')
    if not beginning:
        a = np.array(points[-2], dtype='float')
        b = np.array(points[-1], dtype='float')
    if np.sqrt(sum((b-a)**2)) < distance:
        distance = np.sqrt(sum((b-a)**2)) * 0.99
    db = (b-a) / np.linalg.norm(b-a) * distance
    return b - db
a6733d5670221fbd14b527d63430ebd94e022a5a
21,370
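A small worked example for `offsetEndpoint` above, a sketch assuming NumPy is available as `np` in that module: an endpoint of a 10-unit way is pulled back by 1 unit along the segment.

points = [(0.0, 0.0), (10.0, 0.0)]

print(offsetEndpoint(points, 1.0))                   # start pulled inward: [1. 0.]
print(offsetEndpoint(points, 1.0, beginning=False))  # end pulled inward:   [9. 0.]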
def _remove_parenthesis(word):
    """
    Examples
    --------
    >>> _remove_parenthesis('(ROMS)')
    'ROMS'

    """
    try:
        return word[word.index("(") + 1 : word.rindex(")")]
    except ValueError:
        return word
f47cce7985196b1a9a12284e888b4097b26c32f4
21,371
def check_closed(f):
    """Decorator that checks if connection/cursor is closed."""

    def g(self, *args, **kwargs):
        if self.closed:
            raise exceptions.Error(f"{self.__class__.__name__} already closed")
        return f(self, *args, **kwargs)

    return g
fcb7f8399ae759d644e47b6f8e8a6b887d9315fc
21,372
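A usage sketch for the `check_closed` decorator above; the `exceptions.Error` class it raises is whatever DB-API error module the surrounding package defines (an assumption here).

class Cursor:
    def __init__(self):
        self.closed = False

    @check_closed
    def fetchall(self):
        return []

cur = Cursor()
cur.fetchall()    # returns []
cur.closed = True
# cur.fetchall()  # would now raise exceptions.Error("Cursor already closed")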
def get_box_filter(b: float, b_list: np.ndarray, width: float) -> np.ndarray:
    """
    Returns the values of a box function filter centered on b, with specified width.
    """
    return np.heaviside(width/2 - np.abs(b_list - b), 1)
2885133af9f179fa5238d4fc054abfd48f317709
21,373
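A quick check of `get_box_filter` above (assumes NumPy as `np`): points within width/2 of the centre get weight 1, everything else 0.

import numpy as np

b_list = np.linspace(-2.0, 2.0, 9)
print(get_box_filter(0.0, b_list, 1.0))
# [0. 0. 0. 1. 1. 1. 0. 0. 0.]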
def search(ra=None, dec=None, radius=None, columns=None, offset=None, limit=None, orderby=None):
    """Creates a query for the carpyncho database; you can specify sky
    coordinates (ra, dec, radius), the columns to return, and offset,
    limit and ordering options."""
    query = CarpynchoQuery(ra, dec, radius, columns, offset, limit, orderby)
    return query
aad189695ef93e44aa455635dae2791843f7d174
21,374
def repetitions(seq: str) -> int:
    """
    [Easy] https://cses.fi/problemset/task/1069/
    [Solution] https://cses.fi/paste/659d805082c50ec1219667/

    You are given a DNA sequence: a string consisting of characters A, C, G,
    and T. Your task is to find the longest repetition in the sequence. This
    is a maximum-length substring containing only one type of character.

    The only input line contains a string of n characters.
    Print one integer, the length of the longest repetition.

    Constraints: 1 ≤ n ≤ 10^6

    Example
        Input: ATTCGGGA
        Output: 3
    """
    res, cur = 0, 0
    fir = ''
    for ch in seq:
        if ch == fir:
            cur += 1
        else:
            res = max(res, cur)
            fir = ch
            cur = 1
    return max(res, cur)
4dde2ec4a6cd6b13a54c2eafe4e8db0d87381faa
21,375
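A couple of quick calls to `repetitions` above, matching the CSES example:

print(repetitions("ATTCGGGA"))  # 3  (the run of three G's)
print(repetitions("AAAA"))      # 4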
def measure(data, basis, gaussian=0, poisson=0):
    """Function computes the dot product <x,phi> for a given measurement basis phi

    Args:
    - data (n-size, numpy 1D array): the initial, uncompressed data
    - basis (nxm numpy 2D array): the measurement basis

    Returns:
    - An m-sized numpy 1D array containing the dot products"""
    data = np.float_(data)
    if gaussian != 0 or poisson != 0:
        # Create the original matrix
        data = np.repeat([data], basis.shape[0], 0)
    if gaussian != 0:
        # Add Gaussian noise
        data += np.random.normal(scale=gaussian, size=data.shape)
    if poisson != 0:
        data = np.float_(np.random.poisson(np.abs(data)))
    if gaussian != 0 or poisson != 0:
        return np.diag((data).dot(basis.transpose()))
    else:
        return (data).dot(basis.transpose())
0a25ea52a67441972b65cdad7c76cb772ec6bc6d
21,377
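A usage sketch for `measure` above (assumes NumPy as `np`): project a 4-sample signal onto three random measurement vectors, first noiselessly and then with Poisson noise.

import numpy as np

rng = np.random.default_rng(0)
data = np.array([1.0, 2.0, 3.0, 4.0])
basis = rng.standard_normal((3, 4))     # three measurement vectors of length 4

print(measure(data, basis))             # exact dot products, shape (3,)
print(measure(data, basis, poisson=1))  # Poisson-corrupted measurements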
def getUserByMail(email):
    """Get a User by email."""
    try:
        user = db_session.query(User).filter_by(email=email).one()
        return user
    except Exception:
        return None
423d5dc969d43e0f4a1aafc51b5a05671a1fc3e1
21,378
def parse_headers(headers, data):
    """
    Given a header structure and some data, parse the data as headers.
    """
    return {k: f(v) for (k, (f, _), _), v in zip(headers, data)}
456c2ab2d2f7832076a7263be8815b9abeec56dd
21,379
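A sketch of the header structure `parse_headers` above expects: each entry is `(name, (parser, ...), ...)` and is zipped against the raw values; the concrete field layout here is illustrative.

headers = [
    ("id",   (int, None),   None),
    ("name", (str, None),   None),
    ("temp", (float, None), None),
]
data = ["7", "probe-a", "21.5"]

print(parse_headers(headers, data))
# {'id': 7, 'name': 'probe-a', 'temp': 21.5}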
def dataset_parser(value, A):
    """Parse an ImageNet record from a serialized string Tensor."""
    # return value[:A.shape[0]], value[A.shape[0]:]
    return value[:A.shape[0]], value
0b07b6eec9e3e23f470970c489ad83c416d650e7
21,380
def default_csv_file():
    """ default name for csv files """
    return 'data.csv'
e0a1267e1e8e463d435f3116e970132c4eab949d
21,381
def download(object_client, project_id, datasets_path):
    """Download the contents of file from the object store.

    Parameters
    ----------
    object_client : faculty.clients.object.ObjectClient
    project_id : uuid.UUID
    datasets_path : str
        The target path to download to in the object store

    Returns
    -------
    bytes
        The content of the file
    """
    chunk_generator = download_stream(object_client, project_id, datasets_path)
    return b"".join(chunk_generator)
91da7409b4cc518d87b6502e193a4174c045be0e
21,383
from pathlib import Path from typing import Any def get_assets_of_dataset( db: Session = Depends(deps.get_db), dataset_id: int = Path(..., example="12"), offset: int = 0, limit: int = settings.DEFAULT_LIMIT, keyword: str = Query(None), viz_client: VizClient = Depends(deps.get_viz_client), current_user: models.User = Depends(deps.get_current_active_user), current_workspace: models.Workspace = Depends(deps.get_current_workspace), ) -> Any: """ Get asset list of specific dataset, pagination is supported by means of offset and limit """ dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id) if not dataset: raise DatasetNotFound() assets = viz_client.get_assets( user_id=current_user.id, repo_id=current_workspace.hash, # type: ignore branch_id=dataset.task_hash, # type: ignore keyword=keyword, limit=limit, offset=offset, ) result = { "keywords": assets.keywords, "items": assets.items, "total": assets.total, } return {"result": result}
344095c94884059ed37662521f346d6c03bb4c7f
21,384
def check_rule(body, obj, obj_string, rule, only_body):
    """ Compare the argument with a rule. """
    if only_body:
        # Compare only the body of the rule to the argument
        retval = (body == rule[2:])
    else:
        retval = ((body == rule[2:]) and (obj == obj_string))
    return retval
9237da310ebcc30f623211e659ac2247efb36f69
21,385
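A quick sketch exercising `check_rule` above; the two-character rule prefix stripped by `rule[2:]` is an assumption about the rule format.

rule = "r:weather(cold)"

print(check_rule("weather(cold)", "room1", "room1", rule, only_body=True))   # True
print(check_rule("weather(cold)", "room1", "room2", rule, only_body=False))  # False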
from warnings import warn def pdm_auto_arima(df, target_column, time_column, frequency_data, epochs_to_forecast = 12, d=1, D=0, seasonal=True, m =12, start_p = 2, start_q = 0, max_p=9, max_q=2, start_P = 0, start_Q = 0, max_P = 2, max_Q = 2, validate = False, epochs_to_test = 1): """ This function finds the best order parameters for a SARIMAX model, then makes a forecast Parameters: - df_input (pandas.DataFrame): Input Time Series. - target_column (str): name of the column containing the target feature - time_column (str): name of the column containing the pandas Timestamps - frequency_data (str): string representing the time frequency of record, e.g. "h" (hours), "D" (days), "M" (months) - epochs_to_forecast (int): number of steps for predicting future data - epochs_to_test (int): number of steps corresponding to most recent records to test on - d, D, m, start_p, start_q, max_p, max_q, start_P, start_Q, max_P, max_Q (int): SARIMAX parameters to be set for reseach - seasonal (bool): seasonality flag - validate (bool): if True, epochs_to_test rows are used for validating, else forecast without evaluation Returns: - forecast_df (pandas.DataFrame): Output DataFrame with best forecast found """ assert isinstance(target_column, str) assert isinstance(time_column, str) external_features = [col for col in df if col not in [time_column, target_column]] if epochs_to_test == 0: warn("epochs_to_test=0 and validate=True is not correct, setting validate=False instead") validate = False if frequency_data is not None: df = df.set_index(time_column).asfreq(freq=frequency_data, method="bfill").reset_index() if len(external_features) > 0: #Scaling all exogenous features scaler = MinMaxScaler() scaled = scaler.fit_transform(df.set_index(time_column).drop([target_column], axis = 1).values) train_df = df.dropna() train_df.set_index(time_column, inplace=True) if frequency_data is not None: date = pd.date_range(start=df[time_column].min(), periods=len(train_df)+epochs_to_forecast, freq=frequency_data) else: date = pd.date_range(start=df[time_column].min(), end=df[time_column].max(), periods=len(df)) ### Finding parameter using validation set if validate: train_df_validation = train_df[:-epochs_to_test] if len(external_features) > 0: exog_validation = scaled[:(len(train_df)-epochs_to_test)] model_validation = pmd_arima.auto_arima(train_df_validation[target_column],exogenous = exog_validation, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) exog_validation_forecast = scaled[(len(train_df)-epochs_to_test):len(train_df)] forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test,exogenous= exog_validation_forecast, return_conf_int=True) validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation}) rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values)) print(f'RMSE: {rmse}') exog = scaled[:len(train_df)] model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True) model.fit(y = train_df[target_column],exogenous = exog) training_prediction = 
model.predict_in_sample(exogenous = exog_validation) else: model_validation = pmd_arima.auto_arima(train_df_validation[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) forecast_validation, forecast_validation_ci = model_validation.predict(n_periods = epochs_to_test, return_conf_int=True) validation_df = pd.DataFrame({target_column:train_df[target_column].values[(len(train_df)-epochs_to_test):len(train_df)],'Forecast':forecast_validation}) rmse = np.sqrt(mean_squared_error(validation_df[target_column].values, validation_df.Forecast.values)) print(f'RMSE: {rmse}') #exog = scaled[:len(train_df)] model = pmd_arima.ARIMA( order = list(model_validation.get_params()['order']), seasonal_order = list(model_validation.get_params()['seasonal_order']), trace=False,error_action='ignore', suppress_warnings=True) model.fit(y = train_df[target_column]) training_prediction = model.predict_in_sample() else: if len(external_features) > 0: #Select exogenous features for training exog = scaled[:len(train_df)] #Search for best model model = pmd_arima.auto_arima(train_df[target_column],exogenous = exog, max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) training_prediction = model.predict_in_sample(exogenous = exog) #Training set predictions else: #Search for best model model = pmd_arima.auto_arima(train_df[target_column], max_order = 30, m=m, d=d,start_p=start_p, start_q=start_q,max_p=max_p, max_q=max_q, # basic polynomial seasonal=seasonal,D=D, start_P=start_P, max_P = max_P,start_Q = start_Q, max_Q= max_Q, #seasonal polynomial trace=False,error_action='ignore', suppress_warnings=True, stepwise=True) training_prediction = model.predict_in_sample() #Training set predictions ### Forecasting if len(external_features) > 0: exog_forecast = scaled[len(train_df):len(train_df)+epochs_to_forecast] #Forecast if len(exog_forecast)==0: exog_forecast = np.nan * np.ones((epochs_to_forecast,exog.shape[1])) if epochs_to_forecast > 0: if len(external_features) > 0: forecast, forecast_ci = model.predict(n_periods = len(exog_forecast),exogenous= exog_forecast, return_conf_int=True) else: forecast, forecast_ci = model.predict(n_periods = epochs_to_forecast, return_conf_int=True) #Building output dataset forecast_df=pd.DataFrame() forecast_df[target_column] = df[target_column].values[:len(train_df)+epochs_to_forecast]#df[target_column].values forecast_df['forecast'] = np.nan forecast_df['forecast_up'] = np.nan forecast_df['forecast_low'] = np.nan if validate and epochs_to_forecast > 0: forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,1] forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:-epochs_to_forecast] = forecast_validation_ci[:,0] elif validate and epochs_to_forecast == 0: forecast_df['forecast'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation forecast_df['forecast_up'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,1] 
forecast_df['forecast_low'].iloc[-epochs_to_forecast-epochs_to_test:] = forecast_validation_ci[:,0] if epochs_to_forecast > 0: forecast_df['forecast'].iloc[-epochs_to_forecast:] = forecast forecast_df['forecast_up'].iloc[-epochs_to_forecast:] = forecast_ci[:,1] forecast_df['forecast_low'].iloc[-epochs_to_forecast:] = forecast_ci[:,0] forecast_df[time_column] = date return forecast_df
d5dd6d8ddf01358cde26f9e467e410419290da2e
21,386
def get_lines(filename):
    """
    Returns a list of lines of a file.

    Parameters
        filename : str, name of control file
    """
    with open(filename, "r") as f:
        lines = f.readlines()
    return lines
1307b169733b50517b26ecbf0414ca3396475360
21,387
def _check_socket_state(realsock, waitfor="rw", timeout=0.0):
    """
    <Purpose>
        Checks if the given socket would block on a send() or recv().
        In the case of a listening socket, read_will_block equates to
        accept_will_block.

    <Arguments>
        realsock:
            A real socket.socket() object to check for.

        waitfor:
            An optional specifier of what to wait for. "r" for read only,
            "w" for write only, and "rw" for read or write. E.g. if timeout
            is 10, and wait is "r", this will block for up to 10 seconds
            until read_will_block is false. If you specify "r", then
            write_will_block is always true, and if you specify "w" then
            read_will_block is always true.

        timeout:
            An optional timeout to wait for the socket to be read or write
            ready.

    <Returns>
        A tuple, (read_will_block, write_will_block).

    <Exceptions>
        As with select.select(). Probably best to wrap this with
        _is_recoverable_network_exception and
        _is_terminated_connection_exception. Throws an exception if waitfor
        is not in ["r","w","rw"]
    """
    # Check that waitfor is valid
    if waitfor not in ["rw", "r", "w"]:
        raise Exception("Illegal waitfor argument!")

    # Array to hold the socket
    sock_array = [realsock]

    # Generate the read/write arrays
    read_array = []
    if "r" in waitfor:
        read_array = sock_array

    write_array = []
    if "w" in waitfor:
        write_array = sock_array

    # Call select()
    (readable, writeable, exception) = select.select(read_array, write_array, sock_array, timeout)

    # If the socket is in the exception list, then assume its both read and writable
    if (realsock in exception):
        return (False, False)

    # Return normally then
    return (realsock not in readable, realsock not in writeable)
f4f493f03a2cd824a2bdc343f9367611011558eb
21,388
def str_to_pauli_term(pauli_str: str, qubit_labels=None):
    """
    Convert a string into a pyquil.paulis.PauliTerm.

    >>> str_to_pauli_term('XY', [])

    :param str pauli_str: The input string, made up of 'I', 'X', 'Y' or 'Z'
    :param set qubit_labels: The integer labels for the qubits in the string, given in reverse
        order. If None, default to the range of the length of pauli_str.
    :return: the corresponding PauliTerm
    :rtype: pyquil.paulis.PauliTerm
    """
    if qubit_labels is None:
        labels_list = [idx for idx in reversed(range(len(pauli_str)))]
    else:
        labels_list = sorted(qubit_labels)[::-1]
    pauli_term = PauliTerm.from_list(list(zip(pauli_str, labels_list)))
    return pauli_term
3a5be0f84006979f9b7dbb6ed436e98e7554cf68
21,389
from typing import Type


def _GetNextPartialIdentifierToken(start_token):
    """Returns the first token having identifier as substring after a token.

    Searches each token after the start to see if it contains an identifier.
    If found, token is returned. If no identifier is found returns None.
    Search is abandoned when a FLAG_ENDING_TYPE token is found.

    Args:
        start_token: The token to start searching after.

    Returns:
        The token found containing identifier, None otherwise.
    """
    token = start_token.next

    while token and token.type not in Type.FLAG_ENDING_TYPES:
        match = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER.search(
            token.string)
        if match is not None and token.type == Type.COMMENT:
            return token

        token = token.next

    return None
e486ba1f5e9ee1b2d6c01aa5aa9d5d5270e0db10
21,390
def _challenge_transaction(client_account):
    """
    Generate the challenge transaction for a client account.
    This is used in `GET <auth>`, as per SEP 10.
    Returns the XDR encoding of that transaction.
    """
    builder = Builder.challenge_tx(
        server_secret=settings.STELLAR_ACCOUNT_SEED,
        client_account_id=client_account,
        archor_name=ANCHOR_NAME,
        network=settings.STELLAR_NETWORK,
    )
    builder.sign(secret=settings.STELLAR_ACCOUNT_SEED)
    envelope_xdr = builder.gen_xdr()
    return envelope_xdr.decode("ascii")
a1762788077d7e9403c7e5d3b94e78cca11f0ce8
21,391
def mapCtoD(sys_c, t=(0, 1), f0=0.): """Map a MIMO continuous-time to an equiv. SIMO discrete-time system. The criterion for equivalence is that the sampled pulse response of the CT system must be identical to the impulse response of the DT system. i.e. If ``yc`` is the output of the CT system with an input ``vc`` taken from a set of DACs fed with a single DT input ``v``, then ``y``, the output of the equivalent DT system with input ``v`` satisfies: ``y(n) = yc(n-)`` for integer ``n``. The DACs are characterized by rectangular impulse responses with edge times specified in the t list. **Input:** sys_c : object the LTI description of the CT system, which can be: * the ABCD matrix, * a list-like containing the A, B, C, D matrices, * a list of zpk tuples (internally converted to SS representation). * a list of LTI objects t : array_like The edge times of the DAC pulse used to make CT waveforms from DT inputs. Each row corresponds to one of the system inputs; [-1 -1] denotes a CT input. The default is [0 1], for all inputs except the first. f0 : float The (normalized) frequency at which the Gp filters' gains are to be set to unity. Default 0 (DC). **Output:** sys : tuple the LTI description for the DT equivalent, in A, B, C, D representation. Gp : list of lists the mixed CT/DT prefilters which form the samples fed to each state for the CT inputs. **Example:** Map the standard second order CT modulator shown below to its CT equivalent and verify that its NTF is :math:`(1-z^{-1})^2`. .. image:: ../doc/_static/mapCtoD.png :align: center :alt: mapCtoD block diagram It can be done as follows:: from __future__ import print_function import numpy as np from scipy.signal import lti from deltasigma import * LFc = lti([[0, 0], [1, 0]], [[1, -1], [0, -1.5]], [[0, 1]], [[0, 0]]) tdac = [0, 1] LF, Gp = mapCtoD(LFc, tdac) LF = lti(*LF) ABCD = np.vstack(( np.hstack((LF.A, LF.B)), np.hstack((LF.C, LF.D)) )) NTF, STF = calculateTF(ABCD) print("NTF:") # after rounding to a 1e-6 resolution print("Zeros:", np.real_if_close(np.round(NTF.zeros, 6))) print("Poles:", np.real_if_close(np.round(NTF.poles, 6))) Prints:: Zeros: [ 1. 1.] Poles: [ 0. 0.] Equivalent to:: (z -1)^2 NTF = ---------- z^2 .. seealso:: R. Schreier and B. Zhang, "Delta-sigma modulators employing \ continuous-time circuitry," IEEE Transactions on Circuits and Systems I, \ vol. 43, no. 4, pp. 324-332, April 1996. """ # You need to have A, B, C, D specification of the system Ac, Bc, Cc, Dc = _getABCD(sys_c) ni = Bc.shape[1] # Sanitize t if hasattr(t, 'tolist'): t = t.tolist() if (type(t) == tuple or type(t) == list) and np.isscalar(t[0]): t = [t] # we got a simple list, like the default value if not (type(t) == tuple or type(t) == list) and \ not (type(t[0]) == tuple or type(t[0]) == list): raise ValueError("The t argument has an unrecognized shape") # back to business t = np.array(t) if t.shape == (1, 2) and ni > 1: t = np.vstack((np.array([[-1, -1]]), np.dot(np.ones((ni - 1, 1)), t))) if t.shape != (ni, 2): raise ValueError('The t argument has the wrong dimensions.') di = np.ones(ni).astype(bool) for i in range(ni): if t[i, 0] == -1 and t[i, 1] == -1: di[i] = False # c2d assumes t1=0, t2=1. # Also c2d often complains about poor scaling and can even produce # incorrect results. A, B, C, D, _ = cont2discrete((Ac, Bc, Cc, Dc), 1, method='zoh') Bc1 = Bc[:, ~di] # Examine the discrete-time inputs to see how big the # augmented matrices need to be. 
B1 = B[:, ~di] D1 = D[:, ~di] n = A.shape[0] t2 = np.ceil(t[di, 1]).astype(np.int_) esn = (t2 == t[di, 1]) and (D[0, di] != 0).T # extra states needed? npp = n + np.max(t2 - 1 + 1*esn) # Augment A to npp x npp, B to np x 1, C to 1 x np. Ap = padb(padr(A, npp), npp) for i in range(n + 1, npp): Ap[i, i - 1] = 1 Bp = np.zeros((npp, 1)) if npp > n: Bp[n, 0] = 1 Cp = padr(C, npp) Dp = np.zeros((1, 1)) # Add in the contributions from each DAC for i in np.flatnonzero(di): t1 = t[i, 0] t2 = t[i, 1] B2 = B[:, i] D2 = D[:, i] if t1 == 0 and t2 == 1 and D2 == 0: # No fancy stuff necessary Bp = Bp + padb(B2, npp) else: n1 = np.floor(t1) n2 = np.ceil(t2) - n1 - 1 t1 = t1 - n1 t2 = t2 - n2 - n1 if t2 == 1 and D2 != 0: n2 = n2 + 1 extraStateNeeded = 1 else: extraStateNeeded = 0 nt = n + n1 + n2 if n2 > 0: if t2 == 1: Ap[:n, nt - n2:nt] = Ap[:n, nt - n2:nt] + np.tile(B2, (1, n2)) else: Ap[:n, nt - n2:nt - 1] = Ap[:n, nt - n2:nt - 1] + np.tile(B2, (1, n2 - 1)) Ap[:n, (nt-1)] = Ap[:n, (nt-1)] + _B2formula(Ac, 0, t2, B2) if n2 > 0: # pulse extends to the next period Btmp = _B2formula(Ac, t1, 1, B2) else: # pulse ends in this period Btmp = _B2formula(Ac, t1, t2, B2) if n1 > 0: Ap[:n, n + n1 - 1] = Ap[:n, n + n1 - 1] + Btmp else: Bp = Bp + padb(Btmp, npp) if n2 > 0: Cp = Cp + padr(np.hstack((np.zeros((D2.shape[0], n + n1)), D2*np.ones((1, n2)))), npp) sys = (Ap, Bp, Cp, Dp) if np.any(~di): # Compute the prefilters and add in the CT feed-ins. # Gp = inv(sI - Ac)*(zI - A)/z*Bc1 n, m = Bc1.shape Gp = np.empty_like(np.zeros((n, m)), dtype=object) # !!Make this like stf: an array of zpk objects ztf = np.empty_like(Bc1, dtype=object) # Compute the z-domain portions of the filters ABc1 = np.dot(A, Bc1) for h in range(m): for i in range(n): if Bc1[i, h] == 0: ztf[i, h] = (np.array([]), np.array([0.]), -ABc1[i, h]) # dt=1 else: ztf[i, h] = (np.atleast_1d(ABc1[i, h]/Bc1[i, h]), np.array([0.]), Bc1[i, h]) # dt = 1 # Compute the s-domain portions of each of the filters stf = np.empty_like(np.zeros((n, n)), dtype=object) # stf[out, in] = zpk for oi in range(n): for ii in range(n): # Doesn't do pole-zero cancellation stf[oi, ii] = ss2zpk(Ac, np.eye(n), np.eye(n)[oi, :], np.zeros((1, n)), input=ii) # scipy as of v 0.13 has no support for LTI MIMO systems # only 'MISO', therefore you can't write: # stf = ss2zpk(Ac, eye(n), eye(n), np.zeros(n, n))) for h in range(m): for i in range(n): # k = 1 unneded, see below for j in range(n): # check the k values for a non-zero term if stf[i, j][2] != 0 and ztf[j, h][2] != 0: if Gp[i, h] is None: Gp[i, h] = {} Gp[i, h].update({'Hs':[list(stf[i, j])]}) Gp[i, h].update({'Hz':[list(ztf[j, h])]}) else: Gp[i, h].update({'Hs':Gp[i, h]['Hs'] + [list(stf[i, j])]}) Gp[i, h].update({'Hz':Gp[i, h]['Hz'] + [list(ztf[j, h])]}) # the MATLAB-like cell code for the above statements would have # been: #Gp[i, h](k).Hs = stf[i, j] #Gp[i, h](k).Hz = ztf[j, h] #k = k + 1 if f0 != 0: # Need to correct the gain terms calculated by c2d # B1 = gains of Gp @f0; for h in range(m): for i in range(n): B1ih = np.real_if_close(evalMixedTF(Gp[i, h], f0)) # abs() used because ss() whines if B has complex entries... # This is clearly incorrect. # I've fudged the complex stuff by including a sign.... 
B1[i, h] = np.abs(B1ih) * np.sign(np.real(B1ih)) if np.abs(B1[i, h]) < 1e-09: B1[i, h] = 1e-09 # This prevents NaN in "line 174" below # Adjust the gains of the pre-filters for h in range(m): for i in range(n): for j in range(max(len(Gp[i, h]['Hs']), len(Gp[i, h]['Hz']))): # The next is "line 174" Gp[i, h]['Hs'][j][2] = Gp[i, h]['Hs'][j][2]/B1[i, h] sys = (sys[0], # Ap np.hstack((padb(B1, npp), sys[1])), # new B sys[2], # Cp np.hstack((D1, sys[3]))) # new D return sys, Gp
6aa83119efcad68b1fdf3a0cbc5467c53d2a30bb
21,392
def normalize_type(type: str) -> str:
    """Normalize DataTransfer's type strings.

    https://html.spec.whatwg.org/multipage/dnd.html#dom-datatransfer-getdata
    'text' -> 'text/plain'
    'url' -> 'text/uri-list'
    """
    if type == 'text':
        return 'text/plain'
    elif type == 'url':
        return 'text/uri-list'
    return type
887c532218a7775ea55c6a39953ec244183af455
21,393
def _parity(N, j):
    """Private function to calculate the parity of the quantum system.
    """
    if j == 0.5:
        pi = np.identity(N) - np.sqrt((N - 1) * N * (N + 1) / 2) * _lambda_f(N)
        return pi / N
    elif j > 0.5:
        mult = np.int32(2 * j + 1)
        matrix = np.zeros((mult, mult))
        foo = np.ones(mult)
        for n in np.arange(-j, j + 1, 1):
            for l in np.arange(0, mult, 1):
                foo[l] = (2 * l + 1) * qutip.clebsch(j, l, j, n, 0, n)
            matrix[np.int32(n + j), np.int32(n + j)] = np.sum(foo)
        return matrix / mult
5afc399cc6f303ba35d7e7c6b6b039130fcd1b17
21,394
def get_log(id): """Returns the log for the given ansible play. This works on both live and finished plays. .. :quickref: Play; Returns the log for the given ansible play :param id: play id **Example Request**: .. sourcecode:: http GET /api/v2/plays/345835/log HTTP/1.1 **Example Response**: .. sourcecode:: http HTTP/1.1 200 OK ... log file from the given play ... """ # For security, send_from_directory avoids sending any files # outside of the specified directory return send_from_directory(get_log_dir_abs(), str(id) + ".log")
7a67f7b9d89df39824e566fcb11083be9d3f76e8
21,395
def filterLinesByCommentStr(lines, comment_str='#'):
    """
    Filter all lines from a file.readlines output which begins with one of the
    symbols in the comment_str.
    """
    comment_line_idx = []
    for i, line in enumerate(lines):
        if line[0] in comment_str:
            comment_line_idx.append(i)
    for j in comment_line_idx[::-1]:
        del lines[j]
    return lines
8a6ce56187afc2368ec81d11c38fe7af2eacb14f
21,396
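A short example for `filterLinesByCommentStr` above; note that it deletes the comment lines from the input list in place and returns the same list.

lines = ["# header\n", "1 2 3\n", "% note\n", "4 5 6\n"]

print(filterLinesByCommentStr(lines, comment_str='#%'))
# ['1 2 3\n', '4 5 6\n']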
def assemble_result_from_graph(type_spec, binding, output_map): """Assembles a result stamped into a `tf.Graph` given type signature/binding. This method does roughly the opposite of `capture_result_from_graph`, in that whereas `capture_result_from_graph` starts with a single structured object made up of tensors and computes its type and bindings, this method starts with the type/bindings and constructs a structured object made up of tensors. Args: type_spec: The type signature of the result to assemble, an instance of `types.Type` or something convertible to it. binding: The binding that relates the type signature to names of tensors in the graph, an instance of `pb.TensorFlow.Binding`. output_map: The mapping from tensor names that appear in the binding to actual stamped tensors (possibly renamed during import). Returns: The assembled result, a Python object that is composed of tensors, possibly nested within Python structures such as anonymous tuples. Raises: TypeError: If the argument or any of its parts are of an uexpected type. ValueError: If the arguments are invalid or inconsistent witch other, e.g., the type and binding don't match, or the tensor is not found in the map. """ type_spec = computation_types.to_type(type_spec) py_typecheck.check_type(type_spec, computation_types.Type) py_typecheck.check_type(binding, pb.TensorFlow.Binding) py_typecheck.check_type(output_map, dict) for k, v in output_map.items(): py_typecheck.check_type(k, str) if not tf.is_tensor(v): raise TypeError( 'Element with key {} in the output map is {}, not a tensor.'.format( k, py_typecheck.type_string(type(v)))) binding_oneof = binding.WhichOneof('binding') if isinstance(type_spec, computation_types.TensorType): if binding_oneof != 'tensor': raise ValueError( 'Expected a tensor binding, found {}.'.format(binding_oneof)) elif binding.tensor.tensor_name not in output_map: raise ValueError('Tensor named {} not found in the output map.'.format( binding.tensor.tensor_name)) else: return output_map[binding.tensor.tensor_name] elif isinstance(type_spec, computation_types.NamedTupleType): if binding_oneof != 'tuple': raise ValueError( 'Expected a tuple binding, found {}.'.format(binding_oneof)) else: type_elements = anonymous_tuple.to_elements(type_spec) if len(binding.tuple.element) != len(type_elements): raise ValueError( 'Mismatching tuple sizes in type ({}) and binding ({}).'.format( len(type_elements), len(binding.tuple.element))) result_elements = [] for (element_name, element_type), element_binding in zip(type_elements, binding.tuple.element): element_object = assemble_result_from_graph(element_type, element_binding, output_map) result_elements.append((element_name, element_object)) if not isinstance(type_spec, computation_types.NamedTupleTypeWithPyContainerType): return anonymous_tuple.AnonymousTuple(result_elements) container_type = computation_types.NamedTupleTypeWithPyContainerType.get_container_type( type_spec) if (py_typecheck.is_named_tuple(container_type) or py_typecheck.is_attrs(container_type)): return container_type(**dict(result_elements)) return container_type(result_elements) elif isinstance(type_spec, computation_types.SequenceType): if binding_oneof != 'sequence': raise ValueError( 'Expected a sequence binding, found {}.'.format(binding_oneof)) else: sequence_oneof = binding.sequence.WhichOneof('binding') if sequence_oneof == 'variant_tensor_name': variant_tensor = output_map[binding.sequence.variant_tensor_name] return make_dataset_from_variant_tensor(variant_tensor, type_spec.element) else: 
raise ValueError( 'Unsupported sequence binding \'{}\'.'.format(sequence_oneof)) else: raise ValueError('Unsupported type \'{}\'.'.format(type_spec))
a25b4d935dfcb62acad15da5aeafee390b03a38c
21,397
def parse(peaker):
    # type: (Peaker[Token]) -> Node
    """Parse the docstring.

    Args:
        peaker: A Peaker filled with the lexed tokens of the docstring.

    Raises:
        ParserException: If there is anything malformed with the docstring,
            or if anything goes wrong with parsing.  # noqa

    Returns:
        The parsed docstring as an AST.
    """
    keyword_parse_lookup = {
        'Args': parse_args,
        'Arguments': parse_args,
        'Returns': parse_returns,
        'Yields': parse_yields,
        'Raises': parse_raises,
    }
    children = [
        parse_description(peaker)
    ]
    while peaker.has_next():
        next_value = peaker.peak().value
        if next_value in keyword_parse_lookup:
            children.append(
                keyword_parse_lookup[next_value](peaker)
            )
        else:
            children.append(
                parse_long_description(peaker)
            )
    return Node(
        node_type=NodeType.DOCSTRING,
        children=children,
    )
abd8b495281c159f070a890a392d7a80da740fa4
21,398
import ctypes


def sphrec(r, colat, lon):
    """
    Convert from spherical coordinates to rectangular coordinates.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sphrec_c.html

    :param r: Distance of a point from the origin.
    :type r: float
    :param colat: Angle of the point from the positive Z-axis.
    :type colat: float
    :param lon: Angle of the point from the XZ plane in radians.
    :type lon: float
    :return: Rectangular coordinates of the point.
    :rtype: 3-Element Array of floats
    """
    r = ctypes.c_double(r)
    colat = ctypes.c_double(colat)
    lon = ctypes.c_double(lon)
    rectan = stypes.emptyDoubleVector(3)
    libspice.sphrec_c(r, colat, lon, rectan)
    return stypes.cVectorToPython(rectan)
d633b26cd6776d13b6d0e66a9366676c8b8ac962
21,400
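For readers without the SPICE toolkit installed, a plain-NumPy sketch of the same spherical-to-rectangular conversion that `sphrec` wraps (colatitude measured from +Z, longitude from +X), useful as a cross-check:

import numpy as np

def sphrec_numpy(r, colat, lon):
    # x = r sin(colat) cos(lon), y = r sin(colat) sin(lon), z = r cos(colat)
    return np.array([
        r * np.sin(colat) * np.cos(lon),
        r * np.sin(colat) * np.sin(lon),
        r * np.cos(colat),
    ])

print(sphrec_numpy(1.0, np.pi / 2, 0.0))  # ~[1. 0. 0.]
print(sphrec_numpy(2.0, 0.0, 0.0))        # ~[0. 0. 2.]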
import json def counter_endpoint( event=None, context=None ): """ API endpoint that returns the total number of UFO sightings. An example request might look like: .. sourcecode:: http GET www.x.com/counter HTTP/1.1 Host: example.com Accept: application/json, text/javascript Results will be returned as JSON object with the following format: .. code-block:: json { "count": <number> } """ return app.response_class( json.dumps( count_rows_in_table() ), mimetype='application/json' )
e492253f36736c112dcafc15d0e5c30cf27d5560
21,401
def search_trie(result, trie):
    """ trie search """
    if result.is_null():
        return []

    # output
    ret_vals = []
    for token_str in result:
        ret_vals += trie.find(token_str)

    if result.has_memory():
        ret_vals = [
            one_string for one_string in ret_vals
            if result.is_memorized(one_string) == False
        ]

    return ret_vals
75ad08db7962b47ea6402e866cf4a7a9861037c9
21,402
def get_nb_entry(path_to_notes: str = None, nb_name: str = None, show_index: bool = True) -> str: """Returns the entry of a notebook. This entry is to be used for the link to the notebook from the table of contents and from the navigators. Depending on the value of the argument `show_index`, the entry can be either the full entry provided by the function `get_nb_full_entry()` or simply the title of the notebook, provided by the function `get_nb_title()`. Parameters ---------- path_to_notes : str The path to the directory that contains the notebooks, either absolute or relative to the script that calls `nbbinder.bind()`. nb_name : str The name of the jupyter notebook file. show_index : boolean Indicates whether to include the chapter and section numbers of the notebook in the table of contents (if True) or just the title (if False). Returns ------- entry : str A string with the entry name. """ if show_index: entry = ''.join(list(get_nb_full_entry(path_to_notes, nb_name)[1:3])) else: entry = get_nb_title(path_to_notes, nb_name) return entry
ec216cae586d2746af80ca88a428c6b907ad5240
21,403
def get_label_encoder(config):
    """Gets a label encoder given the label type from the config

    Args:
        config (ModelConfig): A model configuration

    Returns:
        LabelEncoder: The appropriate LabelEncoder object for the given config
    """
    return LABEL_MAP[config.label_type](config)
8c7d6e9058af81c94cde039030fed12c4a65b8e6
21,404
def get_lesson_comment_by_sender_user_id():
    """
    {
        "page": "Long",
        "size": "Long",
        "sender_user_id": "Long"
    }
    """
    domain = request.args.to_dict()
    return lesson_comment_service.get_lesson_comment_by_sender_user_id(domain)
7a20e44af39e2efc5cb83eedd9dfb74124a2777f
21,405
def _inertia_grouping(stf):
    """Grouping function for class inertia.
    """
    if hasattr(stf[2], 'inertia_constant'):
        return True
    else:
        return False
a7689324ccabf601bf8beaec4c1826e8df25880b
21,406
def parse_input(raw_input: str) -> nx.DiGraph:
    """Parses Day 12 puzzle input."""
    graph = nx.DiGraph()
    graph.add_nodes_from([START, END])
    for line in raw_input.strip().splitlines():
        edge = line.split('-')
        for candidate in [edge, list(reversed(edge))]:
            if candidate[0] == END:
                continue
            if candidate[1] == START:
                continue
            graph.add_edge(*candidate)
    return graph
1c38124fed386829d712041074cc76c891981498
21,407
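A usage sketch for `parse_input` above, assuming the module defines START = 'start' and END = 'end' (the Advent of Code day 12 convention) and that networkx is installed:

raw = """start-A
A-b
A-end
b-end"""

g = parse_input(raw)
print(sorted(g.edges()))
# [('A', 'b'), ('A', 'end'), ('b', 'A'), ('b', 'end'), ('start', 'A')]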
def zonal_mode_extract(infield, mode_keep, low_pass=False):
    """
    Subfunction to extract or swipe out zonal modes (mode_keep) of (y, x) data.
    Assumes here that the data is periodic in axis = 1 (in the x-direction)
    with the end point missing.

    If mode_keep = 0 then this is just the zonal averaged field

    Input:
        in_field    2d layer input field
        mode_keep   the zonal mode of the data to be extracted from

    Opt input:
        low_pass    get rid of all modes from mode_keep + 1 onwards

    Output:
        outfield    zonal mode of the data
    """
    outfield_h = rfft(infield, axis=1)
    outfield_h[:, mode_keep+1::] = 0
    if not low_pass:
        outfield_h[:, 0:mode_keep] = 0

    return irfft(outfield_h, axis=1)
a73015ac000668d11dd97ef0c8f435181fb0b9f7
21,408
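A small sketch for `zonal_mode_extract` above, assuming `rfft`/`irfft` come from numpy.fft (or scipy) in that module: a field made of a zonal mean plus a wavenumber-3 wave is split back into those two parts.

import numpy as np

x = np.linspace(0, 2 * np.pi, 16, endpoint=False)
field = (2.0 + np.cos(3 * x))[np.newaxis, :]   # one (y, x) row

mean_part = zonal_mode_extract(field, 0)   # ~2.0 everywhere (zonal average)
wave_part = zonal_mode_extract(field, 3)   # ~cos(3 x), zonal mean removed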
import pickle def clone(): """Clone model PUT /models Parameters: { "model_name": <model_name_to_clone>, "new_model_name": <name_for_new_model> } Returns: - {"model_names": <list_of_model_names_in_session>} """ request_json = request.get_json() name = request_json["model_name"] new_name = request_json["new_model_name"] models = None if 'models' in session: models = pickle.loads(session["models"]) else: models = {} if name in models: models[new_name] = models[name].clone() session["models"] = pickle.dumps(models) res = {"model_names": get_model_names()} return jsonify(res)
49aaf81371f197858e4347efdfa04136e3342dc7
21,409
def GetFlagFromDest(dest):
    """Returns a conventional flag name given a dest name."""
    return '--' + dest.replace('_', '-')
021ab8bca05afbb2325d865a299a2af7c3b939c9
21,410
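Two quick calls to `GetFlagFromDest` above:

print(GetFlagFromDest('max_retries'))  # --max-retries
print(GetFlagFromDest('verbose'))      # --verbose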
def get_rst_export_elements( file_environment, environment, module_name, module_path_name, skip_data_value=False, skip_attribute_value=False, rst_elements=None ): """Return :term:`reStructuredText` from exported elements within *file_environment*. *environment* is the full :term:`Javascript` environment processed in :mod:`~champollion.parser`. *module_name* is the module alias that should be added to each directive. *module_path_name* is the module path alias that should be added to each directive. *skip_data_value* indicate whether data value should not be displayed. *skip_attribute_value* indicate whether attribute value should not be displayed. *rst_elements* can be an initial dictionary that will be updated and returned. """ export_environment = file_environment["export"] import_environment = file_environment["import"] if rst_elements is None: rst_elements = {} for _exported_env_id, _exported_env in export_environment.items(): from_module_id = _exported_env["module"] line_number = _exported_env["line_number"] if line_number not in rst_elements.keys(): rst_elements[line_number] = [] name = _exported_env["name"] alias = _exported_env["alias"] if alias is None: alias = name # Update module origin and name from import if necessary if (from_module_id is None and _exported_env_id in import_environment.keys()): name = import_environment[_exported_env_id]["name"] from_module_id = import_environment[_exported_env_id]["module"] # Ignore element if the origin module can not be found if from_module_id not in environment["module"].keys(): continue from_module_environment = environment["module"][from_module_id] from_file_id = from_module_environment["file_id"] from_file_env = environment["file"][from_file_id] if name == "default": rst_element = get_rst_default_from_file_environment( from_file_env, alias, module_name, module_path_name, skip_data_value=skip_data_value, skip_attribute_value=skip_attribute_value, ) if rst_element is None: continue rst_elements[line_number].append(rst_element) elif name == "*": extra_options = [ ":force-partial-import:", ":members:", ":skip-description:" ] if skip_data_value: extra_options.append(":skip-data-value:") if skip_attribute_value: extra_options.append(":skip-attribute-value:") rst_element = rst_generate( directive="automodule", element_id=from_module_id, module_alias=module_name, module_path_alias=module_path_name, extra_options=extra_options ) rst_elements[line_number].append(rst_element) else: rst_element = get_rst_name_from_file_environment( name, from_file_env, alias, module_name, module_path_name, skip_data_value=skip_data_value, skip_attribute_value=skip_attribute_value, ) if rst_element is None: continue rst_elements[line_number].append(rst_element) return rst_elements
4b3a055e47b7c859216b26ec50bf21bcec3af076
21,411
def ganache_url(host='127.0.0.1', port='7445'):
    """Return URL for Ganache test server."""
    return f"http://{host}:{port}"
9de6e2c26c0e1235a14c8dd28040fcdfb8a36a7f
21,412
def to_news_detail_list_by_period(uni_id_list: list, start: str, end: str) -> list:
    """
    Given a list of unified social credit codes, fetch the news detail list for each
    company within the given date range, processed serially.
    :param end:
    :param start:
    :param uni_id_list:
    :return:
    """
    detail_list = []
    for uni_id in uni_id_list:
        for summary in to_news_summary_list_by_period(uni_id, start, end):
            detail_list.append(to_news_detail_by_summary(summary))
    return detail_list
27493cc0f50a443cea69be74cd2bb1c494e1687f
21,413
from typing import List


def positions_to_df(positions: List[alp_api.entity.Asset]) -> pd.DataFrame:
    """Generate a df from alpaca api assets

    Parameters
    ----------
    positions : List[alp_api.entity.Asset]
        List of alpaca trade assets

    Returns
    -------
    pd.DataFrame
        Processed dataframe
    """
    df = pd.DataFrame(columns=["Symbol", "MarketValue", "Quantity", "CostBasis"])
    sym = []
    mv = []
    qty = []
    cb = []
    for pos in positions:
        sym.append(pos.symbol)
        mv.append(float(pos.market_value))
        qty.append(float(pos.qty))
        cb.append(float(pos.cost_basis))
    df["Symbol"] = sym
    df["MarketValue"] = mv
    df["Quantity"] = qty
    df["CostBasis"] = cb
    df["Broker"] = "alp"
    return df
5f77f4862f0244ba66e3d99e8a34e2dd8a56d91d
21,414
import requests
from bs4 import BeautifulSoup


def get_all_pages(date):
    """For the specific date, get all page URLs."""
    r = requests.get(URL, params={"search": date})
    soup = BeautifulSoup(r.text, "html.parser")
    return [
        f"https://www.courts.phila.gov/{url}"
        for url in set([a["href"] for a in soup.select(".pagination li a")])
    ]
f74e2167498fa8eb95e81c07c49a79b690adfcb2
21,415
from typing import List def boundary_condition( outer_bc_geometry: List[float], inner_bc_geometry: List[float], bc_num: List[int], T_end: float, ): """ Generate BC points for outer and inner boundaries """ x_l, x_r, y_d, y_u = outer_bc_geometry xc_l, xc_r, yc_d, yc_u = inner_bc_geometry N_x, N_y, N_t, N_bc = bc_num N_bc = N_bc // 4 + 1 # generate bc for outer boundary left_points = np.stack((np.ones(N_y) * x_l, np.linspace(y_d, y_u, N_y)), 1) right_points = np.stack((np.ones(N_y) * x_r, np.linspace(y_d, y_u, N_y)), 1) t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1) X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)]))) X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)]))) X_lr = np.concatenate((X_left, X_right), 1) lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False) X_lr = X_lr[lr_idx] down_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_d), 1) up_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_u), 1) t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1) X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)]))) X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)]))) X_du = np.concatenate((X_down, X_up), 1) ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False) X_du = X_du[ud_idx] X_bc_outer = (X_lr, X_du) # generate bc for inner boundary left_points = np.stack((np.ones(N_y) * xc_l, np.linspace(yc_d, yc_u, N_y)), 1) right_points = np.stack((np.ones(N_y) * xc_r, np.linspace(yc_d, yc_u, N_y)), 1) t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1) X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)]))) X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)]))) X_lr = np.concatenate((X_left, X_right), 1) lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False) X_lr = X_lr[lr_idx] down_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_d), 1) up_points = np.stack((np.linspace(xc_l, xc_r, N_x), np.ones(N_x) * yc_u), 1) t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1) X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)]))) X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)]))) X_du = np.concatenate((X_down, X_up), 1) ud_idx = np.random.choice(len(X_du), size=N_bc, replace=False) X_du = X_du[ud_idx] X_bc_inner = (X_lr, X_du) return X_bc_outer, X_bc_inner
5941e213a7c48e39b79969d70b9a53a52207272f
21,416
def extractInfiniteNovelTranslations(item): """ # Infinite Novel Translations """ vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or 'preview' in item['title'].lower(): return None tagmap = [ ('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'), ('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'), ('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'), ('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'), ('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'), ('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'), ('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'), ('maou no utsuwa', 'Maou no Utsuwa', 'translated'), ('Maou no Ki', 'Maou no Ki', 'translated'), ('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'), ('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'), ('I work as Healer in Another World\'s Labyrinth City', 'I work as Healer in Another World\'s Labyrinth City', 'translated'), ('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'), ('Hakai no Miko', 'Hakai no Miko', 'translated'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
0b31fcb764840242869eb3aa224b5f04d28beff8
21,417
def fill_diagonal(matrix, value, k=0, unpadded_dim=None):
    """
    Returns a matrix identical to `matrix` except that the `k'th` diagonal has been
    overwritten with the value `value`.

    Args:
        matrix: Matrix whose diagonal to fill.
        value: The value to fill the diagonal with.
        k: The diagonal to fill.
        unpadded_dim: If specified, only the `unpadded_dim x unpadded_dim` top left
            block will be filled.

    Returns:
        A copy of `matrix`, with the `k'th` diagonal replaced by `value`.
    """
    replace_here = on_kth_diagonal(matrix.shape, k=k, unpadded_dim=unpadded_dim)
    replace_with = jnp.full(replace_here.shape[1], value)
    return jnp.where(replace_here, x=replace_with, y=matrix)
d3dddf35b70788d832b6df119de8ba2760bb7fa7
21,418
from typing import Optional from typing import Callable import functools def container_model(*, model: type, caption: str, icon: Optional[str]) -> Callable: """ ``container_model`` is an object that keeps together many different properties defined by the plugin and allows developers to build user interfaces in a declarative way similar to :func:`data_model`. ``container_model`` can also hold a reference to a :func:`data_model` declared from the plugin, making this object a parent for all new :func:`data_model` created. .. rubric:: **Application Required**: The following options are required when declaring a ``container_model``. :param caption: A text to be displayed over the Tree. :param icon: Name of the icon to be used over the Tree. :param model: A reference to a class decorated with :func:`data_model`. .. note:: Even though the icon parameter is required, it's not currently being used. .. rubric:: **Plugin defined**: Visual elements that allow the user to input information into the application, or to arrange better the user interface. :Input Fields: Visual elements that allow the user to provide input information into the application. :Layout: Elements that assist the developer to arrange input fields in meaningfully way. Check the section :ref:`visual elements <api-types-section>` to see all inputs available, and :ref:`layout elements<api-layout-section>` to see all layouts available. .. rubric:: Example myplugin.py .. code-block:: python @data_model(icon="", caption="My Child") class ChildModel: distance = Quantity(value=1, unit="m", caption="Distance") @container_model(icon='', caption='My Container', model=ChildModel) class MyModelContainer: my_string = String(value='Initial Value', caption='My String') @alfasim_sdk.hookimpl def alfasim_get_data_model_type(): return [MyModelContainer] .. image:: /_static/images/api/container_model_example_1_1.png :scale: 70% .. image:: /_static/images/api/container_model_example_1_2.png :scale: 70% .. image:: /_static/images/api/container_model_example_1_3.png :scale: 70% Container data also includes automatically two actions for the model: .. rubric:: Action: Create new Model An action that creates a new model inside the container selected, you can activate this action by right-clicking in the container over the Tree, or by clicking on the "Plus" icon available at the ``Model Explorer``. .. image:: /_static/images/api/container_model_new_model_1.png :scale: 80% .. image:: /_static/images/api/container_model_new_model_2.png :scale: 80% .. rubric:: Action: Remove An action that remove the selected model, only available for models inside a container, you can activate this action by right-clicking the model over the Tree, or by clicking on the "Trash" icon available at the ``Model Explorer``. .. image:: /_static/images/api/container_model_remove_1.png :scale: 80% .. image:: /_static/images/api/container_model_remove_2.png :scale: 80% """ def apply(class_): @functools.wraps(class_) def wrap_class(class_, caption, icon): return get_attr_class(class_, caption, icon, model) return wrap_class(class_, caption, icon) return apply
e06ad5ab45f75fcc02550497e290fb8c07193645
21,420
def ward_quick(G, feature, verbose = 0): """ Agglomerative function based on a topology-defining graph and a feature matrix. Parameters ---------- G graph instance, topology-defining graph feature: array of shape (G.V,dim_feature): some vectorial information related to the graph vertices Returns ------- t: weightForest instance, that represents the dendrogram of the data NOTE ---- Hopefully a quicker version A euclidean distance is used in the feature space Caveat : only approximate """ # basic check if feature.ndim==1: feature = np.reshape(feature, (-1, 1)) if feature.shape[0]!=G.V: raise ValueError, "Incompatible dimension for the\ feature matrix and the graph" Features = [np.ones(2*G.V), np.zeros((2*G.V, feature.shape[1])), np.zeros((2*G.V, feature.shape[1]))] Features[1][:G.V] = feature Features[2][:G.V] = feature**2 """ Features = [] for i in range(G.V): Features.append(np.reshape(feature[i],(1,feature.shape[1]))) """ n = G.V nbcc = G.cc().max()+1 # prepare a graph with twice the number of vertices K = _auxiliary_graph(G,Features) parent = np.arange(2*n-nbcc).astype(np.int) pop = np.ones(2*n-nbcc).astype(np.int) height = np.zeros(2*n-nbcc) linc = K.left_incidence() rinc = K.right_incidence() # iteratively merge clusters q = 0 while (q<n-nbcc): # 1. find the lightest edges aux = np.zeros(2*n) ape = np.nonzero(K.weights<np.infty) ape = np.reshape(ape,np.size(ape)) idx = np.argsort(K.weights[ape]) for e in range(n-nbcc-q): i,j = K.edges[ape[idx[e]],0], K.edges[ape[idx[e]],1] if aux[i]==1: break if aux[j]==1: break aux[i]=1 aux[j]=1 emax = np.maximum(e,1) for e in range(emax): m = ape[idx[e]] cost = K.weights[m] k = q+n #if K.weights[m]>=stop: break i = K.edges[m,0] j = K.edges[m,1] height[k] = cost if verbose: print q,i,j, m,cost # 2. remove the current edge K.edges[m,:] = -1 K.weights[m] = np.infty linc[i].remove(m) rinc[j].remove(m) ml = linc[j] if np.sum(K.edges[ml,1]==i)>0: m = ml[np.flatnonzero(K.edges[ml,1]==i)] K.edges[m,:] = -1 K.weights[m] = np.infty linc[j].remove(m) rinc[i].remove(m) # 3. merge the edges with third part edges parent[i] = k parent[j] = k for p in range(3): Features[p][k] = Features[p][i] + Features[p][j] """ totalFeatures = np.vstack((Features[i], Features[j])) Features.append(totalFeatures) Features[i] = [] Features[j] = [] """ linc,rinc = _remap(K, i, j, k, Features, linc, rinc) q+=1 # build a tree to encode the results t = WeightedForest(2*n-nbcc, parent, height) return t
a5c4e847bf6c70acfee1b5d5466b5310d40b528d
21,421
def conv3x3(in_planes, out_planes, stride=1, output_padding=0):
    """3x3 convolution transpose with padding"""
    return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=3, stride=stride,
                              padding=1, output_padding=output_padding, bias=False)
00c9b5123eaf408a1c4432b962739ec519851a59
21,422
def unwrap(func):
    """
    Returns the object wrapped by decorators.
    """
    def _is_wrapped(f):
        return hasattr(f, '__wrapped__')

    unwrapped_f = func
    while _is_wrapped(unwrapped_f):
        unwrapped_f = unwrapped_f.__wrapped__

    return unwrapped_f
17aa0c8cc91578fd1187784ad0396ed91c5ec9b8
21,423
def get_payload_from_scopes(scopes):
    """
    Get a dict to be used in JWT payload.

    Just merge this dict with the JWT payload.

    :type roles list[rest_jwt_permission.scopes.Scope]
    :return dictionary to be merged with the JWT payload
    :rtype dict
    """
    return {
        get_setting("JWT_PAYLOAD_SCOPES_KEY"): [scope.identifier for scope in scopes]
    }
d2192d2eef5cf6e5cc28d2125bef94c438075884
21,424
from typing import Dict def missing_keys_4(data: Dict, lprint=print, eprint=print): """ Add keys: _max_eval_all_epoch, _max_seen_train, _max_seen_eval, _finished_experiment """ if "_finished_experiment" not in data: lprint(f"Add keys _finished_experiment ...") max_eval = -1 for k1, v1 in data["_eval_trace"].items(): for k2, v2 in v1.items(): max_eval += len(v2) max_train = -1 for k1, v1 in data["_train_trace"].items(): for k2, v2 in v1.items(): max_train += len(v2) data["_max_eval_all_epoch"] = max_eval data["_max_train_all_epoch"] = max_train data["_max_seen_train"] = max_seen_train = max(data["_train_trace"].keys()) data["_max_seen_eval"] = max_seen_eval = max(data["_eval_trace"].keys()) # Check if finished or no no_tasks = len(data["_task_info"]) epochs_per_task = data["_args"]["train"]["epochs_per_task"] should_train = no_tasks * epochs_per_task reached_max_train = should_train == max_train + 1 same_seen = data["_max_seen_train"] == data["_max_seen_eval"] all_final_tasks_evaluated = len(data["_eval_trace"][max_seen_eval]) == no_tasks data["_finished_experiment"] = reached_max_train \ and same_seen and all_final_tasks_evaluated return 1 return 0
ad8d3f7c19dd4eefa0db465dd52b5e8dc8f0bd1e
21,425
def translate(txt):
    """Takes a plain czech text as an input and returns its phonetic transcription."""
    txt = txt.lower()
    txt = simple_replacement(txt)
    txt = regex_replacement(txt)
    txt = chain_replacement(txt)
    txt = grind(txt)
    return txt
a7f35b7be14dfed0d9a4e68e2e3113d97f2468cb
21,426
def struct_getfield_longlong(ffitype, addr, offset):
    """
    Return the field of type ``ffitype`` at ``addr+offset``, casted to
    lltype.LongLong.
    """
    value = _struct_getfield(lltype.SignedLongLong, addr, offset)
    return value
24ff5e6b35de48bccf810bb9b723852f1ab16fb6
21,427
def subscribe(request): """View to subscribe the logged in user to a channel""" if request.method == "POST": channels = set() users = set() for key in request.POST: if key.startswith("youtube-"): channel_id = key[8:] if models.YoutubeChannel.objects.filter(id=channel_id).exists(): channels.add(models.YoutubeChannel.objects.get(id=channel_id)) elif key.startswith("twitch-"): user_id = key[7:] if models.TwitchUser.objects.filter(id=user_id).exists(): users.add(models.TwitchUser.objects.get(id=user_id)) action = request.POST.get("action") if action == "Subscribe": for channel in channels: if not models.YoutubeSubscription.objects.filter(channel=channel, user=request.user).exists(): models.YoutubeSubscription.objects.create(channel=channel, user=request.user) for user in users: if not models.TwitchSubscription.objects.filter(channel=user, user=request.user).exists(): models.TwitchSubscription.objects.create(channel=user, user=request.user) elif action == "Unsubscribe" or action == "Remove from history": for channel in channels: for entry in models.YoutubeSubscription.objects.filter(channel=channel, user=request.user): entry.delete() for user in users: for entry in models.TwitchSubscription.objects.filter(channel=user, user=request.user): entry.delete() history = getattr(request.user, "subscriptionhistory", None) if action == "Remove from history" and history is not None: for channel in channels: history.youtube.remove(channel) for user in users: history.twitch.remove(user) return redirect("notifpy:subscriptions")
6ee833eb6536f7958f74f274a776b31fab7051dc
21,428
from typing import OrderedDict import json def eval_accuracies(hypotheses, references, sources=None, filename=None, mode='dev'): """An unofficial evalutation helper. Arguments: hypotheses: A mapping from instance id to predicted sequences. references: A mapping from instance id to ground truth sequences. copy_info: Map of id --> copy information. sources: Map of id --> input text sequence. filename: print_copy_info: """ assert (sorted(references.keys()) == sorted(hypotheses.keys())) # Compute BLEU scores _, bleu, ind_bleu = corpus_bleu(hypotheses, references) # Compute ROUGE scores rouge_calculator = Rouge() rouge_l, ind_rouge = rouge_calculator.compute_score(references, hypotheses) # Compute METEOR scores if mode == 'test': meteor_calculator = Meteor() meteor, _ = meteor_calculator.compute_score(references, hypotheses) else: meteor = 0 fw = open(filename, 'w') if filename else None for key in references.keys(): if fw: pred_i = hypotheses[key] logobj = OrderedDict() logobj['id'] = key if sources is not None: logobj['code'] = sources[key] logobj['predictions'] = pred_i logobj['references'] = references[key] logobj['bleu'] = ind_bleu[key] logobj['rouge_l'] = ind_rouge[key] print(json.dumps(logobj), file=fw) if fw: fw.close() return bleu * 100, rouge_l * 100, meteor * 100
ede152bb51fcb53574eec0dfb84b6ca971289d5d
21,430
from datetime import datetime


def perform_login(db: Session, user: FidesopsUser) -> ClientDetail:
    """Performs a login by updating the FidesopsUser instance and creating
    and returning an associated ClientDetail."""
    client: ClientDetail = user.client
    if not client:
        logger.info("Creating client for login")
        client, _ = ClientDetail.create_client_and_secret(
            db, user.permissions.scopes, user_id=user.id
        )

    user.last_login_at = datetime.utcnow()
    user.save(db)

    return client
013875ba1ca30615690d8d477e1246174bdc6279
21,431
def _bytestring_to_textstring(bytestring: str, number_of_registers: int = 16) -> str:
    """Convert a bytestring to a text string.

    Each 16-bit register in the slave are interpreted as two characters (1 byte = 8 bits).
    For example 16 consecutive registers can hold 32 characters (32 bytes).

    Not much of conversion is done, mostly error checking.

    Args:
        * bytestring (str): The string from the slave. Length = 2*number_of_registers
        * number_of_registers (int): The number of registers allocated for the string.

    Returns:
        The text string (str).

    Raises:
        TypeError, ValueError
    """
    _check_int(
        number_of_registers,
        minvalue=1,
        maxvalue=_MAX_NUMBER_OF_REGISTERS_TO_READ,
        description="number of registers",
    )
    max_characters = _NUMBER_OF_BYTES_PER_REGISTER * number_of_registers
    _check_string(
        bytestring, "byte string", minlength=max_characters, maxlength=max_characters
    )
    textstring = bytestring
    return textstring
474ac26b8fb3e454ce2747300c42b86df988ecc8
21,433
import numpy


def sigma_XH(elem, Teff=4500., M_H=0., SNR=100., dr=None):
    """
    NAME:
       sigma_XH
    PURPOSE:
       return uncertainty in a given element at specified effective temperature,
       metallicity and signal to noise ratio (functional form taken from
       Holtzman et al 2015)
    INPUT:
       elem - string element name following the ASPCAP star naming convention
              i.e. for DR12 carbon, string is 'C_H'
       Teff - effective temperature or array thereof in K, defaults to 4500 K
       M_H - metallicity or array thereof, defaults to 0
       SNR - signal to noise ratio or array thereof, defaults to 100
       dr - data release
    OUTPUT:
       float or array depending on shape of Teff, M_H and SNR input
    HISTORY:
       2017-07-24 - Written - Price-Jones (UofT)
    """
    if dr is None:
        dr = appath._default_dr()
    A, B, C, D = drcoeffs[dr][elem]
    logsig = A + B*((Teff-4500.)/1000.) + C*M_H + D*(SNR-100)
    return numpy.exp(logsig)
186974970505b21cb9978c8afcfbee1a9c0bf17c
21,434
from typing import Callable


def lazy_load_command(import_path: str) -> Callable:
    """Create a lazy loader for command"""
    _, _, name = import_path.rpartition('.')

    def command(*args, **kwargs):
        func = import_string(import_path)
        return func(*args, **kwargs)

    command.__name__ = name  # type: ignore

    return command
273e482412a079e5a59b84422ee409df7b3a7a1c
21,435
def tlam(func, tup):
    """Split tuple into arguments
    """
    return func(*tup)
0e3a9b93b36795e6c11631f8c8852aba59724f88
21,436
from typing import Counter def sax_df_reformat(sax_data, sax_dict, meter_data, space_btw_saxseq=3): """"Function to format a SAX timeseries original data for SAX heatmap plotting.""" counts_nb = Counter(sax_dict[meter_data]) # Sort the counter dictionnary per value # source: https://stackoverflow.com/questions/613183/how-do-i-sort-a-dictionary-by-value counter = {k: v for k, v in sorted(counts_nb.items(), key=lambda item: item[1])} keys = counter.keys() new_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns) empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns, index=[' '] * space_btw_saxseq) for sax_seq in keys: if counter[sax_seq] > 10: empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns, index=[' '] * space_btw_saxseq) else: s2 = min(int(round(space_btw_saxseq*(counter[sax_seq]/5))), space_btw_saxseq) empty_sax_df = pd.DataFrame(columns=sax_data[meter_data].columns, index=[' ']*s2) # Obtaining sax indexes of corresponding profiles within dataframe indexes = [i for i, x in enumerate(sax_dict[meter_data]) if x == sax_seq] # returns all indexes # Formating a newdataframe from selected sax_seq df_block = sax_data[meter_data].iloc[indexes].copy() df_block["SAX"] = [sax_seq] * len(indexes) new_sax_df = pd.concat([df_block, empty_sax_df, new_sax_df], axis=0) # Reformated dataframe # Mapping the sax sequence to the data index_map_dictionary = dict() index_map_dictionary["SAX_seq"], index_map_dictionary["SAX_idx"] = [], [] for sax_seq in counter: indexes = [i for i, x in enumerate(new_sax_df["SAX"]) if x == sax_seq] # returns all indexes #index_map_dictionary["SAX_seq"].append(sax_seq) if counter[sax_seq] > 10: index_map_dictionary["SAX_seq"].append(sax_seq) else: index_map_dictionary["SAX_seq"].append(" ") index_map_dictionary["SAX_idx"].append(np.median(indexes)) # Droping the SAX column of the dataframe now that we have a mapping variable for it new_sax_df.drop("SAX", axis=1, inplace=True) return new_sax_df, index_map_dictionary
6e241979d673910da2acfd522d1c32a3f1d815a8
21,437
def filter_not_t(func):
    """
    Transformation for Sequence.filter_not
    :param func: filter_not function
    :return: transformation
    """
    return Transformation(
        "filter_not({0})".format(name(func)),
        partial(filterfalse, func),
        {ExecutionStrategies.PARALLEL},
    )
af548f7cfa60f5b598ad3527d8eaabca09aed4e6
21,438
def get_task_by_id(id):
    """Return task by its ID"""
    return TaskJson.json_by_id(id)
c1b1a4137cdab853e7d6c02167b914367120972a
21,439
def priority(floors, elevator):
    """Priority for a State."""
    priority = 3 - elevator
    for i, floor in enumerate(floors):
        priority += (3 - i) * len(floor)
    return priority
b65abac24fb85f50425f2adfd4d98786b41c9a2d
21,440
from typing import Union
from typing import List


def get_user_groups(user_id: Union[int, str]) -> List[UserSerializer]:
    """
    Fetch all Groups of the specified User.

    Args:
        user_id: the {login} or {id} of the target User

    Returns:
        A list of Groups; note that Yuque treats Groups as Users here.
    """
    uri = f'/users/{user_id}/groups'
    method = 'GET'
    anonymous = True
    return Request.send(method, uri, anonymous=anonymous)
de02631693c6b31c566f93ee4cdc96bee3db024a
21,441
def user_news_list(): """ 新闻列表 :return: """ user = g.user page = request.args.get("page") try: page = int(page) except Exception as e: current_app.logger.error(e) page = 1 # 查询 news_list = [] current_page = 1 total_page = 1 try: paginate = user.news_list.paginate(page, constants.OTHER_NEWS_PAGE_MAX_COUNT, False) news_list = paginate.items current_page = paginate.page total_page = paginate.pages except Exception as e: current_app.logger.error(e) news_dict_li = [news.to_review_dict() for news in news_list] data = { "news_dict_li": news_dict_li, "current_page": current_page, "total_page": total_page } print(news_list) return render_template("news/user_news_list.html", data=data)
adc202bfdbf2c2e2d7d60c949fdad028a56b63c0
21,442
def unificate_link(link):
    """Process whitespace, make first letter upper."""
    link = process_link_whitespace(link)
    if len(link) < 2:
        return link.upper()
    else:
        return link[0].upper() + link[1:]
4d9a5a4a88141f2a8e6400186c607615470cabde
21,443
def compute_vel_acc( robo, symo, antRj, antPj, forced=False, gravity=True, floating=False ): """Internal function. Computes speeds and accelerations usitn Parameters ========== robo : Robot Instance of robot description container symo : symbolmgr.SymbolManager Instance of symbolic manager """ #init velocities and accelerations w = ParamsInit.init_w(robo) wdot, vdot = ParamsInit.init_wv_dot(robo, gravity) # decide first link first_link = 1 if floating or robo.is_floating or robo.is_mobile: first_link = 0 #init auxilary matrix U = ParamsInit.init_u(robo) for j in xrange(first_link, robo.NL): if j == 0: w[j] = symo.mat_replace(w[j], 'W', j) wdot[j] = symo.mat_replace(wdot[j], 'WP', j) vdot[j] = symo.mat_replace(vdot[j], 'VP', j) dv0 = ParamsInit.product_combinations(w[j]) symo.mat_replace(dv0, 'DV', j) hatw_hatw = Matrix([ [-dv0[3]-dv0[5], dv0[1], dv0[2]], [dv0[1], -dv0[5]-dv0[0], dv0[4]], [dv0[2], dv0[4], -dv0[3]-dv0[0]] ]) U[j] = hatw_hatw + tools.skew(wdot[j]) symo.mat_replace(U[j], 'U', j) else: jRant = antRj[j].T qdj = Z_AXIS * robo.qdot[j] qddj = Z_AXIS * robo.qddot[j] wi, w[j] = _omega_ij(robo, j, jRant, w, qdj) symo.mat_replace(w[j], 'W', j) symo.mat_replace(wi, 'WI', j) _omega_dot_j(robo, j, jRant, w, wi, wdot, qdj, qddj) symo.mat_replace(wdot[j], 'WP', j, forced) _v_dot_j(robo, symo, j, jRant, antPj, w, wi, wdot, U, vdot, qdj, qddj) symo.mat_replace(vdot[j], 'VP', j, forced) return w, wdot, vdot, U
474729b9329ee21d4bcfffb33916d8d85a21ea62
21,444
def _sample_n_k(n, k):
    """Sample k distinct elements uniformly from range(n)"""
    if not 0 <= k <= n:
        raise ValueError("Sample larger than population or is negative")
    if 3 * k >= n:
        return np.random.choice(n, k, replace=False)
    else:
        result = np.random.choice(n, 2 * k)
        selected = set()
        selected_add = selected.add
        j = k
        for i in range(k):
            x = result[i]
            while x in selected:
                x = result[i] = result[j]
                j += 1
                if j == 2 * k:
                    # This is slow, but it rarely happens.
                    result[k:] = np.random.choice(n, k)
                    j = k
            selected_add(x)
        return result[:k]
3aad3ed36590655ef079a4d39745d6c59ec499a8
21,445
def _all_usage_keys(descriptors, aside_types):
    """
    Return a set of all usage_ids for the `descriptors` and for all asides in
    `aside_types` for those descriptors.
    """
    usage_ids = set()
    for descriptor in descriptors:
        usage_ids.add(descriptor.scope_ids.usage_id)
        for aside_type in aside_types:
            usage_ids.add(AsideUsageKeyV1(descriptor.scope_ids.usage_id, aside_type))
            usage_ids.add(AsideUsageKeyV2(descriptor.scope_ids.usage_id, aside_type))
    return usage_ids
75652e9468e6a61763b407bf11d644b1d08dd38c
21,446
def svn_client_invoke_get_commit_log2(*args):
    """svn_client_invoke_get_commit_log2(svn_client_get_commit_log2_t _obj, apr_array_header_t commit_items, void * baton, apr_pool_t pool) -> svn_error_t"""
    return _client.svn_client_invoke_get_commit_log2(*args)
fe7652c7e1573c3d688ddde40630b9b24e5bb48c
21,447
def round_extent(extent, cellsize):
    """Increases the extent until all sides lie on a coordinate divisible by cellsize."""
    xmin, ymin, xmax, ymax = extent
    xmin = np.floor(xmin / cellsize) * cellsize
    ymin = np.floor(ymin / cellsize) * cellsize
    xmax = np.ceil(xmax / cellsize) * cellsize
    ymax = np.ceil(ymax / cellsize) * cellsize
    return xmin, ymin, xmax, ymax
384cf262f5dd206b0755623ce6d859e4f82efa86
21,448
def add_numbers():
    """Return the chatbot's reply to the message passed in the 'a' query parameter."""
    # a = request.args.get('a', 0, type=str)  # input from html
    a = request.args.get('a')  # input from html
    print(a)
    result = chatbot.main(a)
    print("Result: ", result)
    returned = a
    # return jsonify(returned)
    return jsonify(''.join(result))
    # return jsonify(result=returned[0])  # return something back
d0e670ea0fc7bff33d5419316f5ebddf12cecea0
21,449
import re


def _parse_stack_info(line, re_obj, crash_obj, line_num):
    """
    :param line: line string
    :param re_obj: re compiled object
    :param crash_obj: CrashInfo object
    :return: crash_obj, re_obj, complete:Bool
    """
    if re_obj is None:
        re_obj = re.compile(_match_stack_item_re())

    complete = False
    match_obj = re_obj.match(line)
    if match_obj is not None:
        stack_item = StackItemInfo()
        stack_item.name = match_obj.group(1)
        stack_item.invoke_address = match_obj.group(2)
        stack_item.load_address = match_obj.group(3)
        stack_item.line_num = line_num
        crash_obj.function_stacks.append(stack_item)
    elif re.match(_match_image_header_re(), line) is not None:
        complete = True
        re_obj = None

    return (crash_obj, re_obj, complete)
b360ef9c6d96092f59952fec90fdc41b2463c780
21,450
import math


def Orbiter(pos, POS, veloc, MASS, mass):
    """
    Find the new position and velocity of an Orbiter

    Parameters
    ----------
    pos : list
        Position vector of the orbiter.
    POS : list
        Position vector of the centre object.
    veloc : list
        Velocity of the orbiter.
    MASS : int
        Mass of the centre object.
    mass : int
        Mass of the orbiter.

    Returns
    -------
    list
        Returns a list of two vectors, first being the new position vector and
        the second being the new velocity vector.
    """
    # finding the orbital radius (distance between orbiter and centre object)
    rad = math.sqrt(((pos[0]-POS[0])**2)+((pos[1]-POS[1])**2))
    # getting the acceleration
    # acc=(G*MASS*rad)/abs(rad)**3
    acc = [(-(G*MASS)/(rad**2))*((pos[0]-POS[0])/rad),
           (-(G*MASS)/(rad**2))*((pos[1]-POS[1])/rad)]  # (pos[i]/rad) being the unit vector
    # getting the new velocity vector
    # veloc += [acc[0]*timeFrameLength, acc[1]*timeFrameLength]  # would extend the list and double-apply the acceleration
    for i in range(2):
        veloc[i] += acc[i]*timeFrameLength
    # veloc[0]+=(acc*timeFrameLength)*((pos[0]-POS[0])/rad) #(pos[i]/rad) being to make it go towards the object
    # veloc[1]+=(acc*timeFrameLength)*((pos[1]-POS[1])/rad) #(pos[i]/rad) being to make it go towards the object
    # for i in range(2):
    #     veloc[i]+=(acc*timeFrameLength)*((pos[i]+POS[i])/rad) #(pos[i]/rad) being to make it go towards the object
    # getting the new position
    for i in range(2):
        pos[i] += veloc[i]*timeFrameLength
    return [pos, veloc]
8d6c08fc1a7fa1165550e13944c1dbda414e6e62
21,451
def build_heading(win, readonly=False):
    """Generate heading text for screen
    """
    if not win.parent().albumdata['artist'] or not win.parent().albumdata['titel']:
        text = 'Opvoeren nieuw {}'.format(TYPETXT[win.parent().albumtype])
    else:
        wintext = win.heading.text()
        newtext = ''
        for text in ('tracks', 'opnames'):
            if wintext == text:
                newtext = ': {}'.format(wintext)
                break
            elif wintext.endswith(text):
                newtext = ': {}'.format(text)
                break
        text = 'G' if readonly else 'Wijzigen g'
        text = '{}egevens van {} {} - {} {}'.format(
            text, TYPETXT[win.parent().albumtype],
            win.parent().albumdata['artist'],
            win.parent().albumdata['titel'], newtext)
    return text
133f4111171ab0bd04bed82455ced9aa9dcc010b
21,452
def GetTraceValue():
    """Return a value to be used for the trace header."""
    # Token to be used to route service request traces.
    trace_token = properties.VALUES.core.trace_token.Get()
    # Username to which service request traces should be sent.
    trace_email = properties.VALUES.core.trace_email.Get()
    # Enable/disable server side logging of service requests.
    trace_log = properties.VALUES.core.trace_log.GetBool()

    if trace_token:
        return 'token:{0}'.format(trace_token)
    elif trace_email:
        return 'email:{0}'.format(trace_email)
    elif trace_log:
        return 'log'
    return None
67c1fc9d0602ca25c02dd088e1abba1ad951022f
21,453
from typing import Optional
from typing import Union
from typing import Tuple


def sql(
    where: str, parameters: Optional[Parameters] = None
) -> Union[str, Tuple[str, Parameters]]:
    """
    Return a SQL query, usable for querying the TransitMaster database.

    If provided, parameters are returned duplicated, to account for the fact that
    the WHERE clause is also duplicated.
    """
    formatted = SQL.format(where=where)
    if parameters is None:
        return formatted

    return (formatted, parameters + parameters)
22ca2194f355deaa4fc55b458c1f1a013ab2902e
21,455
def clip(a, a_min, a_max): """Clips the values of an array to a given interval. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Args: a (~chainerx.ndarray): Array containing elements to clip. a_min (scalar): Maximum value. a_max (scalar): Minimum value. Returns: ~chainerx.ndarray: An array with the elements of ``a``, but where values < ``a_min`` are replaced with ``a_min``, and those > ``a_max`` with ``a_max``. Note: The :class:`~chainerx.ndarray` typed ``a_min`` and ``a_max`` are not supported yet. Note: During backpropagation, this function propagates the gradient of the output array to the input array ``a``. .. seealso:: :func:`numpy.clip` """ if a_min is None and a_max is None: raise ValueError('Must set either a_min or a_max.') if a_min is not None: a = chainerx.maximum(a, a_min) if a_max is not None: a = chainerx.minimum(a, a_max) return a
0394b68329c48198ade9a3131c6c26940f09a154
21,456
def hole_eigenvalue_residual( energy: floatarray, particle: "CoreShellParticle" ) -> float: """This function returns the residual of the hole energy level eigenvalue equation. Used with root-finding methods to calculate the lowest energy state. Parameters ---------- energy : float, eV The energy for which to calculate the wavevector of a hole in in the nanoparticle. particle : CoreShellParticle The particle for which to calculate the hole wavevectors. We pass in the particle directly since there are a lot of parameters to pass in and this keeps the interface clean. References ---------- .. [1] Piryatinski, A., Ivanov, S. A., Tretiak, S., & Klimov, V. I. (2007). Effect of Quantum and Dielectric Confinement on the Exciton−Exciton Interaction Energy in Type II Core/Shell Semiconductor Nanocrystals. Nano Letters, 7(1), 108–115. https://doi.org/10.1021/nl0622404 .. [2] Li, L., Reiss, P., & Protie, M. (2009). Core / Shell Semiconductor Nanocrystals, (2), 154–168. https://doi.org/10.1002/smll.200800841 """ core_hole_wavenumber, shell_hole_wavenumber = (None, None) if particle.type_one: core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h) shell_hole_wavenumber = wavenumber_from_energy( energy, particle.smat.m_h, potential_offset=particle.uh ) elif particle.type_one_reverse: core_hole_wavenumber = wavenumber_from_energy( energy, particle.cmat.m_h, potential_offset=particle.uh ) shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h) elif particle.type_two: if particle.e_h: core_hole_wavenumber = wavenumber_from_energy( energy, particle.cmat.m_h, potential_offset=particle.uh ) shell_hole_wavenumber = wavenumber_from_energy(energy, particle.smat.m_h) elif particle.h_e: core_hole_wavenumber = wavenumber_from_energy(energy, particle.cmat.m_h) shell_hole_wavenumber = wavenumber_from_energy( energy, particle.smat.m_h, potential_offset=particle.uh ) core_x = core_hole_wavenumber * particle.core_width shell_x = shell_hole_wavenumber * particle.shell_width core_width = particle.core_width shell_width = particle.shell_width mass_ratio = particle.smat.m_h / particle.cmat.m_h if type(core_x) in [np.float64, np.complex128]: return np.real( (1 - 1 / _tanxdivx(core_x)) * mass_ratio - 1 - 1 / _tanxdivx(shell_x) * core_width / shell_width ) else: return np.real( (1 - 1 / tanxdivx(core_x)) * mass_ratio - 1 - 1 / tanxdivx(shell_x) * core_width / shell_width )
500033e927c29595c67d2e2327ebe1ae6d39cfd0
21,457
def open_raster(filename):
    """Take a file path as a string and return a gdal datasource object"""
    # register all of the GDAL drivers
    gdal.AllRegister()

    # open the image
    img = gdal.Open(filename, GA_ReadOnly)
    if img is None:
        print('Could not open %s' % filename)
        sys.exit(1)
    else:
        return img
b1c002be50b59e74a327943af8613b11cddf9b88
21,458
def reduce2latlon_seasonal( mv, season=seasonsyr, region=None, vid=None, exclude_axes=[],
                            seasons=seasonsyr ):
    """as reduce2lat_seasonal, but both lat and lon axes are retained.
    Axis names (ids) may be listed in exclude_axes, to exclude them from the
    averaging process.
    """
    # backwards compatibility with old keyword 'seasons':
    if seasons != seasonsyr:
        season = seasons
    return reduce2any( mv, target_axes=['x','y'], season=season, region=region, vid=vid,
                       exclude_axes=exclude_axes )
7f101ce4ac5d4382d287901607c455b4d922f847
21,459
def GetFreshAccessTokenIfEnabled(account=None,
                                 scopes=None,
                                 min_expiry_duration='1h',
                                 allow_account_impersonation=True):
    """Returns a fresh access token of the given account or the active account.

    Same as GetAccessTokenIfEnabled except that the access token returned by this
    function is valid for at least min_expiry_duration.

    Args:
        account: str, The account to get the access token for. If None, the account
            stored in the core.account property is used.
        scopes: tuple, Custom auth scopes to request. By default CLOUDSDK_SCOPES are
            requested.
        min_expiry_duration: Duration str, Refresh the token if they are within this
            duration from expiration. Must be a valid duration between 0 seconds and
            1 hour (e.g. '0s' >x< '1h').
        allow_account_impersonation: bool, True to allow use of impersonated service
            account credentials (if that is configured).
    """
    if properties.VALUES.auth.disable_credentials.GetBool():
        return None
    return GetFreshAccessToken(account, scopes, min_expiry_duration,
                               allow_account_impersonation)
7716b44802d84aac1952e936166f3414459cbc4b
21,460
def unicode_to_xes(uni):
    """Convert unicode characters to our ASCII representation of patterns."""
    uni = uni.replace(INVISIBLE_CRAP, '')
    return ''.join(BOXES[c] for c in uni)
4c6eebcf562804340ef683eec84e28002202d833
21,461
def AvailableSteps():
    """(read-only) Number of Steps available in cap bank to be switched ON."""
    return lib.Capacitors_Get_AvailableSteps()
210f1316beafcdef266858490411bb9f737cb3de
21,462
import re


def modify_list(result, guess, answer):
    """
    Reveal each occurrence of the guessed letter in the displayed word pattern
    and print the updated pattern.

    Arguments:
        result -- the displayed word pattern as a list of characters.
        guess -- the letter of user's guess.
        answer -- the answer word.

    Returns:
        result -- the pattern list after modification.
    """
    guess = guess.lower()
    answer = answer.lower()
    if guess in answer:
        index_list = [x.start() for x in re.finditer(guess, answer)]
        for i in index_list:
            result[i] = guess.upper()
    else:
        print("Letter '{}' is not in the word".format(guess.upper()))
    print(' '.join(result))
    return result
9384ecd09659c55808a859dd613641ccac46c760
21,463
def f(p, snm, sfs):
    """
    p: proportion of all SNP's on the X chromosome [float, 0<p<1]
    snm: standard neutral model spectrum (optimally scaled)
    sfs: observed SFS
    """
    # modify sfs
    fs = modify(p, sfs)
    # return sum of squared deviations of modified SFS with snm spectrum:
    return np.sum( (fs - snm)**2 )
b7d3c8ef188a5126fe7b817c78949fb9feec5b62
21,464
def get_N_intransit(tdur, cadence):
    """Estimates number of in-transit points for transits in a light curve.

    Parameters
    ----------
    tdur: float
        Full transit duration
    cadence: float
        Cadence/integration time for light curve

    Returns
    -------
    n_intransit: int
        Number of flux points in each transit
    """
    n_intransit = tdur//cadence
    return n_intransit
d126b5590a8997b8695c1a86360421f2bf4b8357
21,465