Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
import math


def overlay(*fields: SampledField or Tensor) -> Tensor:
    """
    Specify that multiple fields should be drawn on top of one another in the same figure.

    The fields will be plotted in the order they are given, i.e. the last field on top.

    ```python
    vis.plot(vis.overlay(heatmap, points, velocity))
    ```

    Args:
        *fields: `SampledField` or `Tensor` instances

    Returns:
        Plottable object
    """
    return math.layout(fields, math.channel('overlay'))
629eb69c15d814384b70bac2ac78cebdd93ef20d
27,520
def load_identifier(value):
    """Load identifier."""
    if value == "y":
        return True
    return False
0292439c8eeb3788a6b517bb7b340df2c0435b4d
27,521
def IKinSpaceConstrained(screw_list, ee_home, ee_goal, theta_list, position_tolerance, rotation_tolerance, joint_mins, joint_maxs, max_iterations): """ Calculates IK to a certain goal within joint rotation constraints Args: screw_list: screw list ee_home: home end effector position ee_goal: Goal Position theta_list: Initial thetas position_tolerance: Positional tolerance rotation_tolerance: Rotational tolerance joint_mins: joint minimum rotations joint_maxs: joint maximum rotations max_iterations: Maximum Iterations before failure Returns: ndarray: joint configuration boolean: success """ ee_current = FKinSpace(ee_home, screw_list, theta_list) error_vec = np.dot(Adjoint(ee_current), se3ToVec(MatrixLog6(np.dot(TransInv(ee_current), ee_goal)))) #print(mhp.MatrixLog6(np.dot(mhp.TransInv(ee_current), ee_goal)), "Test") error_bool = (np.linalg.norm(error_vec[0:3]) > position_tolerance or np.linalg.norm(error_vec[3:6]) > rotation_tolerance) #if np.isnan(error_vec).any(): # error_bool = True i = 0 while error_bool and i < max_iterations: jacobian_space = JacobianSpace(screw_list, theta_list) inverse_jacobian_space = np.linalg.pinv(jacobian_space) new_theta = np.dot(inverse_jacobian_space, error_vec) theta_list = theta_list + new_theta for j in range(len(theta_list)): if theta_list[j] < joint_mins[j]: theta_list[j] = joint_mins[j] if theta_list[j] > joint_maxs[j]: theta_list[j] = joint_maxs[j]; i = i + 1 ee_current = FKinSpace(ee_home, screw_list, theta_list) error_vec = np.dot(Adjoint(ee_current), se3ToVec(MatrixLog6(np.dot(TransInv(ee_current), ee_goal)))) error_bool = (np.linalg.norm(error_vec[0:3]) > position_tolerance or np.linalg.norm(error_vec[3:6]) > rotation_tolerance) #if np.isnan(error_vec).any(): # error_bool = True success = not error_bool return theta_list, success
7c59a8eaa7cff24f6c5359b100e006bbc58e8c00
27,522
def add_hidden_range(*args):
    """
    add_hidden_range(ea1, ea2, description, header, footer, color) -> bool

    Mark a range of addresses as hidden. The range will be created in the
    invisible state with the default color.

    @param ea1: linear address of start of the address range (C++: ea_t)
    @param ea2: linear address of end of the address range (C++: ea_t)
    @param description: range parameters (C++: const char *)
    @param header: range parameters (C++: const char *)
    @param footer: range parameters (C++: const char *)
    @param color: (C++: bgcolor_t)
    @return: success
    """
    return _ida_bytes.add_hidden_range(*args)
8f844c14b348bdcf58abe799cb8ed0c91d00aeef
27,523
import json


def _fetch_certs(request, certs_url):
    """Fetches certificates.

    Google-style certificate endpoints return JSON in the format of
    ``{'key id': 'x509 certificate'}``.

    Args:
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        certs_url (str): The certificate endpoint URL.

    Returns:
        Mapping[str, str]: A mapping of public key ID to x.509 certificate data.
    """
    response = request(certs_url, method='GET')

    if response.status != http_client.OK:
        raise exceptions.TransportError(
            'Could not fetch certificates at {}'.format(certs_url))

    return json.loads(response.data.decode('utf-8'))
3141c78d604bbed10236b5ed11cfc2c06a756ef2
27,524
def get_mock_personalization_dict(): """Get a dict of personalization mock.""" mock_pers = dict() mock_pers['to_list'] = [To("[email protected]", "Example User"), To("[email protected]", "Example User")] mock_pers['cc_list'] = [To("[email protected]", "Example User"), To("[email protected]", "Example User")] mock_pers['bcc_list'] = [To("[email protected]"), To("[email protected]")] mock_pers['subject'] = ("Hello World from the Personalized " "SendGrid Python Library") mock_pers['headers'] = [Header("X-Test", "test"), Header("X-Mock", "true")] mock_pers['substitutions'] = [Substitution("%name%", "Example User"), Substitution("%city%", "Denver")] mock_pers['custom_args'] = [CustomArg("user_id", "343"), CustomArg("type", "marketing")] mock_pers['send_at'] = 1443636843 return mock_pers
4be81a67715bc967c8d624d784008ed3cb6775f8
27,525
def load_python_bindings(python_input): """ Custom key bindings. """ bindings = KeyBindings() sidebar_visible = Condition(lambda: python_input.show_sidebar) handle = bindings.add @handle("c-l") def _(event): """ Clear whole screen and render again -- also when the sidebar is visible. """ event.app.renderer.clear() @handle("c-z") def _(event): """ Suspend. """ if python_input.enable_system_bindings: event.app.suspend_to_background() # Delete word before cursor, but use all Python symbols as separators # (WORD=False). handle("c-w")(get_by_name("backward-kill-word")) @handle("f2") def _(event): """ Show/hide sidebar. """ python_input.show_sidebar = not python_input.show_sidebar if python_input.show_sidebar: event.app.layout.focus(python_input.ptpython_layout.sidebar) else: event.app.layout.focus_last() @handle("f3") def _(event): """ Select from the history. """ python_input.enter_history() @handle("f4") def _(event): """ Toggle between Vi and Emacs mode. """ python_input.vi_mode = not python_input.vi_mode @handle("f6") def _(event): """ Enable/Disable paste mode. """ python_input.paste_mode = not python_input.paste_mode @handle( "tab", filter=~sidebar_visible & ~has_selection & tab_should_insert_whitespace ) def _(event): """ When tab should insert whitespace, do that instead of completion. """ event.app.current_buffer.insert_text(" ") @Condition def is_multiline(): return document_is_multiline_python(python_input.default_buffer.document) @handle( "enter", filter=~sidebar_visible & ~has_selection & (vi_insert_mode | emacs_insert_mode) & has_focus(DEFAULT_BUFFER) & ~is_multiline, ) @handle(Keys.Escape, Keys.Enter, filter=~sidebar_visible & emacs_mode) def _(event): """ Accept input (for single line input). """ b = event.current_buffer if b.validate(): # When the cursor is at the end, and we have an empty line: # drop the empty lines, but return the value. b.document = Document( text=b.text.rstrip(), cursor_position=len(b.text.rstrip()) ) b.validate_and_handle() @handle( "enter", filter=~sidebar_visible & ~has_selection & (vi_insert_mode | emacs_insert_mode) & has_focus(DEFAULT_BUFFER) & is_multiline, ) def _(event): """ Behaviour of the Enter key. Auto indent after newline/Enter. (When not in Vi navigaton mode, and when multiline is enabled.) """ b = event.current_buffer empty_lines_required = python_input.accept_input_on_enter or 10000 def at_the_end(b): """we consider the cursor at the end when there is no text after the cursor, or only whitespace.""" text = b.document.text_after_cursor return text == "" or (text.isspace() and not "\n" in text) if python_input.paste_mode: # In paste mode, always insert text. b.insert_text("\n") elif at_the_end(b) and b.document.text.replace(" ", "").endswith( "\n" * (empty_lines_required - 1) ): # When the cursor is at the end, and we have an empty line: # drop the empty lines, but return the value. if b.validate(): b.document = Document( text=b.text.rstrip(), cursor_position=len(b.text.rstrip()) ) b.validate_and_handle() else: auto_newline(b) @handle( "c-d", filter=~sidebar_visible & has_focus(python_input.default_buffer) & Condition( lambda: # The current buffer is empty. not get_app().current_buffer.text ), ) def _(event): """ Override Control-D exit, to ask for confirmation. """ if python_input.confirm_exit: # Show exit confirmation and focus it (focusing is important for # making sure the default buffer key bindings are not active). 
python_input.show_exit_confirmation = True python_input.app.layout.focus( python_input.ptpython_layout.exit_confirmation ) else: event.app.exit(exception=EOFError) @handle("c-c", filter=has_focus(python_input.default_buffer)) def _(event): " Abort when Control-C has been pressed. " event.app.exit(exception=KeyboardInterrupt, style="class:aborting") return bindings
2725352272d001da7dc74e7c29819b8ddae5521e
27,526
def batch_retrieve_pipeline_s3(pipeline_upload):
    """ Data is returned in the form (chunk_object, file_data). """
    study = Study.objects.get(id=pipeline_upload.study_id)
    return pipeline_upload, s3_retrieve(pipeline_upload.s3_path,
                                        study.object_id,
                                        raw_path=True)
9f48186a116fab7826dd083ab80e4f5383e813ba
27,527
def gat(gw, feature, hidden_size, activation, name, num_heads=8, feat_drop=0.6, attn_drop=0.6, is_test=False): """Implementation of graph attention networks (GAT) This is an implementation of the paper GRAPH ATTENTION NETWORKS (https://arxiv.org/abs/1710.10903). Args: gw: Graph wrapper object (:code:`StaticGraphWrapper` or :code:`GraphWrapper`) feature: A tensor with shape (num_nodes, feature_size). hidden_size: The hidden size for gat. activation: The activation for the output. name: Gat layer names. num_heads: The head number in gat. feat_drop: Dropout rate for feature. attn_drop: Dropout rate for attention. is_test: Whether in test phrase. Return: A tensor with shape (num_nodes, hidden_size * num_heads) """ def send_attention(src_feat, dst_feat, edge_feat): output = src_feat["left_a"] + dst_feat["right_a"] output = fluid.layers.leaky_relu( output, alpha=0.2) # (num_edges, num_heads) return {"alpha": output, "h": src_feat["h"]} def reduce_attention(msg): alpha = msg["alpha"] # lod-tensor (batch_size, seq_len, num_heads) h = msg["h"] alpha = paddle_helper.sequence_softmax(alpha) old_h = h h = fluid.layers.reshape(h, [-1, num_heads, hidden_size]) alpha = fluid.layers.reshape(alpha, [-1, num_heads, 1]) if attn_drop > 1e-15: alpha = fluid.layers.dropout( alpha, dropout_prob=attn_drop, is_test=is_test, dropout_implementation="upscale_in_train") h = h * alpha h = fluid.layers.reshape(h, [-1, num_heads * hidden_size]) h = fluid.layers.lod_reset(h, old_h) return fluid.layers.sequence_pool(h, "sum") if feat_drop > 1e-15: feature = fluid.layers.dropout( feature, dropout_prob=feat_drop, is_test=is_test, dropout_implementation='upscale_in_train') ft = fluid.layers.fc(feature, hidden_size * num_heads, bias_attr=False, param_attr=fluid.ParamAttr(name=name + '_weight')) left_a = fluid.layers.create_parameter( shape=[num_heads, hidden_size], dtype='float32', name=name + '_gat_l_A') right_a = fluid.layers.create_parameter( shape=[num_heads, hidden_size], dtype='float32', name=name + '_gat_r_A') reshape_ft = fluid.layers.reshape(ft, [-1, num_heads, hidden_size]) left_a_value = fluid.layers.reduce_sum(reshape_ft * left_a, -1) right_a_value = fluid.layers.reduce_sum(reshape_ft * right_a, -1) msg = gw.send( send_attention, nfeat_list=[("h", ft), ("left_a", left_a_value), ("right_a", right_a_value)]) output = gw.recv(msg, reduce_attention) bias = fluid.layers.create_parameter( shape=[hidden_size * num_heads], dtype='float32', is_bias=True, name=name + '_bias') bias.stop_gradient = True output = fluid.layers.elementwise_add(output, bias, act=activation) return output
c653dc26dc2bb1dccf37481560d93ba7eee63f7c
27,529
def _number(string):
    """
    Extracts an int from a string.
    Returns a 0 if None or an empty string was passed.
    """
    if not string:
        return 0
    else:
        try:
            return int(string)
        except ValueError:
            return float(string)
d14a7a04f33f36efd995b74bc794fce2c5f4be97
27,530
def get_email(sciper):
    """ Return email of user """
    attribute = 'mail'
    response = LDAP_search(
        pattern_search='(uniqueIdentifier={})'.format(sciper),
        attribute=attribute
    )
    try:
        email = get_attribute(response, attribute)
    except Exception:
        raise EpflLdapException("No email address corresponds to sciper {}".format(sciper))
    return email
5f9ce9f69e4e7c211f404a50b4f420eb6e978d64
27,531
import math


def convert_size(size):
    """
    Size should be in bytes.
    Return a tuple (float_or_int_val, str_unit)
    """
    if size == 0:
        return (0, "B")
    KILOBYTE = 1024
    size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    i = int(math.floor(math.log(size, KILOBYTE)))
    p = math.pow(KILOBYTE, i)
    result = round(size / p, 2)
    return (result, size_name[i])
b2bc18df8ae268e1b03bcc7addadca8e07309f7c
27,532
def _backward(gamma, mask): """Backward recurrence of the linear chain crf.""" gamma = K.cast(gamma, 'int32') # (B, T, N) def _backward_step(gamma_t, states): # print('len(states)=', len(states)) # print(type(states)) # y_tm1 = K.squeeze(states[0], 0) y_tm1 = states[0] y_t = batch_gather(gamma_t, y_tm1) # return y_t, [K.expand_dims(y_t, 0)] # return K.expand_dims(y_t, 0), [K.expand_dims(y_t, 0)] return y_t, [y_t] # initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)] # (1, B) initial_states = [K.zeros_like(gamma[:, 0, 0])] # (1, B) _, y_rev, _ = K.rnn(_backward_step, gamma, initial_states, go_backwards=True) y = K.reverse(y_rev, 1) if mask is not None: mask = K.cast(mask, dtype='int32') # mask output y *= mask # set masked values to -1 y += -(1 - mask) return y
ab71548e87023e09ccd28aac81b95a6671384205
27,533
def t3err(viserr, N=7):
    """ Given visibility errors, propagate these into triple-product errors """
    amparray = populate_symmamparray(viserr, N=N)
    t3viserr = np.zeros(int(comb(N, 3)))
    nn = 0
    for kk in range(N - 2):
        for ii in range(N - kk - 2):
            for jj in range(N - kk - ii - 2):
                t3viserr[nn + jj] = np.sqrt(amparray[kk, ii + kk + 1]**2
                                            + amparray[ii + kk + 1, jj + ii + kk + 2]**2
                                            + amparray[jj + ii + kk + 2, kk]**2)
            nn = nn + jj + 1
    return t3viserr
f5d774bb361c389f470de504adc2a467c71f0ec7
27,534
def safe_sign_and_autofill_transaction(
    transaction: Transaction, wallet: Wallet, client: Client
) -> Transaction:
    """
    Signs a transaction locally, without trusting external rippled nodes.
    Autofills relevant fields.

    Args:
        transaction: the transaction to be signed.
        wallet: the wallet with which to sign the transaction.
        client: a network client.

    Returns:
        The signed transaction.
    """
    return safe_sign_transaction(_autofill_transaction(transaction, client), wallet)
ce2b898529e15c65c8c84906c12093b9865e4768
27,536
def getMetricValue(glyph, attr):
    """ Get the metric value for an attribute. """
    attr = getAngledAttrIfNecessary(glyph.font, attr)
    return getattr(glyph, attr)
9637e8c1ee3e295b1d1da67d9a77108b761acddc
27,537
from typing import Optional from typing import Union def temporal_train_test_split( y: ACCEPTED_Y_TYPES, X: Optional[pd.DataFrame] = None, test_size: Optional[Union[int, float]] = None, train_size: Optional[Union[int, float]] = None, fh: Optional[FORECASTING_HORIZON_TYPES] = None, ) -> SPLIT_TYPE: """Split arrays or matrices into sequential train and test subsets. Creates train/test splits over endogenous arrays an optional exogenous arrays. This is a wrapper of scikit-learn's ``train_test_split`` that does not shuffle the data. Parameters ---------- y : pd.Series Target series X : pd.DataFrame, optional (default=None) Exogenous data test_size : float, int or None, optional (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split. If int, represents the relative number of test samples. If None, the value is set to the complement of the train size. If ``train_size`` is also None, it will be set to 0.25. train_size : float, int, or None, (default=None) If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the train split. If int, represents the relative number of train samples. If None, the value is automatically set to the complement of the test size. fh : ForecastingHorizon Returns ------- splitting : tuple, length=2 * len(arrays) List containing train-test split of `y` and `X` if given. References ---------- ..[1] adapted from https://github.com/alkaline-ml/pmdarima/ """ if fh is not None: if test_size is not None or train_size is not None: raise ValueError( "If `fh` is given, `test_size` and `train_size` cannot " "also be specified." ) return _split_by_fh(y, fh, X=X) else: pd_format = isinstance(y, pd.Series) or isinstance(y, pd.DataFrame) if pd_format is True and isinstance(y.index, pd.MultiIndex): ys = get_time_index(y) # Get index to group across (only indices other than timepoints index) yi_name = y.index.names yi_grp = yi_name[0:-1] # Get split into test and train data for timeindex only series = (ys,) yret = _train_test_split( *series, shuffle=False, stratify=None, test_size=test_size, train_size=train_size, ) # Convert into list indices ysl = ys.to_list() yrl1 = yret[0].to_list() yrl2 = yret[1].to_list() p1 = [index for (index, item) in enumerate(ysl) if item in yrl1] p2 = [index for (index, item) in enumerate(ysl) if item in yrl2] # Subset by group based on identified indices y_train = y.groupby(yi_grp, as_index=False).nth(p1) y_test = y.groupby(yi_grp, as_index=False).nth(p2) if X is not None: X_train = X.groupby(yi_grp, as_index=False).nth(p1) X_test = X.groupby(yi_grp, as_index=False).nth(p2) return y_train, y_test, X_train, X_test else: return y_train, y_test else: series = (y,) if X is None else (y, X) return _train_test_split( *series, shuffle=False, stratify=None, test_size=test_size, train_size=train_size, )
4bb59ead5a035114f067ab37c40289d0d660df2d
27,538
import re


def split_range_str(range_str):
    """
    Split the range string to bytes, start and end.

    :param range_str: Range request string
    :return: tuple of (bytes, start, end) or None
    """
    re_matcher = re.fullmatch(r'([a-z]+)=(\d+)?-(\d+)?', range_str)
    if not re_matcher or len(re_matcher.groups()) != 3:
        return None

    unit, start, end = re_matcher.groups()
    start = int(start) if type(start) == str else None
    end = int(end) if type(end) == str else None
    return unit, start, end
a6817017d708abf774277bf8d9360b63af78860d
27,539
from typing import Union


def get_valid_extent(array: Union[np.ndarray, np.ma.masked_array]) -> tuple:
    """
    Return (rowmin, rowmax, colmin, colmax), the first/last row/column of array with valid pixels
    """
    if not array.dtype == 'bool':
        valid_mask = ~get_mask(array)
    else:
        valid_mask = array
    cols_nonzero = np.where(np.count_nonzero(valid_mask, axis=0) > 0)[0]
    rows_nonzero = np.where(np.count_nonzero(valid_mask, axis=1) > 0)[0]
    return rows_nonzero[0], rows_nonzero[-1], cols_nonzero[0], cols_nonzero[-1]
9b906fcd88c901f84ff822d8ba667936d8b623ed
27,540
def epsmu2nz(eps, mu):  # {{{
    """ Accepts permittivity and permeability, returns effective index of refraction and impedance """
    N = np.sqrt(eps * mu)
    N *= np.sign(N.imag)
    Z = np.sqrt(mu / eps)
    return N, Z
7575d4596706f574f2a88627982edfb704d81d94
27,542
from typing import List
import re


def tokenize_numbers(text_array: List[str]) -> List[str]:
    """
    Splits large comma-separated numbers and floating point values.
    This is done by replacing commas with ' @,@ ' and dots with ' @.@ '.

    Args:
        text_array: An already tokenized text as list

    Returns:
        A list of strings with tokenized numbers

    Example::
        >>> tokenize_numbers(["$", "5,000", "1.73", "m"])
        ["$", "5", "@,@", "000", "1", "@.@", "73", "m"]
    """
    tokenized = []
    for i in range(len(text_array)):
        reg, sub = MATCH_NUMBERS
        replaced = re.sub(reg, sub, text_array[i]).split()
        tokenized.extend(replaced)

    return tokenized
f87b94850baeefd242ad2bcc89858fc05662a638
27,543
def main( datapath, kwdpath, in_annot, note_types, batch, ): """ Select notes for an annotation batch, convert them to CoNLL format and save them in folders per annotator. The overview of the batch is saved as a pickled DataFrame. Parameters ---------- datapath: Path path to raw data main folder kwdpath: Path path to the xlsx keywords file in_annot: list list of paths to batch pkl's that are currently in annotation and haven't been processed yet (these notes are excluded from the selection) note_types: {list, None} list of note types to select; if None, all note types are selected batch: str name of the batch Returns ------- None """ # load raw data print("Loading raw data...") all_2017 = pd.read_pickle(datapath / '2017_raw/processed.pkl') all_2018 = pd.read_pickle(datapath / '2018_raw/processed.pkl') all_2020 = pd.read_pickle(datapath / '2020_raw/processed.pkl') cov_2020 = pd.read_pickle(datapath / '2020_raw/ICD_U07.1/notes_[U07.1]_2020_q1_q2_q3.pkl') non_cov_2020 = remove_on_multikeys(all_2020, cov_2020, ['MDN', 'NotitieID']) data = {'2017': all_2017, '2018': all_2018, 'cov_2020': cov_2020, 'non_cov_2020': non_cov_2020} # annotated to exclude print("Loading annotated and 'in annotation'...") annotated = pd.read_csv(datapath / 'annotated_notes_ids.csv', dtype={'MDN': str, 'NotitieID': str}) in_annotation = pd.concat([pd.read_pickle(f) for f in in_annot]) exclude = annotated.NotitieID.append(in_annotation.NotitieID) # exclude annotated and sample / select specific note types def exclude_annotated_and_sample(df, annotated, n_sample=50000, random_state=45): print(f"Before exclusion: {len(df)=}") df = df.loc[~df.NotitieID.isin(annotated)].copy() print(f"After exclusion: {len(df)=}") if len(df) > n_sample: df = df.sample(n_sample, random_state=random_state) print(f"After sampling: {len(df)=}") return df def exclude_annotated_and_select_type(df, annotated, note_types): print(f"Before exclusion: {len(df)=}") df = df.loc[~df.NotitieID.isin(annotated)].copy() print(f"After exclusion: {len(df)=}") df = df.query(f"Typenotitie == {note_types}") print(f"After type selection: {len(df)=}") return df if note_types is None: for source, df in data.items(): print(f"{source}:") data[source] = exclude_annotated_and_sample(df, exclude) else: for source, df in data.items(): print(f"{source}:") data[source] = exclude_annotated_and_select_type(df, exclude, note_types=note_types) # keywords search keywords = pd.read_excel(kwdpath) keywords['regex'] = keywords.apply(lambda row: get_regex(row.keyword, row.regex_template_id), axis=1) reg_dict = get_reg_dict(keywords) print("Looking for keyword matches...") for source, df in data.items(): data[source] = find_keywords(df, reg_dict) # select notes print("Selecting notes for the batch...") batch_args = BATCH_SETTINGS[batch] df = select_notes(data, **batch_args) tab = df.pivot_table( index=['annotator'], columns=['source', 'samp_meth'], values='NotitieID', aggfunc='count', margins=True, margins_name='Total', ).to_string() print(f"Batch overview:\n{tab}") # save batch info df pklpath = PATHS.getpath('data_to_inception_conll') / f"{batch}.pkl" df.to_pickle(pklpath) print(f"Batch df is saved: {pklpath}") # convert to conll and save in folder per annotator conllpath = PATHS.getpath('data_to_inception_conll') nlp = spacy.load('nl_core_news_sm') annotators = BATCH_SETTINGS[batch]["annotators"] for annotator in annotators: outdir = conllpath / batch / annotator outdir.mkdir(exist_ok=True, parents=True) print(f"Converting notes to CoNLL and saving in {outdir}") annot = 
df.query("annotator == @annotator") annot.apply(row_to_conllfile, axis=1, nlp=nlp, outdir=outdir, batch=batch) print("Done!")
cdbb4a6dbd91adccd4fdfdb2bb7c653d109801f2
27,544
def mask_to_bbox(mask):
    """
    Convert mask to bounding box (x, y, w, h).

    Args:
        mask (np.ndarray): mask with values 0 and 255.

    Returns:
        List: [x, y, w, h]
    """
    mask = mask.astype(np.uint8)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    x = 0
    y = 0
    w = 0
    h = 0
    for contour in contours:
        tmp_x, tmp_y, tmp_w, tmp_h = cv2.boundingRect(contour)
        if tmp_w * tmp_h > w * h:
            x = tmp_x
            y = tmp_y
            w = tmp_w
            h = tmp_h
    return [x, y, w, h]
d8fc111bdb3a2c52866ad35b8e930e5ac8ebf0ba
27,545
def find_divisor(n, limit=1000000):
    """
    Use sieve to find first prime divisor of given number
    :param n: number
    :param limit: sieve limit
    :return: prime divisor if exists in sieve limit
    """
    primes = get_primes(limit)
    for prime in primes:
        if n % prime == 0:
            return prime
    raise Exception("No divisors found in range %d" % limit)
c990f25e055d8f8b1053cf94d6f9bb2d4fcff0bb
27,546
def sort_all(batch, lens):
    """
    Sort all fields by descending order of lens, and return the original indices.
    """
    if batch == [[]]:
        return [[]], []
    unsorted_all = [lens] + [range(len(lens))] + list(batch)
    sorted_all = [list(t) for t in zip(*sorted(zip(*unsorted_all), reverse=True))]
    return sorted_all[2:], sorted_all[1]
175f97a88371992472f1e65a9403910f404be914
27,547
def bh(p, fdr):
    """ From vector of p-values and desired false positive rate,
    returns significant p-values with Benjamini-Hochberg correction """
    p_orders = np.argsort(p)
    discoveries = []
    m = float(len(p_orders))
    for k, s in enumerate(p_orders):
        if p[s] <= (k + 1) / m * fdr:
            discoveries.append(s)
        else:
            break
    return np.array(discoveries, dtype=int)
ace0924fa72b39085d4504c3bb015f81e1c78546
27,548
import datetime


def guess_if_last(lmsg):
    """Guesses if message is the last one in a group"""
    msg_day = lmsg['datetime'].split('T')[0]
    msg_day = datetime.datetime.strptime(msg_day, '%Y-%m-%d')
    check_day = datetime.datetime.today() - datetime.timedelta(days=1)
    if msg_day >= check_day:
        return True
    return False
2d62caeb21daff4c5181db4a0b5cd833916fb945
27,550
import time def refine_by_split(featIds, n, m, topo_rules, grid_layer, progress_bar = None, labelIter = None ) : """ Description ---------- Split input_features in grid_layer and check their topology Parameters ---------- featIds : ids of features from grid_layer to be refined n : number of split for selected cells in the horizontal direction m : number of split for selected cells in the vertical direction topo_rules : topological rules for the propagation of refinement grid_layer : grid layer to be refined progress_bar : progress bar in dialog labelIter : iteration label in dialog Returns ------- Nothing, just grid_layer is updated Examples -------- >>> """ start_time = time.time() # -- Procedure for regular structured grids (MODFLOW , n_max = 1) if topo_rules['nmax'] == 1 : # build feature dictionary all_features = {feature.id(): feature for feature in grid_layer.getFeatures()} # init fix dictionary rowFixDict = { 'id': [] , 'n':[], 'm':[] } colFixDict = { 'id': [] , 'n':[], 'm':[] } # Initialize spatial index grid_layerIndex = QgsSpatialIndex(grid_layer.getFeatures()) # get bbox of grid layer grid_bbox = grid_layer.extent() # iterate over initial feature set # -- cells that have to be split horizontally if n > 1 : for featId in featIds : # only consider featId if current row has not been considered before if featId not in rowFixDict['id'] : # build bounding box over row bbox = all_features[featId].geometry().boundingBox() bbox.setXMinimum( grid_bbox.xMinimum() ) bbox.setXMaximum( grid_bbox.xMaximum() ) bbox.setYMinimum( bbox.yMinimum() + TOLERANCE ) bbox.setYMaximum( bbox.yMaximum() - TOLERANCE ) # get features in current row rowFeatIds = grid_layerIndex.intersects( bbox ) # update fix_dict with features in current row this_fix_dict = { 'id':rowFeatIds , 'n':[n]*len(rowFeatIds), 'm':[1]*len(rowFeatIds) } rowFixtDict = update_fix_dict(rowFixDict,this_fix_dict) # -- cells that have to be split along columns if m > 1 : for featId in featIds : # only consider featId if current row has not been considered before if featId not in colFixDict['id'] : # build bounding box over column bbox = all_features[featId].geometry().boundingBox() bbox.setXMinimum( bbox.xMinimum() + TOLERANCE ) bbox.setXMaximum( bbox.xMaximum() - TOLERANCE ) bbox.setYMinimum( grid_bbox.yMinimum() ) bbox.setYMaximum( grid_bbox.yMaximum() ) # get features in current column colFeatIds = grid_layerIndex.intersects( bbox ) # update fix_dict with features in current column this_fix_dict = { 'id':colFeatIds , 'n':[1]*len(colFeatIds), 'm':[m]*len(colFeatIds) } colFixtDict = update_fix_dict(colFixDict,this_fix_dict) fix_dict = rowFixDict.copy() fix_dict = update_fix_dict(fix_dict,colFixDict) newFeatIds = split_cells(fix_dict, grid_layer) #print("OPTIM OVER %s sec" % (time.time() - start_time)) return() # -- Refinement procedure for nested grids # init iteration counter itCount = 0 # init fix dict fix_dict = { 'id': featIds , 'n':[n]*len(featIds), 'm':[m]*len(featIds) } # Continue until input_features is empty while len(fix_dict['id']) > 0: # Split input_features newFeatIds = split_cells(fix_dict, grid_layer) # Get all the features all_features = {feature.id(): feature for feature in grid_layer.getFeatures()} # Initialize spatial index grid_layerIndex = QgsSpatialIndex() # Fill spatial Index for feat in all_features.values(): grid_layerIndex.insertFeature(feat) # re-initialize the list of features to be fixed fix_dict = { 'id':[] , 'n':[], 'm':[] } # Initialize progress bar if progress_bar is not None : progress_bar.setRange(0,100) 
progress_bar.setValue(0) count = 0 countMax = len(newFeatIds) countUpdate = countMax * 0.05 # update each 5% # Iterate over newFeatures to check topology for newFeatId in newFeatIds: # Get the neighbors of newFeatId that must be fixed this_fix_dict = check_topo( newFeatId, n, m, topo_rules, all_features, grid_layer, grid_layerIndex) # Update fix_dict with this_fix_dict fix_dict = update_fix_dict(fix_dict,this_fix_dict) # update counter count += 1 # update progress_bar if int( np.fmod( count, countUpdate ) ) == 0: prog = int( count / countMax * 100 ) if progress_bar is not None : progress_bar.setValue(prog) QApplication.processEvents() if progress_bar is not None : progress_bar.setValue(100) # Update iteration counter itCount+=1 if labelIter is not None : labelIter.setText(unicode(itCount))
6a604bbea471d8401cbabe65a04e13a51c389413
27,551
def read_custom_enzyme(infile):
    """
    Create a list of custom RNase cleaving sites from an input file
    """
    outlist = []

    with open(infile.rstrip(), 'r') as handle:
        for line in handle:
            if '*' in line and line[0] != '#':
                outlist.append(line.rstrip())

    return outlist
144c6de30a04faa2c9381bfd36bc79fef1b78443
27,552
def all_features(movie_id):
    """Returns the concatenation of visual and audio features for a movie

    The visual and audio features do not have the same number of frames,
    so the surplus frames are dropped.
    """
    T_v = all_visual_features(movie_id)
    T_a = all_audio_features(movie_id)
    min_ = min(T_v.shape[0], T_a.shape[0])
    T_v = T_v[:min_, :]
    T_a = T_a[:min_, :]
    return np.concatenate((T_v, T_a), axis=1)
df46d70d2023beb0d707d62ef9cf4f2af6f9c102
27,553
def resnet34(pretrained=False, shift='TSM', num_segments=8, flow_estimation=0, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if shift == 'TSM':
        model = ResNet(BasicBlock, BasicBlock, [3, 4, 6, 3], num_segments=num_segments,
                       flow_estimation=flow_estimation, **kwargs)
    if pretrained:
        pretrained_dict = model_zoo.load_url(model_urls['resnet34'])
        new_state_dict = model.state_dict()
        for k, v in pretrained_dict.items():
            if k in new_state_dict:
                new_state_dict.update({k: v})
                # print("%s layer has pretrained weights" % k)
        model.load_state_dict(new_state_dict)
    return model
db8468b1eb0c0d5ed022ee70623c67390dedef15
27,554
def normalize_data_v3(data, cal_zero_points=np.arange(-4, -2, 1),
                      cal_one_points=np.arange(-2, 0, 1), **kw):
    """
    Normalizes data according to calibration points
    Inputs:
        data (numpy array): 1D dataset that has to be normalized
        cal_zero_points (range): range specifying what indices in 'data'
                                 correspond to zero
        cal_one_points (range): range specifying what indices in 'data'
                                correspond to one
    """
    # Extract zero and one coordinates
    I_zero = np.mean(data[cal_zero_points])
    I_one = np.mean(data[cal_one_points])
    # Translate the data
    trans_data = data - I_zero
    # Normalize the data
    one_zero_dist = I_one - I_zero
    normalized_data = trans_data / one_zero_dist

    return normalized_data
3c9017abb58745964cd936db21d79a6df4896b4e
27,555
def read(path, encoding="utf-8"):
    """Read string from text file."""
    with open(path, "rb") as f:
        return f.read().decode(encoding)
674a20d7dca76f2c18b140cde0e93138b69d94ea
27,556
def min_equals_max(min, max):
    """
    Return True if minimum value equals maximum value
    Return False if not, or if maximum or minimum value is not defined
    """
    return min is not None and max is not None and min == max
1078e9ed6905ab8b31b7725cc678b2021fc3bc62
27,557
def loadDataSet(fileName):
    """
    Load data: parse the tab-separated floats in the given file.

    Returns:
        dataMat: dataset of the feature values
        labelMat: class labels corresponding to the features, i.e. the target variable
    """
    # Total number of features per sample, not counting the final target variable
    numFeat = len(open(fileName).readline().split('\t')) - 1
    dataMat = []
    labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        # Read one line at a time
        lineArr = []
        # Strip the whitespace around the tab-separated values of the line
        curLine = line.strip().split('\t')
        # i runs from 0 up to numFeat (exclusive)
        for i in range(numFeat):
            # Collect the feature values of this line into one row vector
            lineArr.append(float(curLine[i]))
        # Store the input part of the sample in the dataMat list
        dataMat.append(lineArr)
        # Store the last value of the line, i.e. the class / target variable, in the labelMat list
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
3c9fd81d1aeb2e3723081e3a1b11d07ec08e2118
27,558
def infer_app_url(headers: dict, register_path: str) -> str:
    """
    ref: github.com/aws/chalice#485

    :return: The Chalice Application URL
    """
    host: str = headers["host"]
    scheme: str = headers.get("x-forwarded-proto", "http")
    app_url: str = f"{scheme}://{host}{register_path}"
    return app_url
f3c0d1a19c8a78a0fd2a8663e2cb86427ff8f61b
27,559
def get_index_where_most_spikes_in_unit_list(unit_list):
    """Returns the position in the list of units that has the most spikes.

    :param unit_list: list of unit dictionaries
    :return: index
    :rtype: int
    """
    return np.argmax(Recordings.count_spikes_in_unit_list(unit_list))
41f4b1f4ae1fc1b813769bf9f4606848cb700c11
27,560
def ocr_boxes( img, boxes, halve=False, resize=False, blur=True, sharpen=False, erode=False, dilate=False, lang='eng', config=None): """ Detect strings in multiple related boxes and concatenate the results. Parameters ---------- halve : bool, optional Scan half of the rectangle resize : bool, optional Resize the box before scanning blur : bool, optional Blur the box for better results sharpen : bool, optional Sharpen the image erode : bool, optional Make the text thinner dilate : bool, optional Make the text thicker lang : str, optional Language for the Tesseract engine config: str, optional Configuration string for the 'config' parameter of 'pytesseract' Returns ------- str Concatenated string of strings detected in each box See Also -------- ocr_box """ string_list = [] for order, box in enumerate(boxes): coordinates = box[1] box_string = ocr_box( img, coordinates, halve=halve, resize=resize, blur=blur, sharpen=sharpen, erode=erode, dilate=dilate, lang=lang, config=config) if box_string != 'None' and box_string != 'N/A': string_list.append(box_string) try: string_list[0] #Check if list is empty ocr_string = ' '.join(string_list) except: ocr_string = 'None' return ocr_string
c273118beb263e6ad837cbb343ad0829bd367d30
27,561
def multiply_inv_gaussians(mus, lambdas):
    """Multiplies a series of Gaussians that is given as a list of mean vectors and a list of precision matrices.

    mus: list of means with shape [n, d]
    lambdas: list of precision matrices with shape [n, d, d]

    Returns the mean vector, covariance matrix, and precision matrix of the product
    """
    assert len(mus) == len(lambdas)
    batch_size = int(mus[0].shape[0])
    d_z = int(lambdas[0].shape[-1])
    identity_matrix = tf.reshape(tf.tile(tf.eye(d_z), [batch_size, 1]), [-1, d_z, d_z])
    lambda_new = tf.reduce_sum(lambdas, axis=0) + identity_matrix
    mus_summed = tf.reduce_sum([tf.einsum("bij, bj -> bi", lamb, mu)
                                for lamb, mu in zip(lambdas, mus)], axis=0)
    sigma_new = tf.linalg.inv(lambda_new)
    mu_new = tf.einsum("bij, bj -> bi", sigma_new, mus_summed)
    return mu_new, sigma_new, lambda_new
d8bdce66668ebf5e94f86310633c699ae7c98e16
27,562
import random


def mutationShuffle(individual, indpb):
    """
    Inputs : Individual route
             Probability of mutation between (0, 1)
    Outputs : Mutated individual according to the probability
    """
    size = len(individual)
    for i in range(size):
        if random.random() < indpb:
            swap_indx = random.randint(0, size - 2)
            if swap_indx >= i:
                swap_indx += 1
            individual[i], individual[swap_indx] = \
                individual[swap_indx], individual[i]

    return individual,
dea67e03b2905f1169e1c37b3456364fb55c7174
27,563
def best_shoot_angle(agent_coord: tuple, opponents: list):
    """ Tries to shoot; if it fails, kicks to goal randomly """
    # Get best shoot angle:
    best_angles = []
    player_coord = np.array(agent_coord)
    goal_limits = [np.array([0.9, -0.2]), np.array([0.9, 0]), np.array([0.9, 0.2])]
    for goal_limit in goal_limits:
        angles = []
        for op_idx in range(0, len(opponents)):
            op_coord = np.array([opponents[op_idx].x_pos, opponents[op_idx].y_pos])
            angles.append(get_angle(goalie=op_coord, player=player_coord, point=goal_limit))
        best_angles.append(min(angles))
    # return the best angle available
    return max(best_angles)
98c7cb90fc28b4063ce6c14a84fe8989907268d6
27,564
from typing import Optional


def _parse_line(lineno: int, line: str) -> Optional[UciLine]:  # pylint: disable=unsubscriptable-object
    """Parse a line, raising UciParseError if it is not valid."""
    match = _LINE_REGEX.match(line)
    if not match:
        raise UciParseError("Error on line %d: unrecognized line type" % lineno)
    if match[4] == "#":
        return _parse_comment(lineno, match[3], match[5])
    elif match[8]:
        if match[8] == "package":
            return _parse_package(lineno, match[10])
        elif match[8] == "config":
            return _parse_config(lineno, match[10])
        elif match[8] == "option":
            return _parse_option(lineno, match[10])
        elif match[8] == "list":
            return _parse_list(lineno, match[10])
    return None
eab49cf766b15d769d7b194176276741d197d359
27,567
from django.contrib.auth import authenticate
# Alias Django's login so the checkout view of the same name below does not shadow it.
from django.contrib.auth import login as auth_login


def login(request, template_name="lfs/checkout/login.html"):
    """Displays a form to login or register/login the user within the check out
    process.

    The form's post request goes to lfs.customer.views.login where all the
    logic happens - see there for more.
    """
    # If the user is already authenticated we don't want to show this view at all
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse("lfs_checkout"))

    shop = lfs.core.utils.get_default_shop(request)

    # If only anonymous checkout is allowed we don't want to show this view at all.
    if shop.checkout_type == CHECKOUT_TYPE_ANON:
        return HttpResponseRedirect(reverse("lfs_checkout"))

    # Using Django's default AuthenticationForm
    login_form = AuthenticationForm()
    login_form.fields["username"].label = _(u"E-Mail")
    register_form = RegisterForm()

    if request.POST.get("action") == "login":
        login_form = AuthenticationForm(data=request.POST)
        login_form.fields["username"].label = _(u"E-Mail")
        if login_form.is_valid():
            auth_login(request, login_form.get_user())
            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                                                     msg=_(u"You have been logged in."))

    elif request.POST.get("action") == "register":
        register_form = RegisterForm(data=request.POST)
        if register_form.is_valid():
            email = register_form.data.get("email")
            password = register_form.data.get("password_1")

            # Create user
            user = User.objects.create_user(
                username=email, email=email, password=password)

            # Notify
            lfs.core.signals.customer_added.send(user)

            # Log in user
            user = authenticate(username=email, password=password)
            auth_login(request, user)

            return lfs.core.utils.set_message_cookie(reverse("lfs_checkout"),
                                                     msg=_(u"You have been registered and logged in."))

    return render_to_response(template_name, RequestContext(request, {
        "login_form": login_form,
        "register_form": register_form,
        "anonymous_checkout": shop.checkout_type != CHECKOUT_TYPE_AUTH,
    }))
33d23ba50cff73580ca45519ad26d218194bd341
27,568
import inspect


def _is_valid_concrete_plugin_class(attr):
    """
    :type attr: Any
    :rtype: bool
    """
    return (
        inspect.isclass(attr)
        and issubclass(attr, BasePlugin)

        # Heuristic to determine abstract classes
        and not isinstance(attr.secret_type, abstractproperty)
    )
383e0bff4edac6623b9a2d2da45c3c47c982d72e
27,569
def arrQuartiles(arr, arrMap=None, method=1, key=None, median=None): """ Find quartiles. Also supports dicts. This function know about this quartile-methods: 1. Method by Moore and McCabe's, also used in TI-85 calculator. 2. Classical method, also known as "Tukey's hinges". In common cases it use values from original set, not create new. 3. Mean between method[1] and method[2]. :param int method: Set method for find quartiles. :Example: >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=1) (5.5, 9, 17.0) >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=2) (6, 9, 15) >>> arrQuartiles([1, 5, 6, 7, 9, 12, 15, 19, 20], method=3) (5.75, 9, 16.0) >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=1) (2.0, 7, 12.0) >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=2) (3, 7, 10) >>> arrQuartiles([1, 1, 3, 5, 7, 9, 10, 14, 18], method=3) (2.5, 7, 11.0) """ if method not in (1, 2, 3): raise ValueError('Unknown method: %s'%method) if not arr: return (0, 0, 0) elif len(arr)==1: #? что лучше отдавать if isDict(arr): r=key(arr.values()[0]) if isFunction(key) else arr.values()[0] else: r=key(arr[0]) if isFunction(key) else arr[0] return (0, r, r+1) if not arrMap: arrMap=arrCreateIndexMap(arr, key=key) if median is None: median=arrMedian(arr, arrMap, key=key) def getHalve(isLow=True, includeM=False): tArr=[] for i in arrMap: v=key(arr[i]) if isFunction(key) else arr[i] if isLow and (v<=median if includeM else v<median): tArr.append(v) elif not isLow and (v>=median if includeM else v>median): tArr.append(v) tArrMap=range(len(tArr)) return tArr, tArrMap if method in (1, 2): #methods "Moore and McCabe's" and "Tukey's hinges" tHalveL, tHalveL_arrMap=getHalve(True, method==2) tHalveH, tHalveH_arrMap=getHalve(False, method==2) qL=arrMedian(tHalveL, tHalveL_arrMap) qH=arrMedian(tHalveH, tHalveH_arrMap) elif method==3: #mean between method[1] and method[2] tHalveL1, tHalveL1_arrMap=getHalve(True, False) tHalveH1, tHalveH1_arrMap=getHalve(False, False) qL1=arrMedian(tHalveL1, tHalveL1_arrMap) qH1=arrMedian(tHalveH1, tHalveH1_arrMap) tHalveL2, tHalveL2_arrMap=getHalve(True, True) tHalveH2, tHalveH2_arrMap=getHalve(False, True) qL2=arrMedian(tHalveL2, tHalveL2_arrMap) qH2=arrMedian(tHalveH2, tHalveH2_arrMap) qL=(qL1+qL2)/2.0 qH=(qH1+qH2)/2.0 return qL, median, qH
52de942dc59f293a35696579bd35c877e15c091d
27,571
def check_response_status_code(url, response, print_format):
    """
    Check and print response status of an input url.

    Args:
        url (str): url text.
        response (list): request response from the url request.
        print_format (str): format to print the logs according to.
    """
    if response.status_code == 200:
        print(print_format % (url, colored(".", "green")))
        return f"✓ [{response.status_code}] {url}"
    else:
        print(print_format % (url, colored("x", "red")))
        return f"✘ [{response.status_code}] {url}"
c16f453174ef48b2e6accfba4ec075bb8f2cad0d
27,572
def make_state_manager(config): """ Parameters ---------- config : dict Parameters for this StateManager. Returns ------- state_manager : StateManager The StateManager to be used by the Controller. """ manager_dict = { "hierarchical": HierarchicalStateManager } if config is None: config = {} # Use HierarchicalStateManager by default manager_type = config.pop("type", "hierarchical") manager_class = manager_dict[manager_type] state_manager = manager_class(**config) return state_manager
f662b97c052ccc9188b9f7236b79228cfba983c2
27,573
from datetime import datetime

import pandas as pd


def get_books_information(url: str, headers: dict = None) -> list:
    """
    Create list that contains dicts of cleaned data from google book API.

    Parameters
    ----------
    url: str
        link to resources (default: link to volumes with q=war, target of recruitment task)
    headers: dict
        dict with params for requests.get method. It should contain key and value matched for given url.

    Returns
    -------
    books_list: list
        List containing dicts with chosen values.
    """
    url_json = get_books_json(url, headers)
    items_list = url_json["items"]
    list_of_wanted_params = ["id", "volumeInfo_authors", "volumeInfo_title",
                             "volumeInfo_publishedDate", "volumeInfo_categories",
                             "volumeInfo_averageRating", "volumeInfo_ratingsCount",
                             "volumeInfo_imageLinks_thumbnail"]
    parameters_list = []
    for item in items_list:
        flatten_df = pd.json_normalize(item, sep='_')
        flatten_dict = flatten_df.to_dict(orient='records')[0]
        book_params_dict = {key.rsplit("_", 1)[-1]: flatten_dict.get(key)
                            for key in list_of_wanted_params}
        book_params_dict["_id"] = book_params_dict.pop("id")
        book_date = book_params_dict['publishedDate']
        if "-" in book_date:
            date_time_obj = datetime.strptime(book_date, '%Y-%m-%d')
            book_re_year = int(date_time_obj.year)
            book_params_dict['publishedYear'] = book_re_year
        else:
            book_re_year = int(book_date)
            book_params_dict['publishedYear'] = book_re_year
        parameters_list.append(book_params_dict)
    return parameters_list
1643c2368ed452f9f12ea81f25c468c0240404b4
27,574
from typing import Dict
from typing import Type

import pkg_resources


def get_agents() -> Dict[str, Type[Agent]]:
    """Returns dict of agents.

    Returns:
        Dictionary mapping agent entrypoints name to Agent instances.
    """
    agents = {}
    for entry_point in pkg_resources.iter_entry_points("agents"):
        agents[entry_point.name] = entry_point.load()
    return agents
5b6959278f0dd4b53d81c38d1ef919b0756a5533
27,575
from typing import MutableMapping from typing import Any from typing import Set import yaml def rewrite_schemadef(document: MutableMapping[str, Any]) -> Set[str]: """Dump the schemadefs to their own file.""" for entry in document["types"]: if "$import" in entry: rewrite_import(entry) elif "name" in entry and "/" in entry["name"]: entry_file, entry["name"] = entry["name"].split("/") for field in entry["fields"]: field["name"] = field["name"].split("/")[2] rewrite_types(field, entry_file, True) with open(entry_file[1:], "a", encoding="utf-8") as entry_handle: yaml.main.dump( [entry], entry_handle, Dumper=yaml.dumper.RoundTripDumper ) entry["$import"] = entry_file[1:] del entry["name"] del entry["type"] del entry["fields"] seen_imports = set() def seen_import(entry: MutableMapping[str, Any]) -> bool: if "$import" in entry: external_file = entry["$import"] if external_file not in seen_imports: seen_imports.add(external_file) return True return False return True types = document["types"] document["types"][:] = [entry for entry in types if seen_import(entry)] return seen_imports
d6beec5cb41cef16f23cbcad154998b6d79f1cdb
27,576
def decor(decoration, reverse=False):
    """
    Return given decoration part.

    :param decoration: decoration's name
    :type decoration: str
    :param reverse: true if second tail of decoration wanted
    :type reverse: bool
    :return: decor's tail
    """
    if isinstance(decoration, str) is False:
        raise artError(DECORATION_TYPE_ERROR)
    decoration = indirect_decoration(decoration)
    if reverse is True:
        return DECORATIONS_MAP[decoration][-1]
    return DECORATIONS_MAP[decoration][0]
c8fb59478f72ab76298361fa2dfa4a8720c5a46b
27,577
def group_data(data, degree=3, hash=hash):
    """
    numpy.array -> numpy.array

    Groups all columns of data into all combinations of triples
    """
    new_data = []
    m, n = data.shape
    for indicies in combinations(range(n), degree):
        if 5 in indicies and 7 in indicies:
            print("feature Xd")
        elif 2 in indicies and 3 in indicies:
            print("feature Xd")
        else:
            new_data.append([hash(tuple(v)) for v in data[:, indicies]])
    return array(new_data).T
886afd2f12dc0b4bf0da5648de8bcbf294b74c74
27,578
def get_dataset(dataset_name: str, *args, **kwargs):
    """Get the Dataset instancing lambda from the dictionary and return its evaluation.

    This way, a Dataset object is only instanced when this function is evaluated.

    Arguments:
        dataset_name: The name of the Dataset to be instanced. Must be a key in the `DATASETS` dictionary.

    Returns:
        The corresponding Dataset object.
    """
    try:
        return DATASETS[dataset_name](*args, **kwargs)
    except KeyError as e:
        raise type(e)("{} is not registered as a Dataset.".format(dataset_name))
f3ab44f6ebb9d867bdf9b08d31d70f25832fbb7d
27,579
def _numeric_handler_factory(charset, transition, assertion, illegal_before_underscore, parse_func, illegal_at_end=(None,), ion_type=None, append_first_if_not=None, first_char=None): """Generates a handler co-routine which tokenizes a numeric component (a token or sub-token). Args: charset (sequence): Set of ordinals of legal characters for this numeric component. transition (callable): Called upon termination of this component (i.e. when a character not in ``charset`` is found). Accepts the previous character ordinal, the current character ordinal, the current context, and the previous transition. Returns a Transition if the component ends legally; otherwise, raises an error. assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is a legal start to the component. illegal_before_underscore (sequence): Set of ordinals of illegal characters to precede an underscore for this component. parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a thunk that lazily parses the token. illegal_at_end (Optional[sequence]): Set of ordinals of characters that may not legally end the value. ion_type (Optional[IonType]): The type of the value if it were to end on this component. append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if it occurs first in this component (e.g. an underscore in many cases). first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that occurs first in this component. This is useful for preparing the token for parsing in the case where a particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value should be replaced with 'e' for compatibility with python's Decimal type). """ @coroutine def numeric_handler(c, ctx): assert assertion(c, ctx) if ion_type is not None: ctx.set_ion_type(ion_type) val = ctx.value if c != append_first_if_not: first = c if first_char is None else first_char val.append(first) prev = c c, self = yield trans = ctx.immediate_transition(self) while True: if _ends_value(c): if prev == _UNDERSCORE or prev in illegal_at_end: _illegal_character(c, ctx, '%s at end of number.' % (_chr(prev),)) trans = ctx.event_transition(IonThunkEvent, IonEventType.SCALAR, ctx.ion_type, parse_func(ctx.value)) if c == _SLASH: trans = ctx.immediate_transition(_number_slash_end_handler(c, ctx, trans)) else: if c == _UNDERSCORE: if prev == _UNDERSCORE or prev in illegal_before_underscore: _illegal_character(c, ctx, 'Underscore after %s.' % (_chr(prev),)) else: if c not in charset: trans = transition(prev, c, ctx, trans) else: val.append(c) prev = c c, _ = yield trans return numeric_handler
5033f406918b6e9aeecba9d1470f3b6bc10761fa
27,580
def unsafe_content(s):
    """Take the string returned by safe_content() and recreate the
    original string."""
    # don't have to "unescape" XML entities (parser does it for us)

    # unwrap python strings from unicode wrapper
    if s[:2] == unichr(187) * 2 and s[-2:] == unichr(171) * 2:
        s = s[2:-2].encode('us-ascii')

    return s
ec92c977838412ff6fb67297a300cbc77a450661
27,581
def _AcosGrad(op, grad):
    """Returns grad * -1/sqrt(1-x^2)."""
    x = op.inputs[0]
    with ops.control_dependencies([grad.op]):
        x = math_ops.conj(x)
        x2 = math_ops.square(x)
        one = constant_op.constant(1, dtype=grad.dtype)
        den = math_ops.sqrt(math_ops.subtract(one, x2))
        inv = math_ops.reciprocal(den)
        return -grad * inv
7a6d67e09f6b2997476c41fe37c235349e5c5c79
27,582
def converter(n, decimals=0, base=pi):
    """Takes n in base 10 and returns it in any base (default is pi),
    with an optional number of decimals."""
    result = []  # list to capture the converted digits

    # check for a proper base
    if base <= 0:
        base = pi

    # if n is zero then set the starting power to zero
    if n != 0:
        s_power = int(log(abs(n), base))
    else:
        s_power = 0

    div = abs(n)  # starting point for iterative division
    if n < 0:
        result.append("-")

    """
    n / base^(s_power - 0) = n1
    R / base^(s_power - 1) = n2   (R = remainder; update the remainder when a
    R / base^(s_power - 2) = n3    division yields > 1)
    ..........
    R / base^(0)  = n..
    R / base^(-1) = n..
    ..........
    R / base^(-decimals) = n..
    """
    for i in range(0, (s_power + abs(decimals) + 1)):
        if (s_power - i) == -1:
            result.append(".")
        var = int(div / base**(s_power - i))
        result.append(num[var])
        if var > 0:
            div = div % base**(s_power - i)

    return "".join(result)
e14b2667ad73ddee13df2dd526c5cdcdfde8a2f5
27,583
import json import requests def mediaAddcastInfo(): """ :return: """ reqUrl = req_url('media', "/filmCast/saveFilmCastList") if reqUrl: url = reqUrl else: return "服务host匹配失败" headers = { 'Content-Type': 'application/json', 'X-Region-Id': '2', } body = json.dumps( { "filmId": 4934007395236725, "castList": [{ "castId": 4886219191944844, "phone": "", "headImage": "https://g.smartcinemausa.com/images/1e4e91eb2d674d64a8922b9530525e94-649-649.jpg", "international": { "zh_TW": { "castName": "導演信命", "filmRoleName": "導演角色" }, "zh_CN": { "castName": "导演姓名", "filmRoleName": "导演角色" }, "en_US": { "castName": "Name of director", "filmRoleName": "Director role" } }, "filmRoleId": "2", "roleId": None, "idx": 1 }, { "castId": 4886219191944845, "phone": "", "headImage": "https://g.smartcinemausa.com/images/3239d1b2a1ee4a4a87e6a2f534812e8e-658-658.jpg", "international": { "zh_TW": { "castName": "繁体主演姓名", "filmRoleName": "繁体主演角色" }, "zh_CN": { "castName": "简体主演姓名", "filmRoleName": "简体主演角色" }, "en_US": { "castName": "Starring name", "filmRoleName": "Starring role" } }, "filmRoleId": "1", "roleId": None, "idx": 2 }, { "castId": 4886219191944846, "phone": "", "headImage": "https://g.smartcinemausa.com/images/33687cfa20fa49a3b064e4e884d8efe7-900-900.jpg", "international": { "zh_TW": { "castName": "我是編劇", "filmRoleName": "編劇人" }, "zh_CN": { "castName": "我是编剧", "filmRoleName": "编剧人" }, "en_US": { "castName": "Screenwriter", "filmRoleName": "Screenwriter json" } }, "filmRoleId": "4", "roleId": None, "idx": 3 }, { "castId": 4886219191944847, "phone": "", "headImage": "https://g.smartcinemausa.com/images/d5f54a31a1e5466f991b50dc31eb024f-441-441.jpg", "international": { "zh_TW": { "castName": "我是群演", "filmRoleName": "路人丁" }, "zh_CN": { "castName": "我是群演", "filmRoleName": "路人丁" }, "en_US": { "castName": "我是群演", "filmRoleName": "路人丁" } }, "filmRoleId": "101", "roleId": None, "idx": 4 }] } ) result = requests.post(url=url, headers=headers, data=body) resultJ = json.loads(result.content) return resultJ
0826c22acea653234817f9838cd82cbc7045b07b
27,584
def process_register_fns():  # noqa: WPS210
    """Registration in FNS process."""
    form = RegisterFnsForm()
    if form.validate_on_submit():
        email = current_user.email
        name = current_user.username
        phone = form.telephone.data
        registration_fns(email, name, phone)
        flash('Wait for an SMS from KKT-NALOG')
        return redirect(
            url_for('user.profile', username=current_user.username)
        )
    for field, errors in form.errors.items():
        for error in errors:
            flash(
                f'Error in the field "{getattr(form, field).label.text}": - {error}'
            )
    return redirect(url_for('user.profile', username=current_user.username))
f5e7b3b88bbcd610d675aef020538fb87d1a8887
27,585
import socket


def validate_args(args):
    """
    Checks if the arguments are valid or not.
    """
    # Is the number of sockets positive?
    if not args.number > 0:
        print("[ERROR] Number of sockets should be positive. Received %d" % args.number)
        exit(1)

    # Is it a valid IP address or a valid name?
    try:
        servers = socket.getaddrinfo(args.address, args.port, proto=socket.IPPROTO_TCP)
        return servers[0]
    except socket.gaierror as error:
        print(error)
        print("Please, provide a valid IPv4, IPv6 address or a valid domain name.")
        exit(1)
37a77f59ae78e3692e08742fab07f35cf6801e54
27,586
import re


def register(request):
    """Register a new user."""
    if request.method == 'GET':
        # Show the registration page
        return render(request, 'register.html')
    else:
        # Handle the registration
        # 1. Receive the parameters
        username = request.POST.get('user_name')  # None
        password = request.POST.get('pwd')
        email = request.POST.get('email')

        # 2. Validate the parameters (server-side validation)
        # Check that the data is complete
        if not all([username, password, email]):
            return render(request, 'register.html', {'errmsg': 'Incomplete data'})

        # Check the email format
        if not re.match(r'^[a-z0-9][\w.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
            return render(request, 'register.html', {'errmsg': 'Invalid email format'})

        # Check whether the username is already registered
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = None

        if user is not None:
            return render(request, 'register.html', {'errmsg': 'Username already registered'})

        # Check whether the email is already registered...

        # 3. Business logic: register the user
        user = User.objects.create_user(username, email, password)
        user.is_active = 0
        user.save()

        # 4. Return a response: redirect to the home page
        return redirect(reverse('goods:index'))
54f8a1eb8a0378e18634f0708cc5c4de44b88b20
27,587
def triangulation_to_vedo_mesh(tri, **kwargs):
    """
    Transform my triangulation class to a vedo Mesh.

    :param kwargs: keyword arguments passed on to vedo.Mesh
    """
    coords = tri.coordinates
    trias = tri.triangulation
    m = vedo.Mesh([coords, trias], **kwargs)
    return m
73c14809269dab93ef3acef973e8727528e7d1bf
27,588
def build_dropout(cfg, default_args=None):
    """Builder for drop out layers."""
    return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
e04797e311992da39ea2b3b07c90508e3afa44ce
27,589
def conv2d_transpose(inputs,
                     num_output_channels,
                     kernel_size,
                     scope,
                     stride=[1, 1],
                     padding='SAME',
                     use_xavier=True,
                     stddev=1e-3,
                     weight_decay=0.0,
                     activation_fn=tf.nn.relu,
                     bn=False,
                     bn_decay=None,
                     is_training=None):
    """ 2D convolution transpose with non-linear operation.

    Args:
        inputs: 4-D tensor variable BxHxWxC
        num_output_channels: int
        kernel_size: a list of 2 ints
        scope: string
        stride: a list of 2 ints
        padding: 'SAME' or 'VALID'
        use_xavier: bool, use xavier_initializer if true
        stddev: float, stddev for truncated_normal init
        weight_decay: float
        activation_fn: function
        bn: bool, whether to use batch norm
        bn_decay: float or float tensor variable in [0,1]
        is_training: bool Tensor variable

    Returns:
        Variable tensor

    Note: conv2d(conv2d_transpose(a, num_out, ksize, stride), a.shape[-1], ksize, stride) == a
    """
    with tf.variable_scope(scope) as sc:
        kernel_h, kernel_w = kernel_size
        num_in_channels = inputs.get_shape()[-1].value
        kernel_shape = [kernel_h, kernel_w,
                        num_output_channels, num_in_channels]  # reversed to conv2d
        kernel = _variable_with_weight_decay('weights',
                                             shape=kernel_shape,
                                             use_xavier=use_xavier,
                                             stddev=stddev,
                                             wd=weight_decay)
        stride_h, stride_w = stride

        # from slim.convolution2d_transpose
        def get_deconv_dim(dim_size, stride_size, kernel_size, padding):
            dim_size *= stride_size
            if padding == 'VALID' and dim_size is not None:
                dim_size += max(kernel_size - stride_size, 0)
            return dim_size

        # calculate output shape
        batch_size = inputs.get_shape()[0].value
        height = inputs.get_shape()[1].value
        width = inputs.get_shape()[2].value
        out_height = get_deconv_dim(height, stride_h, kernel_h, padding)
        out_width = get_deconv_dim(width, stride_w, kernel_w, padding)
        output_shape = [batch_size, out_height, out_width, num_output_channels]

        outputs = tf.nn.conv2d_transpose(inputs, kernel, output_shape,
                                         [1, stride_h, stride_w, 1],
                                         padding=padding)
        biases = _variable_on_cpu('biases', [num_output_channels],
                                  tf.constant_initializer(0.0))
        outputs = tf.nn.bias_add(outputs, biases)

        if bn:
            outputs = batch_norm_for_conv2d(outputs, is_training,
                                            bn_decay=bn_decay, scope='bn')

        if activation_fn is not None:
            outputs = activation_fn(outputs)
        return outputs
df0bd44b9ef10ca9af3f1106f314b82cf84358b8
27,590
def open_py_file(f_name):
    """
    :param f_name: name of the .py file (with extension)
    :return: a tuple of (open file object, file name); "(1)" is inserted before
        the extension as many times as needed so the name does not already exist
    """
    try:
        f = open(f_name, "x")
        return f, f_name
    except IOError:
        return open_py_file(f_name[:-3] + "(1)" + f_name[-3:])
bcc40f7757e0e4573b69b843c5050ad27546be53
27,591
import numpy as np


def cast_rays(rays_o, rays_d, z_vals, r):
    """shoot viewing rays from camera parameters.

    Args:
        rays_o: tensor of shape `[...,3]` origins of the rays.
        rays_d: tensor of shape `[...,3]` directions of the rays.
        z_vals: tensor of shape [...,N] segments of the rays
        r: radius of ray cone. 1/f*2/\sqrt(12)

    Returns:
        mu: tensor of shape `[...,N,3]` mean query positions
        cov_diag: tensor of shape `[...,N,3]` covariance of query positions.
    """
    t0, t1 = z_vals[..., :-1], z_vals[..., 1:]
    c, d = (t0 + t1)/2, (t1 - t0)/2
    t_mean = c + (2*c*d**2) / (3*c**2 + d**2)
    t_var = (d**2)/3 - (4/15) * ((d**4 * (12*c**2 - d**2)) / (3*c**2 + d**2)**2)
    r_var = r**2 * ((c**2)/4 + (5/12) * d**2 - (4/15) * (d**4) / (3*c**2 + d**2))
    mu = rays_d[..., None, :] * t_mean[..., None]
    null_outer_diag = 1 - (rays_d**2) / \
        np.sum(rays_d**2, axis=-1, keepdims=True)
    cov_diag = (t_var[..., None] * (rays_d**2)[..., None, :] +
                r_var[..., None] * null_outer_diag[..., None, :])
    return mu + rays_o[..., None, :], cov_diag
a647feda10263755e519e6965517ca311fcf87df
27,592
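A minimal NumPy sketch of calling `cast_rays` as reconstructed above (two rays with five samples each, i.e. four conical segments per ray); the directions and cone radius are made-up illustration values.

```python
import numpy as np

rays_o = np.zeros((2, 3))                               # both rays start at the origin
rays_d = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]])   # unit directions
z_vals = np.tile(np.linspace(1.0, 2.0, 5), (2, 1))      # 5 samples -> 4 segments per ray

mu, cov_diag = cast_rays(rays_o, rays_d, z_vals, r=0.01)
print(mu.shape, cov_diag.shape)  # (2, 4, 3) (2, 4, 3)
```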
from typing import Any


def ifnone(x: Any, y: Any):
    """ returns x if x is not None, otherwise returns y """
    val = x if x is not None else y
    return val
f2c7cf335ff919d610a23fac40d6af61e6a1e595
27,593
import boto3
import sagemaker


def get_session(region, default_bucket):
    """Gets the sagemaker session based on the region.

    Args:
        region: the aws region to start the session
        default_bucket: the bucket to use for storing the artifacts

    Returns:
        `sagemaker.session.Session` instance
    """
    boto_session = boto3.Session(region_name=region)

    sagemaker_client = boto_session.client("sagemaker")
    runtime_client = boto_session.client("sagemaker-runtime")
    return sagemaker.session.Session(
        boto_session=boto_session,
        sagemaker_client=sagemaker_client,
        sagemaker_runtime_client=runtime_client,
        default_bucket=default_bucket,
    )
e4944e2b21f9bc666ad29a8ad1d09ac8c44df390
27,594
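Hypothetical usage sketch: the region and bucket name are placeholders, and the call only succeeds with valid AWS credentials and the `boto3`/`sagemaker` packages installed.

```python
session = get_session(region="us-east-1", default_bucket="my-sagemaker-artifacts")
print(session.boto_region_name)   # "us-east-1"
print(session.default_bucket())   # "my-sagemaker-artifacts"
```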
def xi_einasto_at_r(r, M, conc, alpha, om, delta=200, rhos=-1.):
    """Einasto halo profile.

    Args:
        r (float or array like): 3d distances from halo center in Mpc/h comoving
        M (float): Mass in Msun/h; not used if rhos is specified
        conc (float): Concentration
        alpha (float): Profile exponent
        om (float): Omega_matter, matter fraction of the density
        delta (int): Overdensity, default is 200
        rhos (float): Scale density in Msun h^2/Mpc^3 comoving; optional

    Returns:
        float or array like: Einasto halo profile.
    """
    r = _ArrayWrapper(r, 'r')
    xi = _ArrayWrapper.zeros_like(r)
    cluster_toolkit._lib.calc_xi_einasto(r.cast(), len(r), M, rhos, conc,
                                         alpha, delta, om, xi.cast())
    return xi.finish()
68b86aabc08bd960e1fa25691a1f421414dc25fa
27,595
import re


def _rst_links(contents: str) -> str:
    """Convert reStructuredText hyperlinks"""
    links = {}

    def register_link(m: re.Match[str]) -> str:
        refid = re.sub(r"\s", "", m.group("id").lower())
        links[refid] = m.group("url")
        return ""

    def replace_link(m: re.Match[str]) -> str:
        text = m.group("id")
        refid = re.sub(r"[\s`]", "", text.lower())
        try:
            return f"[{text.strip('`')}]({links[refid]})"
        except KeyError:
            return m.group(0)

    # Embedded URIs
    contents = re.sub(
        r"`(?P<text>[^`]+)<(?P<url>.+?)>`_", r"[\g<text>](\g<url>)", contents
    )
    # External Hyperlink Targets
    contents = re.sub(
        r"^\s*..\s+_(?P<id>[^\n:]+):\s*(?P<url>http\S+)",
        register_link,
        contents,
        flags=re.MULTILINE,
    )
    contents = re.sub(r"(?P<id>[A-Za-z0-9_\-.:+]|`[^`]+`)_", replace_link, contents)
    return contents
c7c937cdc04f9d5c3814538978062962e6407d65
27,596
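A small sketch of what `_rst_links` produces for the two link styles it handles (embedded URIs and named hyperlink targets); the URLs below are dummies.

```python
embedded = "See the `docs <https://example.com>`_ for details."
print(_rst_links(embedded))
# See the [docs ](https://example.com) for details.

named = "Read the `spec`_ first.\n.. _spec: https://example.com/spec\n"
print(_rst_links(named))
# Read the [spec](https://example.com/spec) first.   (the target line is removed)
```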
def fibonacci(n: int) -> int:
    """
    Calculate the nth Fibonacci number using naive recursive implementation.

    :param n: the index into the sequence
    :return: The nth Fibonacci number is returned.
    """
    if n == 1 or n == 2:
        return 1
    else:
        return fibonacci(n - 1) + fibonacci(n - 2)
08de1ff55f7cada6a940b4fb0ffe6ba44972b42d
27,597
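For reference, the first ten values produced by this naive recursion (which runs in exponential time, so large n will be slow):

```python
print([fibonacci(n) for n in range(1, 11)])
# [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
```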
def validator_msg(msg):
    """Validator decorator wraps return value in a message container.

    Usage:

        @validator_msg('assert len(x) <= 2')
        def validate_size(x):
            return len(x) <= 2

    Now, if `validate_size` returns a falsy value `ret`, for instance if
    provided with range(4), we will have
    `ret.msg = 'assert len(x) <= 2 is false on input [0, 1, 2, 3]`.
    On the other hand, if `validate_size` returns a true value `ret`, for
    instance if provided with [0, 1], we will have
    `ret.msg = 'assert len(x) <= 2 is true on input [0, 1]`.
    """
    def real_decorator(function):
        class Wrapper(object):
            def __init__(self, function, msg):
                self._function = function
                self._msg = msg

            @property
            def msg(self):
                return self._msg

            def _build_argument_str(self, *args, **kwargs):
                elems = [str(arg) for arg in args]
                elems += [
                    "{}={}".format(str(key), str(value))
                    for key, value in kwargs.items()
                ]
                return ", ".join(elems)

            def __call__(self, *args, **kwargs):
                res = self._function(*args, **kwargs)
                argument_str = self._build_argument_str(*args, **kwargs)
                return BooleanResult(res, self._msg, argument_str)

        return Wrapper(function, msg)

    return real_decorator
1913408a9e894fbe3ca70a4b6529322958e75678
27,598
def iter_fgsm_t(x_input_t, preds_t, target_labels_t, steps, total_eps, step_eps,
                clip_min=0.0, clip_max=1.0, ord=np.inf, targeted=False):
    """ I-FGSM attack. """
    eta_t = fgm(x_input_t, preds_t, y=target_labels_t, eps=step_eps, ord=ord,
                clip_min=clip_min, clip_max=clip_max, targeted=targeted) - x_input_t

    if ord == np.inf:
        eta_t = tf.clip_by_value(eta_t, -total_eps, total_eps)
    elif ord in [1, 2]:
        # reduce over all but the batch dimension
        reduc_ind = list(range(1, len(eta_t.get_shape())))
        if ord == 1:
            norm = tf.reduce_sum(tf.abs(eta_t),
                                 reduction_indices=reduc_ind,
                                 keep_dims=True)
        elif ord == 2:
            norm = tf.sqrt(tf.reduce_sum(tf.square(eta_t),
                                         reduction_indices=reduc_ind,
                                         keep_dims=True))
        eta_t = eta_t * total_eps / norm

    x_adv_t = x_input_t + eta_t
    return x_adv_t
381f50b66ce3105c13f31afae5127e8574346a36
27,599
def list_rbd_volumes(pool):
    """List volumes names for given ceph pool.

    :param pool: ceph pool name
    """
    try:
        out, err = _run_rbd('rbd', '-p', pool, 'ls')
    except processutils.ProcessExecutionError:
        # No problem when no volume in rbd pool
        return []

    return [line.strip() for line in out.splitlines()]
4c80b6c952a19834a79622c31453af62db12e740
27,600
from datetime import date, datetime
from typing import Optional

from dateutil.parser import isoparse


def parse_date(string: Optional[str]) -> Optional[date]:
    """
    Parse a string to a date, trying ISO-8601 first and then a plain
    ``YYYY-MM-DD`` parse; returns ``None`` if parsing fails.
    """
    if not string or not isinstance(string, str):
        return None
    try:
        return isoparse(string[:10]).date()
    except ValueError:
        pass
    try:
        return datetime.strptime(string[:10], "%Y-%m-%d").date()
    except ValueError:
        pass
    return None
f29bb415a0f8d08dbcefe8ae95b5d08f898eecfb
27,601
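A few illustrative calls, assuming `python-dateutil` is installed for `isoparse`:

```python
print(parse_date("2023-07-14T10:30:00Z"))  # 2023-07-14 (only the first 10 characters are parsed)
print(parse_date("2023-07-14"))            # 2023-07-14
print(parse_date("not a date"))            # None
print(parse_date(None))                    # None
```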
from scipy import signal as sig


def butter_bandpass(lowcut, highcut, fs, order=5):
    """
    Taken from https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
    Creates a butterworth bandpass filter of order 'order', over frequency band [lowcut, highcut].

    :param lowcut: Lowcut frequency in Hz
    :param highcut: Highcut frequency in Hz
    :param fs: Sampling frequency in Hz
    :param order: Order of the Butterworth filter
    :return: (b, a) filter coefficients, to be applied with e.g. scipy.signal.lfilter
    """
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    b, a = sig.butter(order, [low, high], btype='band')
    return b, a
148f581e36a0d1a53f931b5ce2301db8dd2cde17
27,602
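A minimal sketch of applying the returned coefficients with `scipy.signal.lfilter`; the 8-13 Hz band and 250 Hz sampling rate are arbitrary example values.

```python
import numpy as np
from scipy.signal import lfilter

fs = 250.0
b, a = butter_bandpass(8.0, 13.0, fs, order=4)

t = np.arange(0, 2, 1 / fs)
x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 50 * t)
y = lfilter(b, a, x)  # the 10 Hz component passes, the 50 Hz component is attenuated
```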
from operator import itemgetter


def get_archives_to_prune(archives, hook_data):
    """Return list of keys to delete."""
    files_to_skip = []
    for i in ['current_archive_filename', 'old_archive_filename']:
        if hook_data.get(i):
            files_to_skip.append(hook_data[i])

    archives.sort(key=itemgetter('LastModified'),
                  reverse=False)  # sort from oldest to newest

    # Drop all but last 15 files
    return [i['Key'] for i in archives[:-15] if i['Key'] not in files_to_skip]
7701e7b145ea28148b77eb63475d7b0e9127d2f0
27,604
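A self-contained sketch with fabricated archive records; the keys and dates are made up purely to show the pruning behaviour.

```python
from datetime import datetime, timedelta

now = datetime(2024, 1, 1)
archives = [
    {"Key": f"build-{i}.zip", "LastModified": now + timedelta(days=i)}
    for i in range(20)
]
hook_data = {"current_archive_filename": "build-19.zip"}

# Only the 5 oldest of the 20 archives fall outside the "last 15" window.
print(get_archives_to_prune(archives, hook_data))
# ['build-0.zip', 'build-1.zip', 'build-2.zip', 'build-3.zip', 'build-4.zip']
```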
def tf_pad(tensor, paddings, mode):
    """
    Pads a tensor according to paddings.
    mode can be 'ZERO' or 'EDGE' (Just use tf.pad for other modes).
    'EDGE' padding is equivalent to repeatedly doing symmetric padding with all
    pads at most 1.

    Args:
        tensor (Tensor).
        paddings (list of list of non-negative ints).
        mode (str).

    Returns:
        Padded tensor.
    """
    paddings = np.array(paddings, dtype=int)
    assert np.all(paddings >= 0)
    while not np.all(paddings == 0):
        new_paddings = np.array(paddings > 0, dtype=int)
        paddings -= new_paddings
        new_paddings = tf.constant(new_paddings)
        if mode == 'ZERO':
            tensor = tf.pad(tensor, new_paddings, 'CONSTANT', constant_values=0)
        elif mode == 'EDGE':
            tensor = tf.pad(tensor, new_paddings, 'SYMMETRIC')
        else:
            raise Exception('pad type {} not recognized'.format(mode))
    return tensor
e2e1e9ac2cbef63c4b12bdf35eb35090973d744a
27,605
def compute_reference_gradient_siemens(duration_ms, bandwidth, csa=0):
    """
    Description: computes the reference gradient for exporting RF files to
    SIEMENS format, assuming the gradient level curGrad is desired.

    Theory: the reference gradient is defined as that gradient for which a
    1 cm slice is excited for a 5.12 ms pulse. Demanding the product
    Slicethickness * gamma * gradient * duration to be equal in both cases
    (reference and current), one obtains

        gamma*refGrad*(10 mm)*(5.12 ms) = gamma*curGrad*curThickness*pulse.tp

    However, gamma*curGrad*curThickness = the pulse's bandwidth, pulseBW, so

        refGrad = pulseBW*pulse.tp / (gamma*(10 mm)*(5.12 ms))

    In general, the formula is,

                                   (pulse_duration[ms]*pulse_bandwidth[kHz])
        Ref_grad [mT/m] = ------------------------------------------------------------
                          (Gyr[kHz/mT] * Ref_slice_thickness[m] * Ref_pulse_duration[ms])

    Input Variables
    Variables Name    Units    Description
    ------------------------------------
    duration_ms       ms       Duration of pulse
    bandwidth         kHz      Bandwidth of current pulse
    csa               kHz      Chemical shift artifact "immunity" - see below.
                               Optional, set to 0 if not present.

    Output Variables
    Variables Name    Units    Description
    ------------------------------------
    ref_grad          mT/m     Reference gradient

    Chemical Shift Artifact immunity: Since different chemical shifts shift the
    excitation region, it follows that if we want to excite a range [-x,+x], we
    will not actually excite that range for any offset other than 0 if we
    calibrate our gradient for 0 offset. However, we CAN calibrate our gradient
    for 0 offset BUT excite a larger range [-x-dx, x+dx] such that the pulse
    will affect all chemical shifts equally. This of course comes at the price
    of exciting a larger region which might have unwanted signals. This however
    is good for:
    1. Cases in which there are not external unwanted signals.
    2. For dual-band suppression pulses, one sometimes uses the PASSBAND, which
       is also the VOI, to calibrate the pulse. If we don't want any spins in
       the VOI affected despite their varying chemical shifts we can grant them
       immunity, at the cost of pushing away the suppression bands - this works
       if, e.g., we're interested in killing off fat away from the VOI, so we
       don't care if a bit of signal comes from the region close to the VOI.
    To use, set CSA to the range of +-chemical shifts you want to feel the
    pulse. e.g., if you want all spins +-100 Hz from resonance to be affected
    equally within the VOI, set CSA = 0.1.
    """
    ref_slice_thickness = 0.01  # in meters
    ref_duration = 5.12  # ms
    gyromagnetic_ratio = 42.57  # kHz/milliTesla
    ref_grad = ((bandwidth-2*csa)*duration_ms)/(gyromagnetic_ratio*ref_slice_thickness*ref_duration)
    return ref_grad
65cf8bd8e805e37e5966170daeae90594e45595e
27,606
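A worked example using the reference pulse duration itself (5.12 ms) with an assumed 2 kHz bandwidth and no CSA margin:

```python
ref_grad = compute_reference_gradient_siemens(duration_ms=5.12, bandwidth=2.0)
# (2.0 * 5.12) / (42.57 * 0.01 * 5.12) = 2.0 / 0.4257 ≈ 4.70 mT/m
print(round(ref_grad, 2))  # 4.7
```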
def schedule_conv2d_NHWC_quantized_native(cfg, outs):
    """ Interface for native schedule_conv2d_NHWC_quantized"""
    return _schedule_conv2d_NHWC_quantized(cfg, outs, False)
450b6fb914c8b0c971604319417d56c7148b5737
27,607
def calc_square_dist(a, b, norm=True):
    """
    Calculating square distance between a and b
    a: [bs, npoint, c]
    b: [bs, ndataset, c]
    """
    a = tf.expand_dims(a, axis=2)  # [bs, npoint, 1, c]
    b = tf.expand_dims(b, axis=1)  # [bs, 1, ndataset, c]
    a_square = tf.reduce_sum(tf.square(a), axis=-1)  # [bs, npoint, 1]
    b_square = tf.reduce_sum(tf.square(b), axis=-1)  # [bs, 1, ndataset]
    a = tf.squeeze(a, axis=2)  # [bs, npoint, c]
    b = tf.squeeze(b, axis=1)  # [bs, ndataset, c]
    if norm:
        dist = tf.sqrt(a_square + b_square - 2 * tf.matmul(a, tf.transpose(b, [0, 2, 1]))) / \
            tf.cast(tf.shape(a)[-1], tf.float32)  # [bs, npoint, ndataset]
    else:
        dist = a_square + b_square - 2 * tf.matmul(a, tf.transpose(b, [0, 2, 1]))  # [bs, npoint, ndataset]
    return dist
d855e438d4bcca4eb43a61fa6761a1cd5afd7731
27,608
def read_config(lines):
    """Read the config into a dictionary"""
    d = {}
    current_section = None
    for i, line in enumerate(lines):
        line = line.strip()
        if len(line) == 0 or line.startswith(";"):
            continue
        if line.startswith("[") and line.endswith("]"):
            current_section = line[1:-1]
            d[current_section] = {}
        else:
            if "=" not in line:
                raise ValueError("No = in line: {}".format(line))
            key, val = line.split("=", maxsplit=1)
            if key in d[current_section]:
                old_val = d[current_section][key]
                if type(old_val) == list:
                    old_val.append(val)
                else:
                    d[current_section][key] = [old_val, val]
            else:
                d[current_section][key] = val
    return d
613ed9291ab6546700b991fc9a5fc301c55ae497
27,609
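An illustrative parse; note the parser does not strip whitespace around '=', so the example omits spaces, and repeated keys inside a section collect into a list.

```python
lines = [
    "[server]",
    "host=127.0.0.1",
    "port=8080",
    "; comment lines starting with ';' are skipped",
    "[users]",
    "name=alice",
    "name=bob",
]
cfg = read_config(lines)
print(cfg["server"]["port"])  # '8080'
print(cfg["users"]["name"])   # ['alice', 'bob']
```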
def get_cert_subject_hash(cert):
    """
    Get the hash value of the cert's subject DN

    :param cert: the certificate to get subject from
    :return: The hash value of the cert's subject DN
    """
    try:
        public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)
        cert_c = crypto.load_certificate(crypto.FILETYPE_PEM, public_bytes)
        hash_subject = cert_c.get_subject().hash()
    except Exception:
        LOG.exception("Failed to get certificate subject hash.")
        raise exception.SysinvException(_(
            "Failed to get certificate subject hash."))
    return hash_subject
070284984018ba08f568541a4d7ba815d72bd025
27,610
import requests


def icon_from_url(url: str):
    """
    A very simple attempt at matching up a game URL with its representing icon.
    We attempt to parse the URL and return the favicon. If that fails, return a
    pre-determined image based on the URL.
    """
    if not url:
        return

    # Allow the user to override icons in the configuration
    for partial_string, icon_url in configuration.get("icons").items():
        if partial_string in url:
            return icon_url

    # Try some known-good icons
    for partial_string, icon_url in default_icon_urls.items():
        if partial_string in url:
            return icon_url

    # Try the site's favicon
    parts = urlparse(url)
    if parts.netloc:
        icon_url = f"{parts.scheme}://{parts.netloc}/favicon.ico"
        response = requests.get(icon_url)
        try:
            response.raise_for_status()
            return icon_url
        except requests.HTTPError:
            LOGGER.warning(
                "Invalid icon URL (return code %s): %s", response.status_code, icon_url
            )
        except requests.ConnectionError as e:
            LOGGER.warning("Error while connecting to %s: %s", icon_url, e)

    return None
85f8a539cacbc86e58cf4e3815babdc744352626
27,611
import h5py
import numpy


def extract_spikes(hd5_file, neuron_num=0):
    """Extracts the spiking data from the hdf5 file. Returns an array of spike times.

    Keyword arguments:
    neuron_num -- the index of the neuron you would like to access.
    """
    with h5py.File(hd5_file, "r+") as f:
        neuron_list = f['NF']['value']['neurons']['value']
        if len(neuron_list) <= 10:
            neuron_str = "_" + str(neuron_num)
        else:
            neuron_str = "_" + "0" * (2 - len(str(neuron_num))) + str(neuron_num)
        timestamps = numpy.array(neuron_list[neuron_str]['value']['timestamps']['value'][0])
    return timestamps
85df1525595c0141dd885d041b54b02aa5dc1283
27,612
def problem_kinked(x):
    """Return function with kink."""
    return np.sqrt(np.abs(x))
12897b83fa4c42cfbe608add92cf5ef0736463ef
27,613
def read_dicom(filename):
    """Read DICOM file and convert it to a decent quality uint8 image.

    Parameters
    ----------
    filename: str
        Existing DICOM file filename.
    """
    try:
        data = dicom.read_file(filename)
        img = np.frombuffer(data.PixelData, dtype=np.uint16).copy()

        if data.PhotometricInterpretation == 'MONOCHROME1':
            img = img.max() - img
        img = img.reshape((data.Rows, data.Columns))
        return img, data.ImagerPixelSpacing[0]
    except Exception:
        return None
e8b621dfb7348e12e1fe8d00ae02009789205e86
27,614
import logging


def _filter_all_warnings(record) -> bool:
    """Filter out credential error messages."""
    if record.name.startswith("azure.identity") and record.levelno == logging.WARNING:
        message = record.getMessage()
        if ".get_token" in message:
            # Drop the credential warning from the log output.
            return False
    return True
f16490ef39f9e3a63c791bddcba1c31176b925b7
27,615
import copy


def _to_minor_allele_frequency(genotype):
    """ Use at your own risk """
    g_ = copy.deepcopy(genotype)
    m_ = g_.metadata
    clause_ = m_.allele_1_frequency > 0.5
    F = MetadataTF
    m_.loc[clause_, [F.K_ALLELE_0, F.K_ALLELE_1]] = m_.loc[clause_, [F.K_ALLELE_1, F.K_ALLELE_0]].values
    m_.loc[clause_, F.K_ALLELE_1_FREQUENCY] = 1 - m_.loc[clause_, F.K_ALLELE_1_FREQUENCY].values
    variants = g_.variants
    for i, swap in enumerate(clause_):
        if swap:
            variants[i] = 2 - variants[i]
    return g_
424bf40e29f103abea3dd30ee662c3c695545a10
27,616
def ioka(z=0, slope=950, std=None, spread_dist='normal'):
    """Calculate the contribution of the igm to the dispersion measure.

    Follows Ioka (2003) and Inoue (2004), with default slope value falling
    in between the Cordes and Petroff reviews.

    Args:
        z (array): Redshifts.
        slope (float): Slope of the DM-z relationship.
        std (float): Spread around the DM-z relationship.
        spread_dist (str): Spread function option. Choice from
            ('normal', 'lognormal', 'log10normal')

    Returns:
        dm_igm (array): Dispersion measure of intergalactic medium [pc/cm^3]
    """
    if std is None:
        std = 0.2*slope*z

    # Set up spread distribution
    mean = slope*z
    if spread_dist == 'normal':
        f = np.random.normal
    elif spread_dist == 'lognormal':
        def f(mean, std):
            return gd.lognormal(mean, std)
    elif spread_dist == 'log10normal':
        def f(mean, std):
            return gd.log10normal(mean, std)
    else:
        raise ValueError('spread_dist input not recognised')

    return f(mean, std).astype(np.float32)
422f0e7d6a7c88b8ea6666e192271db81d966743
27,617
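A rough usage sketch with the default normal spread; it assumes `numpy` is imported as `np` at module level (the `gd` helper is only needed for the lognormal options), and the printed values scatter around 950*z.

```python
import numpy as np

np.random.seed(0)
z = np.array([0.1, 0.5, 1.0])
dm_igm = ioka(z)   # means ~ [95, 475, 950] pc/cm^3, each with ~20% scatter
print(dm_igm)
```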
def nn_policy(state_input, policy_arch, dim_action, **kwargs):
    """ Fully-connected agent policy network """
    with tf.variable_scope('policy_net', reuse=tf.AUTO_REUSE):
        for i, h in enumerate(policy_arch):
            state_input = layer.Dense(h, activation='tanh',
                                      # dtype='float64',
                                      name="fc_{}".format(i))(state_input)
        action_out = layer.Dense(dim_action, activation='tanh',
                                 # dtype='float64',
                                 name="fc_action_out")(state_input)
    return action_out, state_input
8513ba88fe77076711c9aaf6bb148df8d9f18c1a
27,618
from shapely.geometry import Point
from shapely.geometry.base import BaseGeometry
from shapely.wkt import loads


def _check_geom(geom):
    """Check if a geometry is loaded in.

    Returns the geometry if it's a shapely geometry object. If it's a wkt
    string or a list of coordinates, convert to a shapely geometry.
    """
    if isinstance(geom, BaseGeometry):
        return geom
    elif isinstance(geom, str):  # assume it's a wkt
        return loads(geom)
    elif isinstance(geom, list) and len(geom) == 2:  # coordinates
        return Point(geom)
5f7e1cc405ab6c67cb6f8342e23698d1e330d49c
27,619
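Quick checks of the three accepted input types (requires `shapely`):

```python
from shapely.geometry import Point

print(_check_geom("POINT (30 10)"))  # parsed from WKT -> POINT (30 10)
print(_check_geom([30.0, 10.0]))     # built from a coordinate pair -> POINT (30 10)
print(_check_geom(Point(30, 10)))    # passed through unchanged
```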
import glob
from typing import List

import pandas as pd


def read_csvs_of_program(program: str) -> List[pd.DataFrame]:
    """
    Given the name of an algorithm program, collects the list of CSV benchmarks
    recorded for that particular program.

    :param program: name of the program whose benchmarks should be retrieved
    :return: list of benchmark DataFrames, one per CSV file
    """
    csv_files = glob.glob(f'./{program}_*.csv')
    dataframes_per_program = []

    for csv_file in csv_files:
        dataframe = pd.read_csv(csv_file, sep=';', decimal='.', encoding='utf-8')
        dataframes_per_program.append(dataframe)

    return dataframes_per_program
de763b5f790150f0340c58fc9d3d53f16d530f34
27,620
from typing import List, Tuple

import numpy as np


def print_best_metric_found(
        tuning_status: TuningStatus,
        metric_names: List[str],
        mode: str
) -> Tuple[int, float]:
    """
    Prints trial status summary and the best metric found.

    :param tuning_status: current tuning status with per-trial metric statistics
    :param metric_names: metric names; only the first metric is summarized
    :param mode: 'min' or 'max', the direction of the optimization
    :return: trial-id and value of the best metric found
    """
    if tuning_status.overall_metric_statistics.count == 0:
        return

    # only report results of the first metric for now in the summary; reporting
    # optimal metrics for multiple objectives would require to display the Pareto set.
    metric_name = metric_names[0]
    print("-" * 20)
    print(f"Resource summary (last result is reported):\n{str(tuning_status)}")
    if mode == 'min':
        metric_per_trial = [
            (trial_id, stats.min_metrics.get(metric_name, np.inf))
            for trial_id, stats in tuning_status.trial_metric_statistics.items()
        ]
        metric_per_trial = sorted(metric_per_trial, key=lambda x: x[1])
    else:
        metric_per_trial = [
            (trial_id, stats.max_metrics.get(metric_name, -np.inf))
            for trial_id, stats in tuning_status.trial_metric_statistics.items()
        ]
        metric_per_trial = sorted(metric_per_trial, key=lambda x: -x[1])
    best_trialid, best_metric = metric_per_trial[0]
    print(f"{metric_name}: best {best_metric} for trial-id {best_trialid}")
    print("-" * 20)
    return best_trialid, best_metric
e91c3222e66ded7ce3ab4ddcf52a7ae77fe84e9f
27,621
def get_svn_revision(path='.', branch='HEAD'):
    """ Returns the SVN revision associated with the specified path and git
        branch/tag/hash. """
    svn_rev = "None"
    cmd = "git log --grep=^git-svn-id: -n 1 %s" % (branch)
    result = exec_cmd(cmd, path)
    if result['err'] == '':
        for line in result['out'].split('\n'):
            if line.find("git-svn-id") > 0:
                svn_rev = line.split("@")[1].split()[0]
                break
    return svn_rev
76d94aca9453e1d949bf70fc6bff4b77bb519479
27,622
import typing as t


def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
    """
    Converts a given string ``val`` into a boolean.

    :param val: any string representation of boolean
    :param strict: raise ``ValueError`` if ``val`` does not look like a boolean-like object
    :return: ``True`` if ``val`` is truthy, ``False`` otherwise.
    :raises ValueError: if ``strict`` specified and ``val`` got anything
        except ``['', 0, 1, true, false, on, off, True, False]``
    """
    if isinstance(val, str):
        val = val.lower()

    flag = ENV_STR_BOOL_COERCE_MAP.get(val, None)

    if flag is not None:
        return flag

    if strict:
        raise ValueError('Unsupported value for boolean flag: `%s`' % val)

    return bool(val)
5ff88bee44b07fb1bd34d1734ba72485a2412b0c
27,623
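Illustrative calls, assuming `ENV_STR_BOOL_COERCE_MAP` (defined elsewhere in the package) maps the usual strings such as 'true'/'on'/'1' to True and 'false'/'off'/'0'/'' to False:

```python
print(coerce_str_to_bool("on"))   # True (per the assumed mapping)
print(coerce_str_to_bool(0))      # False
try:
    coerce_str_to_bool("maybe", strict=True)
except ValueError as exc:
    print(exc)                    # Unsupported value for boolean flag: `maybe`
```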
import csv


def print_labels_from_csv(request):
    """
    Generates a PDF with labels from a CSV.
    """
    if request.FILES:
        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename=labels-from-csv.pdf'

        canvas = Canvas(response, pagesize=(Roll.width, Roll.height))
        hoja = Roll(LogisticsLabel, canvas)
        if request.POST.get('96x30'):
            canvas = Canvas(response, pagesize=(Roll96x30.width, Roll96x30.height))
            hoja = Roll96x30(LogisticsLabel96x30, canvas)
        iterator = hoja.iterator()

        label_list = csv.reader(request.FILES.get('labels'))
        next(label_list)  # consume the header row
        for row in label_list:
            label = next(iterator)
            label.name = row[0].upper()
            label.address = '\n'.join(row[1:])
            label.draw()

        hoja.flush()
        canvas.save()
        return response
    else:
        return render(request, 'print_labels_from_csv.html')
1674d136f5ed183913961fe2f7ce23d4b245f3d7
27,624
def get_ucs_node_list():
    """ Get UCS nodes """
    nodeList = []
    api_data = fit_common.rackhdapi('/api/2.0/nodes')
    for node in api_data['json']:
        if node["obms"] != [] and node["obms"][0]["service"] == "ucs-obm-service":
            nodeList.append(node)
    return nodeList
4fc81f7e71a33be3670d99916828a2348b0e63cd
27,625
def wysiwyg_form_fields(context):
    """Returns activity data as in field/value pair"""
    app = context['app_title']
    model = context['entity_title']
    try:
        return wysiwyg_config(app, model)
    except (KeyError, AttributeError):
        return None
86abca4a8711c3d5eec425975ee00055d0e78ae2
27,627
def thetagrids(angles=None, labels=None, fmt=None, **kwargs):
    """
    Get or set the theta gridlines on the current polar plot.

    Call signatures::

        lines, labels = thetagrids()
        lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)

    When called with no arguments, `.thetagrids` simply returns the tuple
    (*lines*, *labels*). When called with arguments, the labels will
    appear at the specified angles.

    Parameters
    ----------
    angles : tuple with floats, degrees
        The angles of the theta gridlines.

    labels : tuple with strings or None
        The labels to use at each radial gridline. The
        `.projections.polar.ThetaFormatter` will be used if None.

    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`.
        For example '%f'. Note that the angle in radians will be used.

    Returns
    -------
    lines : list of `.lines.Line2D`
        The theta gridlines.

    labels : list of `.text.Text`
        The tick labels.

    Other Parameters
    ----------------
    **kwargs
        *kwargs* are optional `~.Text` properties for the labels.

    See Also
    --------
    .pyplot.rgrids
    .projections.polar.PolarAxes.set_thetagrids
    .Axis.get_gridlines
    .Axis.get_ticklabels

    Examples
    --------
    ::

        # set the locations of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90))

        # set the locations and labels of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
    if all(param is None for param in [angles, labels, fmt]) and not kwargs:
        lines = ax.xaxis.get_ticklines()
        labels = ax.xaxis.get_ticklabels()
    else:
        lines, labels = ax.set_thetagrids(angles, labels=labels, fmt=fmt, **kwargs)
    return lines, labels
a595c4f0ff5af7dae7e20b261d11c6f690344db1
27,628