Dataset schema (column: type, value range):
  content: string, lengths 35 to 762k
  sha1: string, length 40
  id: int64, 0 to 3.66M
def quasi_diagonalize(link):
    """sort clustered assets by distance"""
    link = link.astype(int)
    sort_idx = pd.Series([link[-1, 0], link[-1, 1]])
    num_items = link[-1, 3]  # idx of original items
    while sort_idx.max() >= num_items:
        sort_idx.index = list(range(0, sort_idx.shape[0] * 2, 2))  # make space
        df0 = sort_idx[sort_idx >= num_items]  # find clusters
        i = df0.index
        j = df0.values - num_items
        sort_idx[i] = link[j, 0]  # item 1
        df0 = pd.Series(link[j, 1], index=i + 1)
        sort_idx = sort_idx.append(df0)  # item 2
        sort_idx = sort_idx.sort_index()  # re-sort
        sort_idx.index = list(range(sort_idx.shape[0]))  # re-index
    return sort_idx.tolist()
8f10f62d5f0b3dc7b8687134497dd42f183194b4
20,552
def keyword(variable):
    """
    Verify that the field_name isn't part of known Python keywords
    :param variable: String
    :return: Boolean
    """
    for backend in ADAPTERS:
        if variable.upper() in ADAPTERS[backend]:
            msg = (
                f'Variable "{variable}" is a "{backend.upper()}" '
                f"reserved SQL/NOSQL keyword"
            )
            raise SyntaxError(msg)
    if not VALID_TABLE_FIELD.match(variable) or PYTHON_KEYWORDS.match(variable):
        raise SyntaxError(f"Field: invalid field name: {variable}")
    return f"{variable} isn't a known keyword"
b1c6322d3ce3c9ee4bda4eff251af44ca3e2c699
20,554
import logging
import json


def gcp_api_main(request):
    """Responds to any HTTP request.

    Args:
        request (flask.Request): HTTP request object.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using `make_response
        <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
    """
    logging.basicConfig(level=logging.INFO)
    try:
        request_json = request.get_json()
        if request.args and 'message' in request.args:
            return request.args.get('message')
        elif request_json and 'message' in request_json:
            return request_json['message']
        elif request_json and 'stock_data' in request_json and 'name' in request_json:
            logging.info('run_fb_prophet')
            return json.dumps(
                FBProphetThread.run_fb_prophet(
                    json.dumps(request_json['model_input']))).replace('NaN', '"-"')
        else:
            return f'Hello World!'
    except Exception as ex:
        err_msg = 'Generated an exception: {ex}'.format(ex=ex)
        logging.error(err_msg)
        return err_msg
21ec4b1dba4ad6f5dac518a3907cd15579a0ba00
20,555
def box_area_3d(boxes: Tensor) -> Tensor:
    """
    Computes the area of a set of bounding boxes, which are specified by
    their (x1, y1, x2, y2, z1, z2) coordinates.

    Arguments:
        boxes (Union[Tensor, ndarray]): boxes for which the area will be computed.
            They are expected to be in (x1, y1, x2, y2, z1, z2) format. [N, 6]

    Returns:
        area (Union[Tensor, ndarray]): area for each box [N]
    """
    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 5] - boxes[:, 4])
be8b3c4d58d301d2044e7cfe2844516933c1247f
20,556
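A minimal usage sketch for box_area_3d above, assuming the Tensor annotation refers to torch.Tensor:

import torch

boxes = torch.tensor([[0., 0., 2., 3., 0., 4.]])  # one box spanning 2 x 3 x 4
print(box_area_3d(boxes))  # tensor([24.]) -- the "area" here is really a volume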
from sqlalchemy import create_mock_engine import re def mock_engine(dialect_name=None): """Provides a mocking engine based on the current testing.db. This is normally used to test DDL generation flow as emitted by an Engine. It should not be used in other cases, as assert_compile() and assert_sql_execution() are much better choices with fewer moving parts. """ if not dialect_name: dialect_name = config.db.name buffer = [] def executor(sql, *a, **kw): buffer.append(sql) def assert_sql(stmts): recv = [re.sub(r"[\n\t]", "", str(s)) for s in buffer] assert recv == stmts, recv def print_sql(): d = engine.dialect return "\n".join(str(s.compile(dialect=d)) for s in engine.mock) engine = create_mock_engine(dialect_name + "://", executor) assert not hasattr(engine, "mock") engine.mock = buffer engine.assert_sql = assert_sql engine.print_sql = print_sql return engine
d773a6e2cd0b2060e5dd66d5ec4e758ac7f1f504
20,557
def get_feedback_thread_reply_info_by_reply_to_id(reply_to_id):
    """Gets the domain object corresponding to the model which is fetched by
    reply-to-id field.

    Args:
        reply_to_id: str. The reply_to_id to search for.

    Returns:
        FeedbackThreadReplyInfo or None. The corresponding domain object.
    """
    model = email_models.GeneralFeedbackEmailReplyToIdModel.get_by_reply_to_id(
        reply_to_id)
    if model is None:
        return None
    return get_feedback_thread_reply_info_from_model(model)
e27521030717a1dc15cd9e678dabafba86007f90
20,558
def _cross_correlations(n_states):
    """Returns list of crosscorrelations

    Args:
        n_states: number of local states

    Returns:
        list of tuples for crosscorrelations

    >>> l = _cross_correlations(np.arange(3))
    >>> assert l == [(0, 1), (0, 2), (1, 2)]
    """
    l = n_states
    cross_corr = [[(l[i], l[j]) for j in l[1:][i:]] for i in l[:-1]]
    return [item for sublist in cross_corr for item in sublist]
c11c5655ba655a29991421c6627a3eaca4f7681d
20,559
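An illustrative run of _cross_correlations above, matching its doctest:

import numpy as np

pairs = _cross_correlations(np.arange(3))
assert pairs == [(0, 1), (0, 2), (1, 2)]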
def select_interface(worker):
    """ It gets a worker interface channel to do something. """
    interfaces = worker.interfaces_list()
    if len(interfaces) == 0:
        print(' Error. Worker without interface known.')
        return -1
    elif len(interfaces) == 1:
        return 1
    option = input(' Select interface -> ')
    if option == '':
        return -1
    while not option.isdigit() or int(option) < 1 or int(option) > len(interfaces):
        print(' Error. None worker interface was selected.')
        option = input(' Select interface -> ')
        if option == '':
            return -1
    return int(option)
97d90670dd69d57b4e1f85df250a0abc56106fb6
20,560
def get_middle(arr):
    """ Get middle point ???? """
    n_val = np.array(arr.shape) / 2.0
    n_int = n_val.astype(np.int0)
    # print(n_int)
    if n_val[0] % 2 == 1 and n_val[1] % 2 == 1:
        return arr[n_int[0], n_int[1]]
    if n_val[0] % 2 == 0 and n_val[1] % 2 == 0:
        return np.average(arr[n_int[0]:n_int[0] + 2, n_int[1]:n_int[1] + 2])
    if n_val[0] % 2 == 1 and n_val[1] % 2 == 0:
        return np.average(arr[n_int[0], n_int[1]:n_int[1] + 2])
    return np.average(arr[n_int[0]:n_int[0] + 2, n_int[1]])
9651bcadc991bbf7a0c635a8870356f422d43e7e
20,561
def annotate_segmentation(image, segmentation):
    """Return annotated segmentation."""
    annotation = AnnotatedImage.from_grayscale(image)

    for i in segmentation.identifiers:
        region = segmentation.region_by_identifier(i)
        color = pretty_color()
        annotation.mask_region(region.border.dilate(), color)

    props = skimage.measure.regionprops(segmentation)
    for p in props:
        try:
            minr, minc, maxr, maxc = p.bbox
            cval = int(p.centroid[1])
            line = skimage.draw.line(minr, cval, maxr, cval)
            annotation.mask_region(line, (0, 255, 0))
        except IndexError:
            # Don't draw line if it falls outside of the image.
            pass

    return annotation
2fadbe8d2339e37bea0dbfe054199002a3997b20
20,562
def get_champ_data(champ: str, tier: int, rank: int):
    """ Gives Champ Information by their champname, tier, and rank. """
    champ_info = NewChampsDB()
    try:
        champ_info.get_data(champ, tier, rank)
        champs_dict = {
            "name": f"{champ_info.name}",
            "released": champ_info.released,
            "class": champ_info.class_type,
            "tier": champ_info.tier,
            "rank": champ_info.rank,
            "prestige": champ_info.prestige,
            "hp": champ_info.hp,
            "attack": champ_info.attack,
            "crit_rate": champ_info.crit_rate,
            "crit_dmge": champ_info.crit_dmge,
            "armor": champ_info.armor,
            "block_prof": champ_info.block_prof,
            "energy_resist": champ_info.energy_resist,
            "physical_resist": champ_info.physical_resist,
            "crit_resist": champ_info.crit_resist,
            "sig_info": champ_info.sig_info,
            "abilities": champ_info.abilities,
            "challenger_rating": champ_info.challenger_rating,
            "find": champ_info.find,
            "tags": champ_info.tags,
            "contact": champ_info.contact,
            "url_page": f"{champ_info.url_page}",
            "img_portrait": f"{champ_info.img_portrait}",
            "champid": f"{champ_info.champid}",
        }
        champs_dict.update({"status": 200, "detail": "Successful"})
        return champs_dict
    except Exception as e:
        if isinstance(e, FileNotFoundError):
            raise HTTPException(status_code=404, detail="404: " + champ_info.error)
        elif isinstance(e, KeyError):
            raise HTTPException(status_code=400, detail="400: " + champ_info.error)
        else:
            raise e
7d810fc5ced3d187c68533f42c2443ef8bec651b
20,563
def serving_input_receiver_fn():
    """This is used to define inputs to serve the model.

    Returns:
        A ServingInputReceiver object.
    """
    csv_row = tf.placeholder(shape=[None], dtype=tf.string)
    features, _ = _make_input_parser(with_target=False)(csv_row)
    return tf.estimator.export.ServingInputReceiver(features, {'csv_row': csv_row})
bcc6f0c4050d40df114ba4e5d895524f736b463a
20,565
import time


def offsetTimer():
    """
    'Starts' a timer when called, returns a timer function that
    returns the time in seconds elapsed since the timer was started
    """
    start_time = time.monotonic()

    def time_func():
        return time.monotonic() - start_time

    return time_func
348105a408ccedd1fcb840b73d5a58dfd59dd8cc
20,566
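A brief usage sketch of offsetTimer above:

timer = offsetTimer()
time.sleep(0.5)
print(f"elapsed: {timer():.2f}s")  # roughly 0.50s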
from typing import Callable
import functools


def find_resolution(func: Callable = None) -> Callable:
    """Decorator that gives the decorated function the image resolution."""
    @functools.wraps(func)
    def wrapper(self: MultiTraceChart, *args, **kwargs):
        if 'width' not in kwargs:
            kwargs['width'] = self.resolution[0]
        if 'height' not in kwargs:
            kwargs['height'] = self.resolution[1]
        if 'resolution' in kwargs:
            kwargs['width'] = kwargs['resolution'][0]
            kwargs['height'] = kwargs['resolution'][1]
            del kwargs['resolution']
        if 'size' in kwargs:
            kwargs['width'] = kwargs['size'][0]
            kwargs['height'] = kwargs['size'][1]
            del kwargs['size']
        return func(self, *args, **kwargs)
    return wrapper
70edffcec5ac772bd52cb819db589d26497fda87
20,568
def transform_spikes_to_isi(self, spikes, time_epoch, last_event_is_spike=False): """Convert spike times to data array, which is a suitable format for optimization. Parameters ---------- spikes : numpy array (num_neuron,N), dtype=np.ndarray A sequence of spike times for each neuron on each trial. Each entry is 1D array of floats. time_epoch : list of tuples List of N tuples, where N is the number of trials. Each tuple consists of the trial's start time and end time in seconds. Note that the end time should be an actual end time, but not the timeout in the case of last_event_is_spike is True. last_event_is_spike : bool If true, trial termination time will not be recorded. Otherwise, trial termination time will be recorded. Returns ------- data : numpy array (N,2),dtype=np.ndarray. Spike data packed as numpy array of the size (N,2), where each elements is a 1D array of floats. N is the number of trials, and for each trial the first column contains the interspike intervals (ISIs), and the second column contains the corresponding neuronal indices. """ num_neuron, num_trial = spikes.shape # initialize data array data = np.empty((num_trial, 2), dtype=np.ndarray) # indices of neurons that spiked spike_ind = np.empty(num_neuron, dtype=np.ndarray) # transform spikes to interspike intervals format for iTrial in range(num_trial): for iCell in range(num_neuron): spike_ind[iCell] = iCell * np.ones(len(spikes[iCell, iTrial]), dtype=np.int) all_spikes = np.concatenate(spikes[:, iTrial], axis=0) all_spike_ind = np.concatenate(spike_ind[:], axis=0) # create data array data[iTrial, 0] = np.zeros(len(all_spikes) + (not last_event_is_spike)) if all_spikes.shape[0] == 0: data[iTrial, 1] = np.zeros(0) # If no spikes emitted, set to trial beginning time last_spike_time = time_epoch[iTrial][0] else: # sort spike times and neuron index arrays ind_sort = np.argsort(all_spikes) all_spikes = all_spikes[ind_sort] all_spike_ind = all_spike_ind[ind_sort] data[iTrial, 0][1:len(all_spikes)] = all_spikes[1:] - all_spikes[:-1] data[iTrial, 0][0] = all_spikes[0] - time_epoch[iTrial][0] # handle the first ISI last_spike_time = all_spikes[-1] if not last_event_is_spike: data[iTrial, 0][-1] = time_epoch[iTrial][1] - last_spike_time # assign indicies of neurons which fired, -1 to absorption event data[iTrial, 1] = all_spike_ind if last_event_is_spike else np.concatenate((all_spike_ind, [-1])) return data
cc2b54e80e00b10b8cabf79093509fde1980b804
20,569
def api_github_v2(user_profile, event, payload, branches, default_stream, commit_stream, issue_stream, topic_focus = None): """ processes github payload with version 2 field specification `payload` comes in unmodified from github `default_stream` is set to what `stream` is in v1 above `commit_stream` and `issue_stream` fall back to `default_stream` if they are empty This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration """ if not commit_stream: commit_stream = default_stream if not issue_stream: issue_stream = default_stream target_stream = commit_stream repository = payload['repository'] if not topic_focus: topic_focus = repository['name'] # Event Handlers if event == 'pull_request': pull_req = payload['pull_request'] subject = github_generic_subject('pull request', topic_focus, pull_req) content = github_generic_content('pull request', payload, pull_req) elif event == 'issues': # in v1, we assume that this stream exists since it is # deprecated and the few realms that use it already have the # stream target_stream = issue_stream issue = payload['issue'] subject = github_generic_subject('issue', topic_focus, issue) content = github_generic_content('issue', payload, issue) elif event == 'issue_comment': # Comments on both issues and pull requests come in as issue_comment events issue = payload['issue'] if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None: # It's an issues comment target_stream = issue_stream noun = 'issue' else: # It's a pull request comment noun = 'pull request' subject = github_generic_subject(noun, topic_focus, issue) comment = payload['comment'] content = ("%s [commented](%s) on [%s %d](%s)\n\n~~~ quote\n%s\n~~~" % (comment['user']['login'], comment['html_url'], noun, issue['number'], issue['html_url'], comment['body'])) elif event == 'push': subject, content = build_message_from_gitlog(user_profile, topic_focus, payload['ref'], payload['commits'], payload['before'], payload['after'], payload['compare'], payload['pusher']['name'], forced=payload['forced'], created=payload['created']) elif event == 'commit_comment': comment = payload['comment'] subject = "%s: commit %s" % (topic_focus, comment['commit_id']) content = ("%s [commented](%s)" % (comment['user']['login'], comment['html_url'])) if comment['line'] is not None: content += " on `%s`, line %d" % (comment['path'], comment['line']) content += "\n\n~~~ quote\n%s\n~~~" % (comment['body'],) return (target_stream, subject, content)
bed307903d7ddcce216919d18accb3ecfd94937d
20,570
from typing import Iterable from typing import Optional from typing import Callable from typing import Dict def concatenate( iterable: Iterable[Results], callback: Optional[Callable] = None, modes: Iterable[str] = ("val", "test"), reduction: str = "none", ) -> Results: """Returns a concatenated Results. Args: iterable (iterable of Results): Iterable of `Results` instance. callback (callable, optional): Called for each `Results`. Must take (`mode`, `index`, `output`, `target`) arguments and return a tuple of ('index', `output`, `target`). modes (iterable of str): Specify modes to concatenate. reduction (str, optional): Reduction. `none` or `mean`. """ modes = list(modes) indexes: Dict[str, list] = {mode: [] for mode in modes} outputs: Dict[str, list] = {mode: [] for mode in modes} targets: Dict[str, list] = {mode: [] for mode in modes} for results in iterable: for mode in modes: if mode not in results: continue result = results[mode] index, output, target = result["index"], result["output"], result["target"] if callback: index, output, target = callback(index, output, target) indexes[mode].append(index) outputs[mode].append(output) targets[mode].append(target) results = Results() for mode in modes: index = np.concatenate(indexes[mode]) output = np.concatenate(outputs[mode]) target = np.concatenate(targets[mode]) dict = ivory.core.collections.Dict() results[mode] = dict(index=index, output=output, target=target) if reduction != "none": results = getattr(results, reduction)() return results
6833a50ddc84d44c942c6e85c1ebbdb793bd78a9
20,571
def parse_version(s: str) -> tuple[int, ...]:
    """poor man's version comparison"""
    return tuple(int(p) for p in s.split('.'))
445cd029efa3c8d4331e916f9925daddbc277ada
20,572
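Illustrative use of parse_version above; tuples compare element-wise, which gives the "poor man's" ordering:

assert parse_version('1.10.0') > parse_version('1.9.2')
print(parse_version('3.11'))  # (3, 11)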
def replay_train(DQN, train_batch):
    """
    Here, train_batch holds the data sampled from the minibatch.
    x_stack accumulates the states and y_stack accumulates the
    deterministic Q-learning target values. Start with empty arrays
    before stacking.
    """
    x_stack = np.empty(0).reshape(0, DQN.input_size)   # array(10, 4)
    y_stack = np.empty(0).reshape(0, DQN.output_size)  # array(10, 2)

    # Get stored information from the buffer
    # Unpack the samples drawn from the minibatch (train_batch) one by one.
    for state, action, reward, next_state, done in train_batch:
        Q = DQN.predict(state)

        # terminal
        if done:
            Q[0, action] = reward
        else:
            # Obtain the Q' values by feeding the new state through our network
            Q[0, action] = reward + dis * np.max(DQN.predict(next_state))
            # Because predict is also used in main as
            # action = np.argmax(mainDQN.predict(state)), this creates the
            # non-stationary targets problem.

        # np.vstack is the numpy function used to stack onto y_stack.
        y_stack = np.vstack([y_stack, Q])
        x_stack = np.vstack([x_stack, state])

    # Train our network using target and predicted Q values on each episode
    # Feed the accumulated stacks straight into update to train; training runs
    # through the neural network (linear regression) built above.
    return DQN.update(x_stack, y_stack)
05b85aab223b82637a23853d15cd8e073ecca845
20,573
def make_reverse_macro_edge_name(macro_edge_name):
    """Autogenerate a reverse macro edge name for the given macro edge name."""
    if macro_edge_name.startswith(INBOUND_EDGE_FIELD_PREFIX):
        raw_edge_name = macro_edge_name[len(INBOUND_EDGE_FIELD_PREFIX):]
        prefix = OUTBOUND_EDGE_FIELD_PREFIX
    elif macro_edge_name.startswith(OUTBOUND_EDGE_FIELD_PREFIX):
        raw_edge_name = macro_edge_name[len(OUTBOUND_EDGE_FIELD_PREFIX):]
        prefix = INBOUND_EDGE_FIELD_PREFIX
    else:
        raise AssertionError("Unreachable condition reached: {}".format(macro_edge_name))

    reversed_macro_edge_name = prefix + raw_edge_name
    return reversed_macro_edge_name
807efcc26fb21e553241b2de4d2c6633a24548a2
20,574
def unescaped_split(pattern,
                    string,
                    max_split=0,
                    remove_empty_matches=False,
                    use_regex=False):
    """
    Splits the given string by the specified pattern. The return character
    (\\n) is not a natural split pattern (if you don't specify it yourself).
    This function handles escaped split-patterns (and so splits only patterns
    that are unescaped).

    :param pattern:              A pattern that defines where to split.
    :param string:               The string to split by the defined pattern.
    :param max_split:            Defines the maximum number of splits. If 0 or
                                 less is provided, the number of splits is not
                                 limited.
    :param remove_empty_matches: Defines whether empty entries should be
                                 removed from the result.
    :param use_regex:            Specifies whether to treat the split pattern
                                 as a regex or simple string.
    :return:                     An iterator returning the split up strings.
    """
    return _split(string,
                  max_split,
                  remove_empty_matches,
                  unescaped_search_for,
                  pattern,
                  string,
                  0,
                  0,
                  use_regex)
5a5cec1a54b94840e13ddec3ca8796a73e908898
20,575
def citation_distance_matrix(graph):
    """
    :param graph: networkx graph
    :returns: distance matrix, node labels
    """
    sinks = [key for key, outdegree in graph.out_degree() if outdegree == 0]
    paths = {s: nx.shortest_path_length(graph, target=s) for s in sinks}
    paths_df = pd.DataFrame(paths)  # , index=graph.nodes)
    paths_nonzero_df = 1 * ~paths_df.isnull()
    a_paths_nonzero = paths_nonzero_df.values
    m = a_paths_nonzero
    intersect = m.dot(m.T)
    union = m.dot(np.ones(m.shape).T) + np.ones(m.shape).dot(m.T) - intersect
    union[union == 0] = 1
    dist = 1 - intersect / union
    return dist, paths_nonzero_df.index
b3c41164c2081704b3b36ce0c5b1ca55440a88be
20,576
from typing import IO def read_into_dataframe(file: IO, filename: str = "", nrows: int = 100,max_characters: int = 50) -> pd.DataFrame: """Reads a file into a DataFrame. Infers the file encoding and whether a header column exists Args: file (IO): file buffer. filename (str): filename. Used to infer compression. nrows (int, optional): number of rows to peek. Default: 100. max_characters (int, optional): max characters a column name can have to be distinguished from a real text value Returns: A pandas.DataFrame. """ detector = UniversalDetector() for line, text in enumerate(file): detector.feed(text) if detector.done or line > nrows: break detector.close() encoding = detector.result.get("encoding") compression = infer_compression(filename, "infer") file.seek(0, SEEK_SET) contents = file.read() with BytesIO(contents) as file: df0 = pd.read_csv( file, encoding=encoding, compression=compression, sep=None, engine="python", header="infer", nrows=nrows, ) df0_cols = list(df0.columns) #Check if all columns are strins and short strings(text values tend to be long) column_names_checker = all([type(item) == str for item in df0_cols]) if column_names_checker: column_names_checker = all([len(item) < max_characters for item in df0_cols]) #Check if any column can be turned to float conversion_checker= True for item in df0_cols: try: item = float(item) conversion_checker = False break except: pass #Prefix and header final_checker = True if (column_names_checker and conversion_checker) else False header = "infer" if final_checker else None prefix = None if header else "col" with BytesIO(contents) as file: df = pd.read_csv( file, encoding=encoding, compression=compression, sep=None, engine="python", header=header, prefix=prefix, ) return df
fe95c60870779353f2aa751c20ed331a2e0156bf
20,577
from typing import OrderedDict import torch def Navigatev0_action_to_tensor(act: OrderedDict, task=1): """ Creates the following (batch_size, seq_len, 11) action tensor from Navigatev0 actions: 0. cam left 1. cam right 2. cam up 3. cam down 4. place + jump 5. place 6. forward + attack 7. attack 8. forward + jump 9. jump 10. forward """ batch_size, seq_len = act["jump"].shape PLACE_OPTIONS = {"none": 0, "dirt": 1} # ONE_HOT = {0: np.array([1, 0]), 1: np.array([0, 1])} out = torch.zeros((batch_size,seq_len,11)) for b in range(batch_size): for s in range(seq_len): c = act["camera"] # We don't need to check if 0, 1, and 10 are in task actions # since they always will be task_acts = TASK_ACTIONS[task] # Set camera left if c[b,s][0] < -10 and abs(c[b,s][0]) >= abs(c[b,s][1]): out[b,s][0] = 1 # Set camera right elif c[b,s][0] > 10 and abs(c[b,s][0]) >= abs(c[b,s][1]): out[b,s][1] = 1 # Set camera up elif 2 in task_acts and c[b,s][1] < -10 and abs(c[b,s][1]) >= abs(c[b,s][0]): out[b,s][2] = 1 elif 3 in task_acts and c[b,s][1] > 10 and abs(c[b,s][1]) >= abs(c[b,s][0]): out[b,s][3] = 1 elif PLACE_OPTIONS[act["place"][b,s]] == 1: if 4 in task_acts and act["jump"][b,s] == 1: out[b,s][4] = 1 elif 5 in task_acts: out[b,s][5] = 1 elif act["attack"][b,s] == 1: if 6 in task_acts and act["forward"][b,s] == 1: out[b,s][6] = 1 elif 7 in task_acts: out[b,s][7] = 1 elif act["jump"][b,s] == 1: if 8 in task_acts and act["forward"][b,s] == 1: out[b,s][8] = 1 elif 9 in task_acts: out[b,s][9] = 1 else: out[b,s][10] = 1 return out
39d481d2e8597902b18695de97f041606f24f035
20,579
def asfarray(a, dtype=mstype.float32): """ Similar to asarray, converts the input to a float tensor. If non-float dtype is defined, this function will return a float32 tensor instead. Args: a (Union[int, float, bool, list, tuple, numpy.ndarray]): Input data, in any form that can be converted to a `Tensor`. This includes lists, lists of tuples, tuples, tuples of tuples, tuples of lists and numpy.ndarray. dtype (Union[:class:`mindspore.dtype`, str], optional): Designated tensor dtype, can be in format of np.int32, or \'int32\'. If dtype is :class:`None`, the data type of the new tensor will be inferred from `a`. Default is :class:`mindspore.float32`. Returns: Tensor, generated tensor with the specified float dtype. Raises: TypeError: If input arguments have types not specified above. ValueError: If input `a` has different sizes at different dimensions. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore.numpy as np >>> print(np.asfarray([1,2,3])) [1. 2. 3.] """ _check_input_for_asarray(a) if dtype is None: return asarray(a) dtype = _check_dtype(dtype) if dtype not in (mstype.float16, mstype.float32, mstype.float64): dtype = mstype.float32 if isinstance(a, (list, tuple)): # Convert all tuple/nested tuples to lists a = _deep_list(a) # Convert all tensor sub-elements to numpy arrays a = _deep_tensor_to_nparray(a) a = onp.asarray(a) if a.dtype is onp.dtype('object'): raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.") if isinstance(a, onp.ndarray): a = Tensor.from_numpy(a) return Tensor(a, dtype)
4da49b2bcab9686b2757cf1b9066c21876f992e6
20,580
from typing import Optional from typing import Callable import click def _verify_option(value: Optional[str], value_proc: Callable) -> Optional[str]: """Verifies that input value via click.option matches the expected value. This sets ``value`` to ``None`` if it is invalid so the rest of the prompt can flow smoothly. Args: value (Optional[str]): Input value. value_proc (Callable): A function to check the validity of ``value``. Returns: (Optional[str]): ``value`` if it is a valid value. ``None`` if it is not. Raises: click.exceptions.UsageError: When ``value`` is invalid. """ if value is None: return value try: value = value_proc(value) except click.exceptions.UsageError as error: click.echo(f"Error: {error.message}", err=True) value = None return value
4d0f58827982924a9d027112ffa3aaeef7634fe8
20,581
def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None): """Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`. e.g. x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32) y = x * x jacobian = batch_jacobian(y, x) # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]] Args: output: A tensor with shape [b, y1, ..., y_n]. `output[i,...]` should only depend on `inp[i,...]`. inp: A tensor with shape [b, x1, ..., x_m] use_pfor: If true, uses pfor for computing the Jacobian. Else uses a tf.while_loop. parallel_iterations: A knob to control how many iterations and dispatched in parallel. This knob can be used to control the total memory usage. Returns: A tensor `t` with shape [b, y_1, ..., y_n, x1, ..., x_m] where `t[i, ...]` is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked per-example jacobians. Raises: ValueError: if first dimension of `output` and `inp` do not match. (NL) This function is taken from the following (and minimally modified to be used): https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/parallel_for/gradients.py#L81 """ output_shape = output.shape if not output_shape[0].is_compatible_with(inp.shape[0]): raise ValueError("Need first dimension of output shape (%s) and inp shape " "(%s) to match." % (output.shape, inp.shape)) if output_shape.is_fully_defined(): batch_size = int(output_shape[0]) output_row_size = output_shape.num_elements() // batch_size else: output_shape = tf.shape(output) batch_size = output_shape[0] output_row_size = tf.size(output) // batch_size inp_shape = tf.shape(inp) # Flatten output to 2-D. with tf.control_dependencies([tf.assert_equal(batch_size, inp_shape[0])]): output = tf.reshape(output, [batch_size, output_row_size]) def loop_fn(i): y = tf.gather(output, i, axis=1) return tf.gradients(y, inp)[0] #if use_pfor: if False: pfor_output = tf.pfor(loop_fn, output_row_size, parallel_iterations=parallel_iterations) else: pfor_output = for_loop( loop_fn, output.dtype, output_row_size, parallel_iterations=parallel_iterations) if pfor_output is None: return None pfor_output = tf.reshape(pfor_output, [output_row_size, batch_size, -1]) output = tf.transpose(pfor_output, [1, 0, 2]) new_shape = tf.concat([output_shape, inp_shape[1:]], axis=0) return tf.reshape(output, new_shape)
dd42fcc9542bba8033a1eb204bf0d3a91b192dbc
20,582
def declare(baseFamily=None, baseDefault=0, derivedFamily=None, derivedDefault=""):
    """
    Declare a pair of components
    """
    # the declaration
    class base(pyre.component, family=baseFamily):
        """a component"""
        b = pyre.properties.int(default=baseDefault)

    class derived(base, family=derivedFamily):
        """a derived component"""
        d = pyre.properties.str(default=derivedDefault)

    # return the pair to the caller
    return base, derived
30c8d8f7d264a0e908f4305198b07c3d76a3cfac
20,583
from datetime import datetime


def parse_iso8601(dtstring: str) -> datetime:
    """naive parser for ISO8601 datetime strings,

    Parameters
    ----------
    dtstring
        the datetime as string in one of two formats:

            * ``2017-11-20T07:16:29+0000``
            * ``2017-11-20T07:16:29Z``
    """
    return datetime.strptime(
        dtstring,
        '%Y-%m-%dT%H:%M:%SZ' if len(dtstring) == 20 else '%Y-%m-%dT%H:%M:%S%z')
415a4f3a9006109e31ea344cf99e885a3fd2738d
20,584
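A quick check of parse_iso8601 above on both accepted formats:

print(parse_iso8601('2017-11-20T07:16:29Z'))      # 2017-11-20 07:16:29
print(parse_iso8601('2017-11-20T07:16:29+0000'))  # 2017-11-20 07:16:29+00:00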
def CalcCurvature(vertices,faces): """ CalcCurvature recives a list of vertices and faces and the normal at each vertex and calculates the second fundamental matrix and the curvature by least squares, by inverting the 3x3 Normal matrix INPUT: vertices -nX3 array of vertices faces -mX3 array of faces VertexNormals - nX3 matrix (n=number of vertices) containing the normal at each vertex FaceNormals - mX3 matrix (m = number of faces) containing the normal of each face OUTPUT: FaceSFM - a list of 2x2 np arrays of (m = number of faces) second fundamental tensor at the faces VertexSFM - a list of 2x2 np arrays (n = number of vertices) second fundamental tensor at the vertices Other Parameters wfp : mx3 array of vertex voronoi cell area/Mixed area weights as given in Meyer 2002 up,vp : local coordinate system at each vertex e0,e1,e2 : edge vectors """ #list of 2x2 arrays for each vertex VertexSFM = [np.zeros([2,2]) for i in vertices] up = np.zeros(vertices.shape) e0=vertices[faces[:,2]]-vertices[faces[:,1]] e1=vertices[faces[:,0]]-vertices[faces[:,2]] e2=vertices[faces[:,1]]-vertices[faces[:,0]] e0_norm=normr(e0) e1_norm=normr(e1) e2_norm=normr(e2) FaceNormals=0.5*fastcross(e1,e2) #not unit length. holds the area which is needed next VertNormals,wfp=GetVertexNormalsExtra(vertices,faces,FaceNormals,e0,e1,e2) FaceNormals=normr(FaceNormals) #Calculate initial coordinate system up[faces[:,0]]=e2_norm up[faces[:,1]]=e0_norm up[faces[:,2]]=e1_norm #Calculate initial vertex coordinate system up=fastcross(up,VertNormals) up=normr(up) vp=fastcross(VertNormals,up) B=normr(fastcross(FaceNormals,e0_norm)) nfaces=faces.shape[0] # Build a least square problem at each face to get the SFM at each face and solve it using the normal equation scale=1.0/np.sqrt(np.sum((e0[0,:]**2+e1[0,:]**2+e2[0,:]**2)/3.0)) AT = scale*np.array([[inner1d(e0,e0_norm), inner1d(e0,B), np.zeros(nfaces)], [np.zeros(nfaces), inner1d(e0,e0_norm), inner1d(e0,B)], [inner1d(e1,e0_norm), inner1d(e1,B), np.zeros(nfaces)], [np.zeros(nfaces), inner1d(e1,e0_norm), inner1d(e1,B)], [inner1d(e2,e0_norm), inner1d(e2,B), np.zeros(nfaces)], [np.zeros(nfaces), inner1d(e2,e0_norm), inner1d(e2,B)]]).T A = np.transpose(AT,axes=(0,2,1)).copy() dn0=VertNormals[faces[:,2]]-VertNormals[faces[:,1]] dn1=VertNormals[faces[:,0]]-VertNormals[faces[:,2]] dn2=VertNormals[faces[:,1]]-VertNormals[faces[:,0]] b= scale*np.array([inner1d(dn0,e0_norm), inner1d(dn0,B ), inner1d(dn1,e0_norm), inner1d(dn1,B ), inner1d(dn2,e0_norm), inner1d(dn2,B )]).T[:,:,np.newaxis] X1=np.array([np.linalg.pinv(a,-1) for a in A]) X = np.matmul(X1,b) #now calculate curvature per vertex as weighted sum of the face curvature for i,f in enumerate(faces): for j in [0,1,2]: new_ku,new_kuv,new_kv = ProjectCurvatureTensor(e0_norm[i],B[i],FaceNormals[i],X[i][0],X[i][1],X[i][2],up[f[j]],vp[f[j]]) VertexSFM[f[j]]+=wfp[i,j]*np.array([[new_ku,new_kuv],[new_kuv,new_kv]]).squeeze() return VertexSFM,VertNormals
b0e31073fe8aff61e60d0393098cca390bb95708
20,585
from typing import Optional


def query_abstracts(
    q: Optional[str] = None,
    n_results: Optional[int] = None,
    index: str = "agenda-2020-1",
    fields: list = ["title^2", "abstract", "fullname", "institution"],
):
    """
    Query abstracts from a given Elastic index

    q: str, query
    n_results: int, number of results to return
    index: str, index of ElasticSearch
    fields: list, list of fields that are included in the search
    """
    responses = query(q, n_results, index, fields)
    return responses
4ed554231c863c3164c5368978da900e3647570d
20,586
import typing
import pickle


def PretrainedEmbeddingIndicesDictionary() -> typing.Dict[str, int]:
    """Read and return the embeddings indices dictionary."""
    with open(INST2VEC_DICITONARY_PATH, "rb") as f:
        return pickle.load(f)
d4c0c8f5d7c83d99927342c5cacd8fd80a4f7d56
20,587
def color_negative_red(val):
    """
    Takes a scalar and returns a string with the css property
    `'color: red'` for negative values, black otherwise.
    """
    color = 'red' if val < 0 else 'black'
    return 'color: %s' % color
1806af9c915740612a6a11df723f1439c73bde2f
20,588
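color_negative_red above is a typical pandas Styler helper; a small sketch (the pandas usage below is illustrative):

import pandas as pd

print(color_negative_red(-0.3))  # 'color: red'
print(color_negative_red(2.0))   # 'color: black'

df = pd.DataFrame({'pnl': [1.5, -0.3, 0.0]})
styled = df.style.applymap(color_negative_red)  # renders the -0.3 cell in red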
def get_student_discipline(person_id: str = None):
    """
    Returns student discipline information for a particular person.

    :param person_id: The numeric ID of the person you're interested in.
    :returns: String containing xml or an lxml element.
    """
    return get_anonymous('getStudentDiscipline', person_id=person_id)
4e96fb4e9d566af7094b16b29540617bbb230f67
20,590
# X and Y are assumed to be coordinate index constants (e.g. X, Y = 0, 1)
# defined elsewhere; the original's spurious "from re import X" is dropped.
def dot(p1, p2):
    """
    Dot product
    :param p1:
    :param p2:
    :return:
    """
    return p1[X] * p2[X] + p1[Y] * p2[Y]
13ba17e8757ebf9022f07d21b58a26376520f84a
20,592
def logout():
    """View function which handles a logout request."""
    tf_clean_session()

    if current_user.is_authenticated:
        logout_user()

    # No body is required - so if a POST and json - return OK
    if request.method == "POST" and _security._want_json(request):
        return _security._render_json({}, 200, headers=None, user=None)

    return redirect(get_post_logout_redirect())
0343be8ec063b5c215a0a019003cbf137588171a
20,593
def splinter_session_scoped_browser():
    """Make it test scoped."""
    return False
a7587f6edff821bab3052dca73929201e98dcf56
20,595
from typing import Counter def sample_mask(source, freq_vocab, threshold=1e-3, min_freq=0, seed=None, name=None): """Generates random mask for downsampling high frequency items. Args: source: string `Tensor` of any shape, items to be sampled. freq_vocab: `Counter` with frequencies vocabulary. threshold: `float`, items occurrence threshold. min_freq: `int`, items below that frequency will be treated as unique. seed: `int`, used to create a random seed (optional). See @{tf.random.set_seed} for behavior. name: `string`, a name for the operation (optional). Returns: A boolean `Tensor` of same shape as source: "keep" flags. """ with tf.name_scope(name or 'sample_mask'): source = tf.convert_to_tensor(source, dtype=tf.string, name='source') seed1, seed2 = random_seed.get_seed(seed) if not isinstance(freq_vocab, Counter): raise ValueError('Frequency vocabulary should be a Counter instance') keys, freqs = zip(*freq_vocab.most_common()) return tfmiss_ops.miss_sample_mask( source=source, keys=keys, freqs=freqs, threshold=threshold, min_freq=min_freq, seed=seed1, seed2=seed2 )
30fca98f95ac7a6aa2f3a3576f32abf271a693bb
20,596
def _xList(l):
    """Return l unchanged, or an empty list if l is None."""
    if l is None:
        return []
    return l
ef09d779c7ebc2beb321d90726f43603c0ac8315
20,597
def IABN2Float(module: nn.Module) -> nn.Module:
    """If `module` is IABN don't use half precision."""
    if isinstance(module, InplaceAbn):
        module.float()
    for child in module.children():
        IABN2Float(child)
    return module
587565ad78afd08d3365f637ab5b98b17e977566
20,598
from datetime import datetime, date


def start_of_day(val):
    """
    Return a new datetime.datetime object with values that represent
    a start of a day.
    :param val: Date to ...
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    if type(val) == date:
        val = datetime.fromordinal(val.toordinal())
    return val.replace(hour=0, minute=0, second=0, microsecond=0)
74e302513edf428f825f9e24567e23b3a5e5d4f5
20,599
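A short check of start_of_day above:

print(start_of_day(datetime(2021, 5, 4, 13, 37, 42)))  # 2021-05-04 00:00:00
print(start_of_day(date(2021, 5, 4)))                  # 2021-05-04 00:00:00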
def pending_mediated_transfer(app_chain, token_network_identifier, amount, identifier): """ Nice to read shortcut to make a LockedTransfer where the secret is _not_ revealed. While the secret is not revealed all apps will be synchronized, meaning they are all going to receive the LockedTransfer message. Returns: The secret used to generate the LockedTransfer """ # pylint: disable=too-many-locals if len(app_chain) < 2: raise ValueError('Cannot make a LockedTransfer with less than two apps') target = app_chain[-1].raiden.address # Generate a secret initiator_channel = views.get_channelstate_by_token_network_and_partner( views.state_from_app(app_chain[0]), token_network_identifier, app_chain[1].raiden.address, ) address = initiator_channel.identifier nonce_int = channel.get_next_nonce(initiator_channel.our_state) nonce_bytes = nonce_int.to_bytes(2, 'big') secret = sha3(address + nonce_bytes) initiator_app = app_chain[0] init_initiator_statechange = initiator_init( initiator_app.raiden, identifier, amount, secret, token_network_identifier, target, ) events = initiator_app.raiden.wal.log_and_dispatch( init_initiator_statechange, initiator_app.raiden.get_block_number(), ) send_transfermessage = must_contain_entry(events, SendLockedTransfer, {}) transfermessage = LockedTransfer.from_event(send_transfermessage) initiator_app.raiden.sign(transfermessage) for mediator_app in app_chain[1:-1]: mediator_init_statechange = mediator_init(mediator_app.raiden, transfermessage) events = mediator_app.raiden.wal.log_and_dispatch( mediator_init_statechange, mediator_app.raiden.get_block_number(), ) send_transfermessage = must_contain_entry(events, SendLockedTransfer, {}) transfermessage = LockedTransfer.from_event(send_transfermessage) mediator_app.raiden.sign(transfermessage) target_app = app_chain[-1] mediator_init_statechange = target_init(transfermessage) events = target_app.raiden.wal.log_and_dispatch( mediator_init_statechange, target_app.raiden.get_block_number(), ) return secret
82ae40ffa45a759f1aac132c3edc221ebd11ae9e
20,600
def get_comments(post, sort_mode='hot', max_depth=5, max_breadth=5): """ Retrieves comments for a post. :param post: The unique id of a Post from which Comments will be returned. :type post: `str` or :ref:`Post` :param str sort_mode: The order that the Posts will be sorted by. Options are: "top" (ranked by upvotes minus downvotes), "best" (similar to top, except that it uses a more complicated algorithm to have good posts jump to the top and stay there, and bad comments to work their way down, see http://blog.reddit.com/2009/10/reddits-new-comment-sorting-system.html), "hot" (similar to "top", but weighted by time so that recent, popular posts are put near the top), "new" (posts will be sorted by creation time). :param int max_depth: The maximum depth that comments will be retrieved from (i.e., how many descendants from the topmost comment). To go down infinitely, use None. :param int max_breadth: The maximum breadth that comments will be retrieved from (i.e., how many siblings from the topmost comment). Note that this breadth applies at every subtree - in effect, it is the branching factor. To get all siblings, use None. :returns: list of Comment """ if sort_mode not in SORT_MODES: raise RedditException("Unknown sort mode: {}".format(sort_mode)) if isinstance(post, Post): post = post.id elif not isinstance(post, str): raise RedditException("The post parameter should be a String or a Post") result = _get_comments_string(post, sort_mode, max_depth, max_breadth) if result: try: json_result = _from_json(result)[1]['data']['children'] except ValueError: raise RedditException("The response from the server didn't make any sense.") if "error" in json_result: raise RedditException("Error from Reddit: {}".format(json_result.get("error", "Unknown error."))) if max_breadth is None: return [Comment._from_json(r, post, max_depth=max_depth-1) for r in json_result] else: return [Comment._from_json(r, post, max_depth=max_depth-1, max_breadth=max_breadth) for r in json_result[:max_breadth]] else: if _CONNECTED: raise RedditException("No response from the server.") else: raise RedditException("No data was in the cache for this comment.")
333009358f622560135e7e239741613356387d55
20,601
def neighbor_json(json):
    """Read neighbor game from json"""
    utils.check(
        json['type'].split('.', 1)[0] == 'neighbor', 'incorrect type')
    return _NeighborDeviationGame(
        gamereader.loadj(json['model']),
        num_neighbors=json.get('neighbors', json.get('devs', None)))
19891d59970610ad412fd4eb204477c96d1d82fd
20,602
def get_b16_config():
    """Returns the ViT-B/16 configuration."""
    config = ml_collections.ConfigDict()
    config.name = 'ViT-B_16'
    config.half_precision = True

    config.encoder = ml_collections.ConfigDict()
    config.encoder.patches = ml_collections.ConfigDict({'size': (16, 16)})
    config.encoder.hidden_size = 768
    config.encoder.mlp_dim = 3072
    config.encoder.num_heads = 12
    config.encoder.num_layers = 12
    config.encoder.attention_dropout_rate = 0.0
    config.encoder.dropout_rate = 0.0
    config.encoder.drop_path_rate = 0.0

    config.decoder = ml_collections.ConfigDict()
    config.decoder.hidden_size = 384
    config.decoder.mlp_dim = 1536
    config.decoder.num_heads = 6
    config.decoder.num_layers = 4
    config.decoder.attention_dropout_rate = 0.0
    config.decoder.dropout_rate = 0.0
    config.decoder.drop_path_rate = 0.0
    config.decoder.out_dim = 768

    return config
6afdb862bd07c21d569db65fbb1780492ff153f2
20,603
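A hedged usage sketch for get_b16_config above (assumes the ml_collections package is available):

config = get_b16_config()
print(config.name)                 # ViT-B_16
print(config.encoder.hidden_size)  # 768
config.encoder.dropout_rate = 0.1  # ConfigDict entries can be overridden afterwards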
from typing import Container


def build_container_hierarchy(dct):
    """Create a hierarchy of Containers based on the contents of a nested dict.
    There will always be a single top level scoping Container regardless of the
    contents of dct.
    """
    top = Container()
    for key, val in dct.items():
        if isinstance(val, dict):
            # it's a dict, so this is a Container
            top.add(key, build_container_hierarchy(val))
        else:
            setattr(top, key, val)
    return top
7fb629d7f570e5f77b381766b5c2d909d7c0d6c1
20,604
def occ_frac(stop_rec_range, bin_size_minutes, edge_bins=1): """ Computes fractional occupancy in inbin and outbin. Parameters ---------- stop_rec_range: list consisting of [intime, outtime] bin_size_minutes: bin size in minutes edge_bins: 1=fractional, 2=whole bin Returns ------- [inbin frac, outbin frac] where each is a real number in [0.0,1.0] """ intime = stop_rec_range[0] outtime = stop_rec_range[1] bin_freq_str = '{}T'.format(int(bin_size_minutes)) indtbin = intime.floor(bin_freq_str) outdtbin = outtime.floor(bin_freq_str) # inbin occupancy if edge_bins == 1: right_edge = min(indtbin + timedelta(minutes=bin_size_minutes), outtime) inbin_occ_secs = (right_edge - intime).total_seconds() inbin_occ_frac = inbin_occ_secs / (bin_size_minutes * 60.0) else: inbin_occ_frac = 1.0 # outbin occupancy if indtbin == outdtbin: outbin_occ_frac = 0.0 # Use inbin_occ_frac else: if edge_bins == 1: left_edge = max(outdtbin, intime) outbin_occ_secs = (outtime - left_edge).total_seconds() outbin_occ_frac = outbin_occ_secs / (bin_size_minutes * 60.0) else: outbin_occ_frac = 1.0 assert 1.0 >= inbin_occ_frac >= 0.0, \ "bad inbin_occ_frac={:.3f} in={} out={}".format(inbin_occ_frac, intime, outtime) assert 1.0 >= outbin_occ_frac >= 0.0, \ "bad outbin_occ_frac={:.3f} in={} out={}".format(outbin_occ_frac, intime, outtime) return [inbin_occ_frac, outbin_occ_frac]
d3d93cd92386a98c865c61ad2b595786aa5d4837
20,605
def geomprogr_mesh(N=None, a=0, L=None, Delta0=None, ratio=None): """Compute a sequence of values according to a geometric progression. Different options are possible with the input number of intervals in the sequence N, the length of the first interval Delta0, the total length L and the ratio of the sought geometric progression. Three of them are requested in input to find a valid sequence. The sequence is drawn within the points a and b.""" if list(locals().values()).count(None) > 1: raise ValueError('Insufficient number of input data for a sequence') if ratio is not None: if (ratio < 0): raise ValueError('negative ratio is not valid') if L is not None: if (L < 0): raise ValueError('negative total length is not valid') if Delta0 is not None: if (Delta0 < 0): raise ValueError('negative length of the 1st interval is not valid') if N is not None: if (N < 0): raise ValueError('negative number of intervals is not valid') if N is None: if ratio < 1: N = np.log(1 - L / Delta0 * (1 - ratio)) / np.log(ratio) else: N = np.log(1 + L / Delta0 * (ratio - 1)) / np.log(ratio) elif L is None: if ratio < 1: L = Delta0 * (1 - ratio**N) / (1 - ratio) else: L = Delta0 * (ratio**N - 1) / (ratio - 1) elif Delta0 is None: if not np.isclose(ratio, 1): Delta0 = L * (1 - ratio) / (1 - ratio**N) else: Delta0 = L / float(N) elif ratio is None: f = lambda q: q**N - L / Delta0 * q + L / Delta0 - 1 x = L / float(N) if Delta0 > x: ratio = brentq(f, 0, 1 - 1.e-6) elif Delta0 < x: ratio = brentq(f, 1 + 1.e-6, 20) else: ratio = 1 if np.isclose(ratio, 1): r = np.linspace(0, L, N + 1) else: r = np.insert(np.full(N - 1, ratio), 0, 1) r = np.cumprod(r) * Delta0 r = np.insert(np.cumsum(r), 0, 0) return r + a
3de67b8ee2d75b69638648316fcfad07dbabde3a
20,606
def list_subclasses(package, base_class):
    """
    Dynamically import all modules in a package and scan for all subclasses of
    a base class.

    `package`: the package to import
    `base_class`: the base class to scan for subclasses

    return: a dictionary of possible subclasses with class name as key and
            class type information as value
    """
    import_modules(package)
    subclasses = all_subclasses(base_class)
    return dict(zip(map(lambda c: c.__name__, subclasses), subclasses))
e5570c30c89869b702c1c1015914540403be356f
20,607
def maxima_in_range(r, g_r, r_min, r_max):
    """Find the maxima in a range of r, g_r values"""
    idx = np.where(np.logical_and(np.greater_equal(r, r_min),
                                  np.greater_equal(r_max, r)))
    g_r_slice = g_r[idx]
    g_r_max = g_r_slice[g_r_slice.argmax()]
    idx_max, _ = find_nearest(g_r, g_r_max)
    return r[idx_max], g_r[idx_max]
14a4e3dc65465dd2e515ac09fb74704a366368b4
20,608
def shared_fit_preprocessing(fit_class): """ Shared preprocessing to get X, y, class_order, and row_weights. Used by _materialize method for both python and R fitting. :param fit_class: PythonFit or RFit class :return: X: pd.DataFrame of features to use in fit y: pd.Series of target to use in fit class_order: array specifying class order, or None row_weights: pd.Series of row weights, or None """ # read in data if fit_class.input_filename.endswith(".mtx"): colnames = None if fit_class.sparse_column_file: colnames = [column.strip() for column in open(fit_class.sparse_column_file).readlines()] df = pd.DataFrame.sparse.from_spmatrix(mmread(fit_class.input_filename), columns=colnames) else: df = pd.read_csv(fit_class.input_filename) # get num rows to use if fit_class.num_rows == "ALL": fit_class.num_rows = len(df) else: if fit_class.num_rows > len(df): raise DrumCommonException( "Requested number of rows greater than data length {} > {}".format( fit_class.num_rows, len(df) ) ) fit_class.num_rows = int(fit_class.num_rows) # get target and features, resample and modify nrows if needed if fit_class.target_filename or fit_class.target_name: if fit_class.target_filename: y_unsampled = pd.read_csv(fit_class.target_filename, index_col=False) assert ( len(y_unsampled.columns) == 1 ), "Your target dataset at path {} has {} columns named {}".format( fit_class.target_filename, len(y_unsampled.columns), y_unsampled.columns ) assert len(df) == len( y_unsampled ), "Your input data has {} entries, but your target data has {} entries".format( len(df), len(y_unsampled) ) if y_unsampled.columns[0] in df.columns: y_unsampled.columns = ["__target__"] df = df.merge(y_unsampled, left_index=True, right_index=True) assert len(y_unsampled.columns.values) == 1 fit_class.target_name = y_unsampled.columns.values[0] df = df.dropna(subset=[fit_class.target_name]) X = df.drop(fit_class.target_name, axis=1).sample(fit_class.num_rows, random_state=1) y = df[fit_class.target_name].sample(fit_class.num_rows, random_state=1) else: X = df.sample(fit_class.num_rows, random_state=1) y = None row_weights = extract_weights(X, fit_class) class_order = extract_class_order(fit_class) return X, y, class_order, row_weights
b87831540ba6fc4bc65fe0532e2af0574515c3a3
20,609
import json def webhook(): """ Triggers on each GET and POST request. Handles GET and POST requests using this function. :return: Return status code acknowledge for the GET and POST request """ if request.method == 'POST': data = request.get_json(force=True) log(json.dumps(data)) # you may not want to log every incoming message in production, but it's good for testing if data["object"] == "page": for entry in data["entry"]: for event in entry["messaging"]: sender_id = event["sender"]["id"] if 'message' in event and 'text' in event['message']: message_text = event["message"]["text"] if event.get("message").get("quick_reply"): feedback_payload = event["message"]["quick_reply"]["payload"] handle_message(feedback_payload, sender_id, message_type="feedback") else: handle_message(message_text, sender_id) if 'postback' in event and 'payload' in event['postback']: postback_payload = event['postback']['payload'] log(postback_payload) handle_message(postback_payload, sender_id, message_type="feedback") if event.get("delivery"): pass if event.get("optin"): pass return "ok", 200 elif request.method == 'GET': # Verification if request.args.get("hub.verify_token") == VERIFY_TOKEN: return request.args.get('hub.challenge'), 200 else: return 'Error, wrong validation token', 403
0c9f39c1159990e6a84dc9ce0091078397a3b65e
20,610
def extract_winner(state: 'TicTacToeState') -> str:
    """
    Return the winner of the game, or announce if the game resulted in a tie.
    """
    winner = 'No one'
    tictactoe = TicTacToeGame(True)
    tictactoe.current_state = state
    if tictactoe.is_winner('O'):
        winner = 'O'
    elif tictactoe.is_winner('X'):
        winner = 'X'
    return winner
c92cef3bc3214923107871d5f044df16baf63401
20,611
def _prensor_value_fetch(prensor_tree: prensor.Prensor):
    """Fetch function for PrensorValue. See the document in session_lib."""
    # pylint: disable=protected-access
    type_spec = prensor_tree._type_spec
    components = type_spec._to_components(prensor_tree)

    def _construct_prensor_value(component_values):
        return _prensor_value_from_type_spec_and_component_values(
            type_spec, iter(component_values))

    return components, _construct_prensor_value
ccea4a94fff5f17c6e650e1ac820ec6da1be023d
20,612
def request_validation_error(error):
    """Handles Value Errors from bad data"""
    message = str(error)
    app.logger.error(message)
    return {
        'status_code': status.HTTP_400_BAD_REQUEST,
        'error': 'Bad Request',
        'message': message
    }, status.HTTP_400_BAD_REQUEST
1d5c779286d83d756e1d73201f1274dbec7cf84b
20,614
def all(request):
    """Handle places list page."""
    places = Place.objects.all()
    context = {'places': places}
    return render(request, 'rental/list_place.html', context)
d978a4ec22004a1a863e57113639722eaf1f02cf
20,615
def get_key_by_value(dictionary, search_value):
    """
    Searches a value in a dictionary and returns the key of the first occurrence.

    :param dictionary: dictionary to search in
    :param search_value: value to search for
    """
    for key, value in dictionary.items():
        if value == search_value:
            return ugettext(key)
febad38e70c973de23ce4e1a5702df92860a6c2e
20,616
def _subtract_ten(x):
    """Subtracts 10 from x using control flow ops.

    This function is equivalent to "x - 10" but uses a tf.while_loop, in order
    to test the use of functions that involve control flow ops.

    Args:
        x: A tensor of integral type.

    Returns:
        A tensor representing x - 10.
    """
    def stop_condition(counter, x_minus_counter):
        del x_minus_counter  # unused
        return tf.less(counter, 10)

    def iteration(counter, x_minus_counter):
        return tf.add(counter, 1), tf.add(x_minus_counter, -1)

    initial_values = [tf.constant(0), x]
    return tf.while_loop(stop_condition, iteration, initial_values)[1]
f2db402e5c98251dc93036be60f02eb88a4d13d9
20,617
def load_fortune_file(f: str) -> list:
    """ load fortunes from a file and return it as list """
    saved = []
    try:
        with open(f, 'r') as datfile:
            text = datfile.read()
            for line in text.split('%'):
                if len(line.strip()) > 0:
                    saved.append(line)
    except OSError:
        app.logger.warning('fail to process file: {}'.format(f))
    return saved
824ddb0bcb34abf597fb317d10fa3eeab99a292e
20,618
def maskStats(wins, last_win, mask, maxLen): """ return a three-element list with the first element being the total proportion of the window that is masked, the second element being a list of masked positions that are relative to the windown start=0 and the window end = window length, and the third being the last window before breaking to expidite the next loop """ chrom = wins[0].split(":")[0] a = wins[1] L = wins[2] b = a + L prop = [0.0,[],0] try: for i in range(last_win, len(mask[chrom])): x, y = mask[chrom][i][0], mask[chrom][i][1] if y < a: continue if b < x: return prop else: # i.e. [a--b] and [x--y] overlap if a >= x and b <= y: return [1.0, [[0,maxLen]], i] elif a >= x and b > y: win_prop = (y-a)/float(b-a) prop[0] += win_prop prop[1].append([0,int(win_prop * maxLen)]) prop[2] = i elif b <= y and a < x: win_prop = (b-x)/float(b-a) prop[0] += win_prop prop[1].append([int((1-win_prop)*maxLen),maxLen]) prop[2] = i else: win_prop = (y-x)/float(b-a) prop[0] += win_prop prop[1].append([int(((x-a)/float(b-a))*maxLen), int(((y-a)/float(b-a))*maxLen)]) prop[2] = i return prop except KeyError: return prop
b5d75d2e86f1b21bf35cbc69d360cd1639c5527b
20,619
def dsoftmax(Z):
    """Given a (m,n) matrix, returns a (m,n,n) jacobian matrix"""
    m, n = np.shape(Z)
    softZ = softmax(Z)
    prodtensor = np.einsum("ij,ik->ijk", softZ, softZ)
    diagtensor = np.einsum('ij,jk->ijk', softZ, np.eye(n, n))
    return diagtensor - prodtensor
15296d493608dac1fc9843dd8a7d6eaaf29c4839
20,620
async def vbd_unplug(cluster_id: str, vbd_uuid: str): """Unplug from VBD""" try: session = create_session( _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters() ) vbd: VBD = VBD.get_by_uuid(session=session, uuid=vbd_uuid) if vbd is not None: ret = dict(success=vbd.unplug()) else: ret = dict(success=False) session.xenapi.session.logout() return ret except Failure as xenapi_error: raise HTTPException( status_code=500, detail=xenapi_failure_jsonify(xenapi_error) ) except Fault as xml_rpc_error: raise HTTPException( status_code=int(xml_rpc_error.faultCode), detail=xml_rpc_error.faultString, ) except RemoteDisconnected as rd_error: raise HTTPException(status_code=500, detail=rd_error.strerror)
8b36c55354b35470bceb47ef212aa183be09fad4
20,621
def calculate_age(created, now): """ Pprepare a Docker CLI-like output of image age. After researching `datetime`, `dateutil` and other libraries I decided to do this manually to get as close as possible to Docker CLI output. `created` and `now` are both datetime.datetime objects. """ age = {} rdelta = relativedelta.relativedelta(now, created) difference = now - created if rdelta.years > 0: age['number'] = rdelta.years age['unit'] = 'years' elif rdelta.years == 0 and difference >= timedelta(days=60): age['number'] = rdelta.months age['unit'] = 'months' elif rdelta.years == 0 and difference < timedelta(days=60) and difference >= timedelta(days=14): days = 0 if rdelta.months == 1: days = 30 days += rdelta.days weeks = round(days / 7) age['number'] = weeks age['unit'] = 'weeks' elif rdelta.years == 0 and difference < timedelta(days=14) and difference >= timedelta(days=1): age['number'] = rdelta.days age['unit'] = 'days' elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours >= 1: age['number'] = rdelta.hours age['unit'] = 'hours' elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes > 0: age['number'] = rdelta.minutes age['unit'] = 'minutes' elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds > 0: age['number'] = rdelta.seconds age['unit'] = 'seconds' elif rdelta.years == 0 and difference < timedelta(days=1) and rdelta.hours < 1 and rdelta.minutes <= 0 and rdelta.seconds <= 0: age['number'] = 1 age['unit'] = 'second' else: raise DkrlsError(f'Encountered age of an image which this CLI can\'t handle: {rdelta}') return age
f2b1a6fc643a78c9a2d3cdd0f497e05c3294eb03
20,622
def Maxout(x, num_unit):
    """
    Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.

    Args:
        x (tf.Tensor): a NHWC or NC tensor. Channel has to be known.
        num_unit (int): an int. Must be divisible by C.

    Returns:
        tf.Tensor: of shape NHW(C/num_unit) named ``output``.
    """
    input_shape = x.get_shape().as_list()
    ndim = len(input_shape)
    assert ndim == 4 or ndim == 2
    ch = input_shape[-1]
    assert ch is not None and ch % num_unit == 0
    if ndim == 4:
        x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch // num_unit, num_unit])
    else:
        x = tf.reshape(x, [-1, ch // num_unit, num_unit])
    return tf.reduce_max(x, ndim, name='output')
d10294d7ad180b47c4276e3bb0f43e7ac4a9fa3b
20,623
import re


def is_youtube_url(url: str) -> bool:
    """Checks if a string is a youtube url

    Args:
        url (str): youtube url

    Returns:
        bool: true or false
    """
    match = re.match(r"^(https?\:\/\/)?(www\.youtube\.com|youtu\.be)\/.+$", url)
    return bool(match)
97536b8e7267fb5a72c68f242b3f5d6cbd1b9492
20,624
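Quick checks of is_youtube_url above:

print(is_youtube_url("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))  # True
print(is_youtube_url("https://example.com/video"))                    # False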
def time_nanosleep():
    """ Delay for a number of seconds and nanoseconds"""
    raise NotImplementedError()
9ec91f2ef2656b5a481425dc65dc9f81a07386c2
20,625
import jinja2 def render_series_fragment(site_config): """ Adds "other posts in this series" fragment to series posts. """ series_fragment = open("_includes/posts_in_series.html", "r").read() for post_object in site_config["series_posts"]: print("Generating 'Other posts in this series' fragment for " + post_object[1]) category, post_name, page_url = post_object loader = jinja2.FileSystemLoader(searchpath="./") template = jinja2.Environment(loader=loader) rendered_series_text = template.from_string(series_fragment) posts_to_show = site_config["categories"].get(category) see_more_link = False if len(posts_to_show) > 10: see_more_link = True category_slug = ( category.replace(" ", "-").lower().replace("(", "").replace(")", "") ) rendered_series_text = rendered_series_text.render( posts_in_series=posts_to_show[:10], see_more_link=see_more_link, site=site_config, category_slug=category_slug, page={"url": page_url}, ) year_month_date = "/".join(post_name.split("-")[:3]) + "/" post_name = ( "-".join(post_name.split("-")[3:]).replace(".md", "").replace(".html", "") ) with open(OUTPUT + year_month_date + post_name + "/index.html", "r") as file: file_content = file.read() file_content = file_content.replace( "<!--- posts_in_series -->", rendered_series_text ) with open(OUTPUT + year_month_date + post_name + "/index.html", "w") as file: file.write(file_content) return series_fragment
6cf947148af2978e926d51e9007684b9580d2cb0
20,627
from importlib import import_module


def get_class_by_name(name):
    """Gets a class object by its name, e.g. sklearn.linear_model.LogisticRegression"""
    if name.startswith('cid.analytics'):
        # We changed package names in March 2017. This preserves compatibility with old models.
        name = name.replace('cid.analytics', 'analytics.core')
    elif name.startswith('cid.'):
        name = name.replace('cid.', 'analytics.')
    module, class_name = name.rsplit('.', 1)
    return getattr(import_module(module), class_name)
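# Hedged usage sketch: resolve a dotted path to the class object and
# instantiate it (assumes scikit-learn is installed; any importable class works).
cls = get_class_by_name('sklearn.linear_model.LogisticRegression')
model = cls()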
bf52eb8472e63cbb453183b57c5275d592665fc9
20,629
import functools def _single_optimize( direction, criterion, criterion_kwargs, params, algorithm, constraints, algo_options, derivative, derivative_kwargs, criterion_and_derivative, criterion_and_derivative_kwargs, numdiff_options, logging, log_options, error_handling, error_penalty, cache_size, scaling_options, ): """Minimize or maximize *criterion* using *algorithm* subject to *constraints*. See the docstring of ``_optimize`` for an explanation of all arguments. Returns: dict: The optimization result. """ # store all arguments in a dictionary to save them in the database later problem_data = { "direction": direction, # "criterion"-criterion, "criterion_kwargs": criterion_kwargs, "algorithm": algorithm, "constraints": constraints, "algo_options": algo_options, # "derivative"-derivative, "derivative_kwargs": derivative_kwargs, # "criterion_and_derivative"-criterion_and_derivative, "criterion_and_derivative_kwargs": criterion_and_derivative_kwargs, "numdiff_options": numdiff_options, "log_options": log_options, "error_handling": error_handling, "error_penalty": error_penalty, "cache_size": int(cache_size), } # partial the kwargs into corresponding functions criterion = functools.partial(criterion, **criterion_kwargs) if derivative is not None: derivative = functools.partial(derivative, **derivative_kwargs) if criterion_and_derivative is not None: criterion_and_derivative = functools.partial( criterion_and_derivative, **criterion_and_derivative_kwargs ) # process params and constraints params = add_default_bounds_to_params(params) for col in ["value", "lower_bound", "upper_bound"]: params[col] = params[col].astype(float) check_params_are_valid(params) # calculate scaling factor and offset if scaling_options not in (None, {}): scaling_factor, scaling_offset = calculate_scaling_factor_and_offset( params=params, constraints=constraints, criterion=criterion, **scaling_options, ) else: scaling_factor, scaling_offset = None, None # name and group column are needed in the dashboard but could lead to problems # if present anywhere else params_with_name_and_group = _add_name_and_group_columns_to_params(params) problem_data["params"] = params_with_name_and_group params_to_internal, params_from_internal = get_reparametrize_functions( params=params, constraints=constraints, scaling_factor=scaling_factor, scaling_offset=scaling_offset, ) # get internal parameters and bounds x = params_to_internal(params["value"].to_numpy()) lower_bounds, upper_bounds = get_internal_bounds( params=params, constraints=constraints, scaling_factor=scaling_factor, scaling_offset=scaling_offset, ) # process algorithm and algo_options if isinstance(algorithm, str): algo_name = algorithm else: algo_name = getattr(algorithm, "name", "your algorithm") if isinstance(algorithm, str): try: algorithm = AVAILABLE_ALGORITHMS[algorithm] except KeyError: proposed = propose_algorithms(algorithm, list(AVAILABLE_ALGORITHMS)) raise ValueError( f"Invalid algorithm: {algorithm}. Did you mean {proposed}?" 
) from None algo_options = _adjust_options_to_algorithms( algo_options, lower_bounds, upper_bounds, algorithm, algo_name ) # get convert derivative convert_derivative = get_derivative_conversion_function( params=params, constraints=constraints, scaling_factor=scaling_factor, scaling_offset=scaling_offset, ) # do first function evaluation first_eval = { "internal_params": x, "external_params": params, "output": criterion(params), } # fill numdiff_options with defaults numdiff_options = _fill_numdiff_options_with_defaults( numdiff_options, lower_bounds, upper_bounds ) # create and initialize the database if not logging: database = False else: database = _create_and_initialize_database( logging, log_options, first_eval, problem_data ) # set default error penalty error_penalty = _fill_error_penalty_with_defaults( error_penalty, first_eval, direction ) # create cache x_hash = hash_array(x) cache = {x_hash: {"criterion": first_eval["output"]}} # partial the internal_criterion_and_derivative_template internal_criterion_and_derivative = functools.partial( internal_criterion_and_derivative_template, direction=direction, criterion=criterion, params=params, reparametrize_from_internal=params_from_internal, convert_derivative=convert_derivative, derivative=derivative, criterion_and_derivative=criterion_and_derivative, numdiff_options=numdiff_options, database=database, database_path=logging, log_options=log_options, error_handling=error_handling, error_penalty=error_penalty, first_criterion_evaluation=first_eval, cache=cache, cache_size=cache_size, ) res = algorithm(internal_criterion_and_derivative, x, **algo_options) p = params.copy() p["value"] = params_from_internal(res["solution_x"]) res["solution_params"] = p if "solution_criterion" not in res: res["solution_criterion"] = criterion(p) if direction == "maximize": res["solution_criterion"] = -res["solution_criterion"] # in the long run we can get some of those from the database if logging was used. optional_entries = [ "solution_derivative", "solution_hessian", "n_criterion_evaluations", "n_derivative_evaluations", "n_iterations", "success", "reached_convergence_criterion", "message", ] for entry in optional_entries: res[entry] = res.get(entry, f"Not reported by {algo_name}") if logging: _log_final_status(res, database, logging, log_options) return res
9f349f8e1124da3a2747b3880969a90e76aad52a
20,630
def item_len(item): """return length of the string format of item""" return len(str(item))
7d68629a5c2ae664d267844fc90006a7f23df1ba
20,631
def get_progress_logger(): """Returns the swift progress logger""" return progress_logger
b1c0e8e206e2f051dcb97337dc51d4971fe0aa8b
20,632
def instantiate_me(spec2d_files, spectrograph, **kwargs):
    """
    Instantiate the CoAdd2d subclass appropriate for the provided
    spectrograph.

    The class must be subclassed from Coadd2d.  See :class:`Coadd2d` for
    the description of the valid keyword arguments.

    Args:
        spec2d_files (list):
            List of spec2d files to coadd.
        spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
            The instrument used to collect the data to be reduced.
        **kwargs:
            Passed to the constructor of the selected ``Coadd2d`` subclass.

    Returns:
        :class:`Coadd2d`: One of the classes with :class:`Coadd2d` as its
        base.
    """
    indx = [c.__name__ == (spectrograph.pypeline + 'Coadd2d') for c in Coadd2d.__subclasses__()]
    if not np.any(indx):
        msgs.error('Pipeline {0} is not defined!'.format(spectrograph.pypeline))
    return Coadd2d.__subclasses__()[np.where(indx)[0][0]](spec2d_files, spectrograph, **kwargs)
f9961231ead7c3ece5757e5b18dc5620a3492a40
20,634
def quoteattr(s, table=ESCAPE_ATTR_TABLE): """Escape and quote an attribute value. """ for c, r in table: if c in s: s = s.replace(c, r) return '"%s"' % s
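# Illustrative call, assuming ESCAPE_ATTR_TABLE (defined elsewhere in the
# module) maps '&', '<' and '"' to their XML entities; the result is the
# escaped value wrapped in double quotes.
quoteattr('1 < 2')  # -> '"1 &lt; 2"'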
7af3e8ed6bfc0c23a957881ca41065d24cb288d5
20,635
def is_numeric(array): """Return False if any value in the array or list is not numeric Note boolean values are taken as numeric""" for i in array: try: float(i) except ValueError: return False else: return True
2ab0bb3e6c35e859e54e435671b5525c6392f66c
20,636
def reductions_right(collection, callback=None, accumulator=None): """This method is like :func:`reductions` except that it iterates over elements of a `collection` from right to left. Args: collection (list|dict): Collection to iterate over. callback (mixed): Callback applied per iteration. accumulator (mixed, optional): Initial value of aggregator. Default is to use the result of the first iteration. Returns: list: Results of each reduction operation. Example: >>> reductions_right([1, 2, 3, 4], lambda total, x: total ** x) [64, 4096, 4096] Note: The last element of the returned list would be the result of using :func:`reduce_`. .. versionadded:: 2.0.0 """ return reductions(collection, callback, accumulator, from_right=True)
eba2de662a6386d609da8cf3011010ae822c0440
20,637
import math def pelt_settling_time(margin=1, init=0, final=PELT_SCALE, window=PELT_WINDOW, half_life=PELT_HALF_LIFE, scale=PELT_SCALE): """ Compute an approximation of the PELT settling time. :param margin: How close to the final value we want to get, in PELT units. :type margin_pct: float :param init: Initial PELT value. :type init: float :param final: Final PELT value. :type final: float :param window: PELT window in seconds. :type window: float :param half_life: PELT half life, in number of windows. :type half_life: int :param scale: PELT scale. :type scale: float .. note:: The PELT signal is approximated as a first order filter. This does not take into account the averaging inside a window, but the window is small enough in practice for that effect to be negligible. """ tau = _pelt_tau(half_life, window) # Response of a first order low pass filter: # y(t) = u(t) * (1 - exp(-t/tau)) # We want to find `t` such as the output y(t) is as close as we want from # the input u(t): # A * u(t) = u(t) * (1 - exp(-t/tau)) # A is how close from u(t) we want the output to get after a time `t` # From which follows: # A = (1 - exp(-t/tau)) # t = -tau * log(1-A) # Since the equation we have is for a step response, i.e. from 0 to a final # value delta = abs(final - init) # Since margin and delta are in the same unit, we don't have to normalize # them to `scale` first. relative_margin = (margin / delta) A = 1 - relative_margin settling_time = - tau * math.log(1 - A) return settling_time
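# Standalone numeric sketch of the same first-order-filter estimate. The time
# constant below is an assumed, illustrative value, not the PELT default
# (which would come from _pelt_tau, window and half_life).
import math

tau = 0.046                            # seconds, assumed
init, final, margin = 0.0, 1024.0, 1.0
delta = abs(final - init)
t = -tau * math.log(margin / delta)    # same as -tau * log(1 - A)
print(round(t, 3))                     # ~0.319 s for these numbers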
c8d53d1132bc45278f2c127ed95ce10cfea0498b
20,638
def InstancesOverlap(instanceList,instance): """Returns True if instance contains a vertex that is contained in an instance of the given instanceList.""" for instance2 in instanceList: if InstanceOverlap(instance,instance2): return True return False
634312b7e8d2ce4e36826410fcd1f6c3c06a40ce
20,640
def calc_qm_lea(p_zone_ref, temp_zone, temp_ext, u_wind_site, dict_props_nat_vent): """ Calculation of leakage infiltration and exfiltration air mass flow as a function of zone indoor reference pressure :param p_zone_ref: zone reference pressure (Pa) :param temp_zone: air temperature in ventilation zone (°C) :param temp_ext: exterior air temperature (°C) :param u_wind_site: wind velocity (m/s) :param dict_props_nat_vent: dictionary containing natural ventilation properties of zone :returns: - qm_lea_in : air mass flow rate into zone through leakages (kg/h) - qm_lea_out : air mass flow rate out of zone through leakages (kg/h) """ # get default leakage paths from locals coeff_lea_path = dict_props_nat_vent['coeff_lea_path'] height_lea_path = dict_props_nat_vent['height_lea_path'] # lookup wind pressure coefficients for leakage paths from locals coeff_wind_pressure_path = dict_props_nat_vent['coeff_wind_pressure_path_lea'] # calculation of pressure difference at leakage path delta_p_path = calc_delta_p_path(p_zone_ref, height_lea_path, temp_zone, coeff_wind_pressure_path, u_wind_site, temp_ext) # calculation of leakage air volume flow at path qv_lea_path = calc_qv_lea_path(coeff_lea_path, delta_p_path) # Eq. (65) in [1], infiltration is sum of air flows greater zero qv_lea_in = qv_lea_path[np.where(qv_lea_path > 0)].sum() # Eq. (66) in [1], exfiltration is sum of air flows smaller zero qv_lea_out = qv_lea_path[np.where(qv_lea_path < 0)].sum() # conversion to air mass flows according to 6.4.3.8 in [1] # Eq. (67) in [1] qm_lea_in = qv_lea_in * calc_rho_air(temp_ext) # Eq. (68) in [1] qm_lea_out = qv_lea_out * calc_rho_air(temp_zone) return qm_lea_in, qm_lea_out
4d3f4789b3faedf68b9de3b3e6c8f17bcb478a51
20,641
async def ban(bon):
    """ For .ban command, bans the replied/tagged person """
    # Here lies the sanity check
    chat = await bon.get_chat()
    admin = chat.admin_rights
    creator = chat.creator

    # Well
    if not (admin or creator):
        return await bon.edit(NO_ADMIN)

    user, reason = await get_user_from_event(bon)
    if not user:
        return

    # Announce that we're going to whack the pest
    await bon.edit("**Banning...**")

    try:
        await bon.client(EditBannedRequest(bon.chat_id, user.id, BANNED_RIGHTS))
    except BadRequestError:
        return await bon.edit(NO_PERM)

    # Helps ban group join spammers more easily
    try:
        reply = await bon.get_reply_message()
        if reply:
            await reply.delete()
    except BadRequestError:
        return await bon.edit(
            "**I don't have rights to delete messages, but the user was banned!**"
        )

    # Delete message and then tell that the command
    # is done gracefully
    # Shout out the ID, so that fedadmins can fban later
    if reason:
        await bon.edit(f"**{str(user.id)}** was banned!\nReason: {reason}")
    else:
        await bon.edit(f"**{str(user.id)}** was banned!")

    # Announce to the logging group if we have banned the person
    # successfully!
    if BOTLOG:
        await bon.client.send_message(
            BOTLOG_CHATID,
            "#BAN\n"
            f"USER: [{user.first_name}](tg://user?id={user.id})\n"
            f"CHAT: {bon.chat.title}(`{bon.chat_id}`)",
        )
f79f16c5e2722f576511a528f546a7f87f7e5236
20,642
def read_offset(rt_info):
    """
    Get the offsets of all partitions.

    :param rt_info: detailed information of the result table (rt)
    :return: offset_msgs and offset_info
    """
    rt_id = rt_info[RESULT_TABLE_ID]
    task_config = get_task_base_conf_by_name(f"{HDFS}-table_{rt_id}")
    if not task_config:
        return {}

    try:
        partition_num = task_config[TASKS_MAX]
        webhdfs_addr = _get_webhdfs_addr_by_rt(rt_info)
        offset_dir = get_offset_dir(
            webhdfs_addr, task_config[GROUP_ID], task_config[NAME], task_config[TOPICS_DIR], partition_num
        )
        offset_msgs = {}
        if offset_dir:
            for p in range(partition_num):
                files = _get_hdfs_dir_files(webhdfs_addr, f"{offset_dir}/{p}")
                offset = get_max_offset(files) if files else "-1"
                topic_partition = f"table_{rt_id}-{p}"
                offset_msgs[topic_partition] = offset

        logger.info(f"rt {rt_id} get offset_msgs from hdfs offset dir: {offset_msgs}")
        return offset_msgs
    except Exception:
        logger.warning(f"failed to get offset_msgs for rt {rt_id}", exc_info=True)
        return {}
cc890301d4403a7815480ad0b414e16e26283fa7
20,643
def _CalculateElementMaxNCharge(mol,AtomicNum=6): """ ################################################################# **Internal used only** Most negative charge on atom with atomic number equal to n ################################################################# """ Hmol=Chem.AddHs(mol) GMCharge.ComputeGasteigerCharges(Hmol,iter_step) res=[] for atom in Hmol.GetAtoms(): if atom.GetAtomicNum()==AtomicNum: res.append(float(atom.GetProp('_GasteigerCharge'))) if res==[]: return 0 else: return min(res)
f7bd9957c6e958f31cccc2bc20d6651baaf2f5fa
20,644
def check_stability(lambda0, W, mu, tau, dt_max): """Check if the model is stable for given parameter estimates.""" N, _ = W.shape model = NetworkPoisson(N=N, dt_max=dt_max) model.lamb = lambda0 model.W = W model.mu = mu model.tau = tau return model.check_stability(return_value=True)
d417bdba0f236edf5f5c9e17c09e2d2a93bf2b4a
20,646
import re


def pid2id(pid):
    """convert pid to slurm jobid"""
    with open('/proc/%s/cgroup' % pid) as f:
        for line in f:
            # raw string avoids invalid-escape warnings for the backslashes
            m = re.search(r'.*slurm\/uid_.*\/job_(\d+)\/.*', line)
            if m:
                return m.group(1)
    return None
e7d0ee60d5a8930b8a6f761d5c27451a28b6ec2a
20,647
import copy def multiaxis_scatterplot(xdata, ydata, *, axes_loc, xlabel='', ylabel='', title='', num_cols=1, num_rows=1, saveas='mscatterplot', **kwargs): """ Create a scatter plot with multiple axes. :param xdata: list of arraylikes, passed on to the plotting functions for each axis (x-axis) :param ydata: list of arraylikes, passed on to the plotting functions for each axis (y-axis) :param axes_loc: list of tuples of two integers, location of each axis :param xlabel: str or list of str, labels for the x axis :param ylabel: str or list of str, labels for the y-axis :param title: str or list of str, titles for the subplots :param num_rows: int, how many rows of axis are created :param num_cols: int, how many columns of axis are created :param saveas: str filename of the saved file Special Kwargs: :param subplot_params: dict with integer keys, can contain all valid kwargs for :py:func:`multiple_scatterplots()` with the integer key denoting to which subplot the changes are applied :param axes_kwargs: dict with integer keys, additional arguments to pass on to `subplot2grid` for the creation of each axis (e.g colspan, rowspan) Other Kwargs will be passed on to all :py:func:`multiple_scatterplots()` calls (If they are not overwritten by parameters in `subplot_params`). """ #convert parameters to list of parameters for subplots subplot_params = kwargs.pop('subplot_params', {}) axes_kwargs = kwargs.pop('axes_kwargs', {}) param_list = [None] * len(axes_loc) for indx, val in enumerate(param_list): if indx in subplot_params: param_list[indx] = subplot_params[indx] else: param_list[indx] = {} if indx in axes_kwargs: param_list[indx]['axes_kwargs'] = axes_kwargs[indx] if not isinstance(xlabel, list): param_list[indx]['xlabel'] = xlabel else: param_list[indx]['xlabel'] = xlabel[indx] if not isinstance(ylabel, list): param_list[indx]['ylabel'] = ylabel else: param_list[indx]['ylabel'] = ylabel[indx] if not isinstance(title, list): param_list[indx]['title'] = title else: param_list[indx]['title'] = title[indx] general_keys = {'figure_kwargs', 'show', 'save_plots'} general_info = {key: val for key, val in kwargs.items() if key in general_keys} kwargs = {key: val for key, val in kwargs.items() if key not in general_keys} plot_params.set_parameters(**general_info) #figsize is automatically scaled with the shape of the plot plot_shape = (num_cols, num_rows) plot_params['figure_kwargs'] = { 'figsize': ([plot_shape[indx] * size for indx, size in enumerate(plot_params['figure_kwargs']['figsize'])]) } plot_shape = tuple(reversed(plot_shape)) fig = plt.figure(**plot_params['figure_kwargs']) axis = [] for indx, subplot_data in enumerate(zip(axes_loc, xdata, ydata, param_list)): location, x, y, params = subplot_data subplot_kwargs = copy.deepcopy(kwargs) subplot_kwargs.update(params) ax = plt.subplot2grid(plot_shape, location, fig=fig, **subplot_kwargs.pop('axes_kwargs', {})) with NestedPlotParameters(plot_params): ax = multiple_scatterplots(x, y, axis=ax, **subplot_kwargs, save_plots=False, show=False) axis.append(ax) plot_params.save_plot(saveas) return axis
22d9aa3b0de496c498535b2b4bf663be429b8f48
20,649
import torch
import torch.nn.functional as F


def log1p_mse_loss(estimate: torch.Tensor, target: torch.Tensor,
                   reduce: str = 'sum'):
    """
    Computes the log1p-mse loss between `x` and `y` as defined in [1], eq. 4.
    The `reduction` only affects the speaker dimension; the time dimension is
    always reduced by a mean operation as in [1].

    It has the advantage of not going to negative infinity in case of perfect
    reconstruction while keeping the logarithmic nature.

    The log1p-mse loss is defined as [1]:

    .. math::

        L^{\\text{T-L1PMSE}} = \\log_{10} \\left( 1 + \\frac{1}{T} \\sum_t |x(t) - y(t)|^2 \\right)

    Args:
        estimate (... x T): The estimated signal
        target (... x T, same as estimate): The target signal
        reduce: How to reduce over the speaker dimension (passed to ``_reduce``)

    Returns:
        The log1p-mse error between `estimate` and `target`

    References:
        [1] Thilo von Neumann, Christoph Boeddeker, Lukas Drude, Keisuke
            Kinoshita, Marc Delcroix, Tomohiro Nakatani, and Reinhold
            Haeb-Umbach. "Multi-talker ASR for an unknown number of sources:
            Joint training of source counting, separation and ASR".
            http://arxiv.org/abs/2006.02786.
    """
    # Use the PyTorch implementation for MSE, should be the fastest
    return _reduce(
        torch.log10(
            1 + F.mse_loss(estimate, target, reduction='none').mean(dim=-1)),
        reduce=reduce
    )
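# Standalone sketch (assumes torch is available) of the key property noted
# above: for perfect reconstruction the loss is log10(1 + 0) == 0 rather than
# diverging to -inf.
import torch

est = torch.zeros(2, 100)
tgt = torch.zeros(2, 100)
print(torch.log10(1 + ((est - tgt) ** 2).mean(dim=-1)))   # tensor([0., 0.])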
7c67a67dcf6f6d14bb712d5a92b54ea979f7a73c
20,650
def quaternion_inverse(quaternion: np.ndarray) -> np.ndarray: """Return inverse of quaternion.""" return quaternion_conjugate(quaternion) / np.dot(quaternion, quaternion)
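# Minimal check (hedged): `quaternion_conjugate` is not shown above, so a
# placeholder with an assumed (w, x, y, z) ordering is inlined purely for
# illustration. For a unit quaternion the inverse equals the conjugate.
import numpy as np

def quaternion_conjugate(q: np.ndarray) -> np.ndarray:
    return np.array([q[0], -q[1], -q[2], -q[3]])

q = np.array([0.5, 0.5, 0.5, 0.5])    # a unit quaternion
print(np.allclose(quaternion_inverse(q), quaternion_conjugate(q)))   # True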
b71c5b544199b02a76362bc42db900b157ea80ec
20,651
def _make_indexable(iterable): """Ensure iterable supports indexing or convert to an indexable variant. Convert sparse matrices to csr and other non-indexable iterable to arrays. Let `None` and indexable objects (e.g. pandas dataframes) pass unchanged. Parameters ---------- iterable : {list, dataframe, array, sparse} or None Object to be converted to an indexable iterable. """ if issparse(iterable): return mt.tensor(iterable) elif hasattr(iterable, "iloc"): if iterable.ndim == 1: return md.Series(iterable) else: return md.DataFrame(iterable) elif hasattr(iterable, "__getitem__"): return mt.tensor(iterable) elif iterable is None: return iterable return mt.tensor(iterable)
29d067826e0a863b06b1fb0295b12d57ecaea00d
20,652
def batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for batch normalization.

    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming data.
    During training we also keep an exponentially decaying running mean of the
    mean and variance of each feature, and these averages are used to normalize
    data at test-time.

    At each timestep we update the running averages for mean and variance using
    an exponential decay based on the momentum parameter:

    running_mean = momentum * running_mean + (1 - momentum) * sample_mean
    running_var = momentum * running_var + (1 - momentum) * sample_var

    Note that the batch normalization paper suggests a different test-time
    behavior: they compute sample mean and variance for each feature using a
    large number of training images rather than using a running average. For
    this implementation we have chosen to use running averages instead since
    they do not require an additional estimation step; the torch7
    implementation of batch normalization also uses running averages.

    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var: Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: of shape (N, D)
    - cache: A tuple of values needed in the backward pass
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)

    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))

    out, cache = None, None
    if mode == 'train':
        # Forward pass
        # Step 1 - shape of mu (D,)
        mu = 1 / float(N) * np.sum(x, axis=0)

        # Step 2 - shape of xmu (N,D)
        xmu = x - mu

        # Step 3 - shape of carre (N,D)
        carre = xmu**2

        # Step 4 - shape of var (D,)
        var = 1 / float(N) * np.sum(carre, axis=0)

        # Step 5 - Shape sqrtvar (D,)
        sqrtvar = np.sqrt(var + eps)

        # Step 6 - Shape invvar (D,)
        invvar = 1. / sqrtvar

        # Step 7 - Shape va2 (N,D)
        va2 = xmu * invvar

        # Step 8 - Shape va3 (N,D)
        va3 = gamma * va2

        # Step 9 - Shape out (N,D)
        out = va3 + beta

        running_mean = momentum * running_mean + (1.0 - momentum) * mu
        running_var = momentum * running_var + (1.0 - momentum) * var

        cache = (mu, xmu, carre, var, sqrtvar, invvar,
                 va2, va3, gamma, beta, x, bn_param)
    elif mode == 'test':
        mu = running_mean
        var = running_var
        xhat = (x - mu) / np.sqrt(var + eps)
        out = gamma * xhat + beta
        cache = (mu, var, gamma, beta, bn_param)
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)

    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var

    return out, cache
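# Hedged usage sketch: run the forward pass above in training mode and confirm
# the output is roughly zero-mean, unit-variance per feature (gamma=1, beta=0).
import numpy as np

np.random.seed(0)
x = np.random.randn(4, 3) * 5 + 2
gamma, beta = np.ones(3), np.zeros(3)
bn_param = {'mode': 'train'}
out, cache = batchnorm_forward(x, gamma, beta, bn_param)
print(out.mean(axis=0))   # ~0 per feature
print(out.std(axis=0))    # ~1 per feature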
b36ea808c5865eb92a81464c3efe14ab9325d01e
20,653
def chunking():
    """ transforms dataframe of full texts into a list of chunked texts of 2000 tokens each """
    word_list = []
    chunk_list = []
    text_chunks = []

    # split every book's text into a list of words
    for entry in range(len(df)):
        word_list.append(df.text[entry].split())

    # create chunks of 2000 words each
    for entry in word_list:
        chunk_list.append(list(divide_chunks(entry, 2000)))

    # flatten chunk list from a nested list to a list
    text_chunks = [item for l in chunk_list for item in l]

    print("Texts have been divided into chunks of 2000 tokens each for easier preprocessing")

    return text_chunks
66e1976b3bd9e88420fab370f1eee9053986bd56
20,654
def generate_random_string():
    """Create a random 8-character string of lowercase letters and digits for users."""
    letters = ascii_lowercase + digits
    return ''.join(choice(letters) for i in range(8))
027a9d50e2ff5b80b7344d35e492ace7c65366e8
20,655
def contains_message(response, message): """ Inspired by django's self.assertRaisesMessage Useful for confirming the response contains the provided message, """ if len(response.context['messages']) != 1: return False full_message = str(list(response.context['messages'])[0]) return message in full_message
4afcdba84603b8b53095a52e769d0a8e3f7bbb17
20,656
def definition(): """To be used by UI.""" sql = f""" SELECT c.course_id, c.curriculum_id, cs.course_session_id, description + ' year ' +CAST(session as varchar(2)) as description, CASE WHEN conf.course_id IS NULL THEN 0 ELSE 1 END as linked, 0 as changed FROM ({select_all_and_default(Course)}) as c LEFT JOIN c_course_session cs ON cs.curriculum_id = c.curriculum_id LEFT JOIN c_course_config conf ON conf.course_id = c.course_id AND conf.course_session_id = cs.course_session_id""" return sql
ac67783943604e0e83bd4ccfc2b704737e427edd
20,657
def exec_psql_cmd(command, host, port, db="template1", tuples_only=True):
    """
    Sets up execution environment and runs the HAWQ queries
    """
    src_cmd = "export PGPORT={0} && source {1}".format(port, hawq_constants.hawq_greenplum_path_file)

    # psql's -t/--tuples-only flag suppresses headers and footers, so it is
    # added exactly when tuples_only is requested.
    if tuples_only:
        cmd = src_cmd + " && psql -t -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)
    else:
        cmd = src_cmd + " && psql -d {0} -c \\\\\\\"{1};\\\\\\\"".format(db, command)

    retcode, out, err = exec_ssh_cmd(host, cmd)

    if retcode:
        Logger.error("SQL command execution failed: {0}\nReturncode: {1}\nStdout: {2}\nStderr: {3}".format(cmd, retcode, out, err))
        raise Fail("SQL command execution failed.")

    Logger.info("Output:\n{0}".format(out))
    return retcode, out, err
453f0c2ef0dfdf2a5d03b22d4a6fbd03282dd72a
20,658
def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray: # pylint: disable=no-member """Returns a `NumPy` array from a `CARLA` semantic segmentation image. Args: image: The `CARLA` semantic segmented image. Returns: A `NumPy` array representation of the image. """ image.convert(carla.ColorConverter.CityScapesPalette) # pylint: disable=no-member array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8")) array = array.astype(np.float32) / 255 array = np.reshape(array, (image.height, image.width, 4)) array = array[:, :, :3] array = array[:, :, ::-1] return array
f191d3f9700b281178f395726d649e90dfc57bb7
20,659
import re def since(version): """A decorator that annotates a function to append the version of skutil the function was added. This decorator is an adaptation of PySpark's. Parameters ---------- version : str, float or int The version the specified method was added to skutil. Examples -------- >>> @since('0.1.5') ... def some_fun(): ... '''Some docstring''' ... return None ... >>> >>> some_fun.__doc__ # doctest: +SKIP 'Some docstring\n\n.. versionadded:: 0.1.5' .. versionadded:: 0.1.5 """ indent_p = re.compile(r'\n( +)') def deco(f): indents = indent_p.findall(f.__doc__) indent = ' ' * (min(len(m) for m in indents) if indents else 0) f.__doc__ = f.__doc__.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version) return f return deco
e6b29b5e4c67ba4a213b183a0b79a1f16a85d81c
20,660
def get_dMdU(): """Compute dMdU""" dMdC = form_nd_array("dMdC", [3,3,3,3,3]) dMdPsi = form_nd_array("dMdPsi", [3,3,3,3,3]) dMdGamma = form_nd_array("dMdGamma",[3,3,3,3,3,3]) dCdU = form_symb_dCdU() dPsidU = form_symb_dPhidU() dGammadU = form_symb_dGammadU() dMdU = form_nd_array("dMdU",[3,3,3,8*12]) for I in range(3): for J in range(3): for K in range(3): for L in range(8*12): dMdU[I,J,K,L] = 0 for O in range(3): for P in range(3): dMdU[I,J,K,L] += dMdC[I,J,K,O,P]*dCdU[O,P,L] + dMdPsi[I,J,K,O,P]*dPsidU[O,P,L] for Q in range(3): dMdU[I,J,K,L] += dMdGamma[I,J,K,O,P,Q]*dGammadU[O,P,Q,L] tmp = [get_matrix_form_TOT(dCdU)[:,:12], get_matrix_form_TOT(dPsidU)[:,:12],\ get_matrix_form_FOT(dGammadU)[:,:12], get_matrix_form_VOT(dMdC),\ get_matrix_form_VOT(dMdPsi), get_matrix_form_VIOT(dMdGamma)] symb = ["dCdU","dPsidU","dGammadU","dMdC","dMdPsi","dMdGamma"] [implementation_extract_matrix(t,s,"I","I") for t,s in zip(tmp,symb)] implementation_print_matrix(get_matrix_form_FOT(dMdU)[:,:12],"dMdU","I","I") return dMdU
55d6dedc5311c8a2a30c44569508bd7687400cb5
20,661
def get_group_value_ctx_nb(sc_oc): """Get group value from context. Accepts `vectorbt.portfolio.enums.SegmentContext` and `vectorbt.portfolio.enums.OrderContext`. Best called once from `segment_prep_func_nb`. To set the valuation price, change `last_val_price` of the context in-place. !!! note Cash sharing must be enabled.""" if not sc_oc.cash_sharing: raise ValueError("Cash sharing must be enabled") return get_group_value_nb( sc_oc.from_col, sc_oc.to_col, sc_oc.last_cash[sc_oc.group], sc_oc.last_shares, sc_oc.last_val_price )
0646e7a26b36af42ee38196e0ee60e3684da2d16
20,662
import math import torch import scipy def motion_blur_generate_kernel(radius, angle, sigma): """ Args: radius angle (float): Radians clockwise from the (x=1, y=0) vector. This is how ImageMagick's -motion-blur filter accepts angles, as far as I can tell. >>> mb_1_0_inf_expected = torch.ones(3) / 3 >>> mb_1_0_inf = motion_blur_generate_kernel(1, 0, np.inf)[0] >>> assert torch.all(torch.isclose(mb_1_0_inf[0], mb_1_0_inf_expected)) >>> g_3_1 = torch.from_numpy(scipy.signal.gaussian(5, 1)[2:]).float() >>> g_3_1 /= g_3_1.sum() >>> mb_1_0_1 = motion_blur_generate_kernel(1, 0, 1)[0] >>> assert torch.all(mb_1_0_1[0] == g_3_1), (mb_1_0_1[0], g_3_1) >>> assert torch.all(mb_1_0_1[1] == 0) >>> assert torch.all(mb_1_0_1[2] == 0) """ # Make angles be counterclockwise from (x=1, y=0) vector to maintain sanity. angle = 2 * np.pi - angle # Make all angles lie in [0, 2*pi] if angle < 0: angle += math.ceil(angle / (2 * np.pi)) * 2*np.pi if angle > 2 * np.pi: angle = angle % (2 * np.pi) size = 2 * radius + 1 kernel = torch.zeros((size, size)) # Gaussian centered at 0th element. kernel_1d = scipy.signal.gaussian(size * 2 - 1, sigma)[size-1:] direction_up = 0 <= angle <= np.pi direction_right = (angle < np.pi / 2) or (angle > 3 / 2 * np.pi) cy = size - 1 if direction_up else 0 cx = 0 if direction_right else size - 1 # dy is relative to matrix coordinates, so, e.g., angle of np.pi/4 should # be a line going up => dy should be negative. dx, dy = np.cos(angle).item(), -np.sin(angle).item() for i in range(size): # *o*ffset_*x*, *o*ffset_*y* ox, oy = dx * i, dy * i x = min(cx + round(ox), size) y = min(cy + round(oy), size) assert x >= 0, f'x={x} should be >= 0!' assert y >= 0, f'y={y} should be >= 0!' kernel[y, x] = kernel_1d[i] kernel /= kernel.sum() return kernel, cy, cx
ff4e939d2ffbc91b6ef6af2ca11aceb1d32df594
20,663
def substitute_crypto_to_req(req): """Replace crypto requirements if customized.""" crypto_backend = get_crypto_req() if crypto_backend is None: return req def is_not_crypto(r): CRYPTO_LIBS = PYCRYPTO_DIST, "cryptography" return not any(r.lower().startswith(c) for c in CRYPTO_LIBS) return [r for r in req if is_not_crypto(r)] + [crypto_backend]
0e1836120f52981c3ff126038c0c74b9da94aa7f
20,664
def remove_att(doc_id, doc_rev, att_id, **kwargs): """Delete an attachment. http://docs.couchdb.org/en/stable/api/document/attachments.html#delete--db-docid-attname :param str doc_id: The attachment document. :param str doc_rev: The document revision. :param str att_id: The attachment to remove. :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes. :rtype: (str, str, dict) """ if ("params" not in kwargs) or (not isinstance(kwargs["params"], dict)): kwargs["params"] = {} path = urljoin(utils.encode_document_id(doc_id), utils.encode_attachment_id(att_id)) kwargs["params"]["rev"] = doc_rev return "DELETE", path, kwargs
2b9361468baf4dc2e358b2fa2f4c43403556cd40
20,665