content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def memoize_with_hashable_args(func): """Decorator for fast caching of functions which have hashable args. Note that it will convert np.NaN to None for caching to avoid this common case causing a cache miss. """ _cached_results_ = {} hash_override = getattr(func, "__hash_override__", None) if hash_override is None: hash_override = get_hash(func) @wraps(func) def memoized(*args): try: lookup_args = tuple(x if pd.notnull(x) else None for x in args) res = _cached_results_[lookup_args] except KeyError: res = func(*args) _cached_results_[lookup_args] = res return res memoized._cached_results_ = _cached_results_ # pylint: disable=protected-access memoized.__hash_override__ = hash_override return memoized
b5e55b35042688d9131e05e36a56bf0f6515f336
19,064
def orthogonalize(vec1, vec2): """Given two vectors vec1 and vec2, project out the component of vec1 that is along the vec2-direction. @param[in] vec1 The projectee (i.e. output is some modified version of vec1) @param[in] vec2 The projector (component subtracted out from vec1 is parallel to this) @return answer A copy of vec1 but with the vec2-component projected out. """ v2u = vec2/np.linalg.norm(vec2) return vec1 - v2u*np.dot(vec1, v2u)
aceca85edfc6ed4a6c3b21cb169f993b0b30c889
19,065
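A minimal NumPy sketch of the projection above, re-implemented inline so it runs standalone; the example vectors are arbitrary.

import numpy as np

def orthogonalize(vec1, vec2):
    # Remove from vec1 the component parallel to vec2.
    v2u = vec2 / np.linalg.norm(vec2)
    return vec1 - v2u * np.dot(vec1, v2u)

a = np.array([3.0, 4.0, 0.0])
b = np.array([1.0, 0.0, 0.0])
out = orthogonalize(a, b)
print(out)             # [0. 4. 0.]
print(np.dot(out, b))  # ~0.0, nothing left along b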
import re from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer def tokenize(text): """ Function to process text data taking the following steps: 1) normalization and punctuation removal: convert to lower case and remove punctuation 2) tokenization: splitting each sentence into a sequence of words 3) stop words removal: removal of words which do not add meaning to the sentence 4) lemmatization: reducing words to their root form Args: text (str): string with message Returns: clean_tokens: cleaned tokens of the message with word list """ # normalize case and remove punctuation text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # tokenize text and initiate lemmatizer tokens = word_tokenize(text) lemmatizer = WordNetLemmatizer() # remove stopwords tokens = [w for w in tokens if w not in stopwords.words('english')] # iterate through each token clean_tokens = [] for tok in tokens: # lemmatize and remove leading/ trailing white space clean_tok = lemmatizer.lemmatize(tok).strip() clean_tokens.append(clean_tok) return clean_tokens
769949bc2d4a23d4064b8addcaa7dbfc29346549
19,066
def extract_text(bucketname, filepath): """Return OCR data associated with filepaths""" textract = boto3.client('textract') response = textract.detect_document_text( Document={ 'S3Object': { 'Bucket': bucketname, 'Name': filepath } }) return response
fde7c1bc99003bf8f094538d402a66b6d0c8bcb4
19,067
def box1_input(input): """uses above to return input to player 1""" return get_input(1, input)
c619be2f73fc124eb3198c27542af014eeffd3f8
19,068
def _get_xy_from_geometry(df): """ Return a numpy array with two columns, where the first holds the `x` geometry coordinate and the second column holds the `y` geometry coordinate """ # NEW: use the centroid.x and centroid.y to support Polygon() and Point() geometries x = df.geometry.centroid.x y = df.geometry.centroid.y return np.column_stack((x, y))
6a1345607d3c75190dd9fd22ea45aad82901282a
19,069
def create_styled_figure( title, name=None, tooltips=None, plot_width=PLOT_WIDTH, ): """Return a styled, empty figure of predetermined height and width. Args: title (str): Title of the figure. name (str): Name of the plot for later retrieval by bokeh. If not given the title is set as name tooltips (list, optional): List of bokeh tooltips to add to the figure. Returns: fig (bokeh Figure) """ assert plot_width is not None name = name if name is not None else title fig = figure( plot_height=PLOT_HEIGHT, plot_width=plot_width, title=title.title(), tooltips=tooltips, name=name, y_axis_type="linear", sizing_mode="scale_width", ) fig.title.text_font_size = "15pt" # set minimum borders fig.min_border_left = MIN_BORDER_LEFT fig.min_border_right = MIN_BORDER_RIGHT fig.min_border_top = MIN_BORDER_TOP fig.min_border_bottom = MIN_BORDER_BOTTOM # remove toolbar fig.toolbar_location = TOOLBAR_LOCATION # remove grid fig.grid.visible = GRID_VISIBLE # remove minor ticks fig.axis.minor_tick_line_color = MINOR_TICK_LINE_COLOR # remove tick lines fig.axis.major_tick_out = MAJOR_TICK_OUT fig.axis.major_tick_in = MAJOR_TICK_IN # remove outline fig.outline_line_width = OUTLINE_LINE_WIDTH return fig
caeb7eb887d84c5e1ebaf83b01a71ce15917a27f
19,070
def render_text(string, padding=5, width=None, height=None, size=12, font="Arial", fgcolor=(0, 0, 0), bgcolor=None): """ Render text to an image and return it Not specifying bgcolor will give a transparent image, but that will take a *lot* more work to build. Specifying a bgcolor, width, and height will heavily optimize things. """ actor = text.text_actor(string, fgcolor, size, font) if bgcolor is None: mask = True # Set it to the opposite of fgcolor so we can mask using it bgcolor = (1 - fgcolor[0], 1 - fgcolor[1], 1 - fgcolor[2]) else: mask = False lines = string.split("\n") if width is None: # EM is defined as the square of the line height, and is the guide for making fonts # We can use that as an upper bound (assuming font size is ~ line # height) width = size * max([len(s) for s in lines]) if height is None: height = size * len(lines) image = actor_to_image(actor, bgcolor, width, height) if mask: image = mask_color(image, bgcolor) image = crop_blank_space(image) width, height, _ = image.GetDimensions() return pad_image( image, pad_width=width + padding * 2, pad_height=height + padding * 2) else: return image
9402aa9cbcbb920b73723d74b944f5940db9e0e0
19,071
def pretty_spectrogram(d,log = True, thresh= 5, fft_size = 512, step_size = 64): """ creates a spectrogram log: take the log of the spectrgram thresh: threshold minimum power for log spectrogram """ specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False, compute_onesided=True)) if log == True: specgram /= specgram.max() # volume normalize to max 1 specgram = np.log10(specgram) # take log specgram[specgram < -thresh] = -thresh # set anything less than the threshold as the threshold else: specgram[specgram < thresh] = thresh # set anything less than the threshold as the threshold return specgram
eaae2893944df28dcefb607bac3e8648db265acf
19,072
def check_for_win(position, board, player): """ check for wins on 3x3 board on rows,cols,diag,anti-diag args: position (int 1-9, user input) board (np.array 2d) player ("X" or "O") """ #initialize win to False win = False #check win on rows for row in board: if np.all(row==player): win = True #check win on cols (index 0,1,2) for i in range(3): if(np.all(board[:,i]==player)): win = True #check win on diagonals if np.all(board.diagonal()==player): win = True #check win on anti-diagonals if np.all(np.fliplr(board).diagonal()==player): win = True return win
fad580912f3a281ce605fd743732481915c352ea
19,073
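A self-contained sketch of the same row/column/diagonal check on an example 3x3 board (the board layout is made up for illustration).

import numpy as np

board = np.array([["X", "O", " "],
                  ["O", "X", " "],
                  ["O", " ", "X"]])
player = "X"
win = (
    any(np.all(row == player) for row in board)               # rows
    or any(np.all(board[:, i] == player) for i in range(3))   # cols
    or np.all(board.diagonal() == player)                     # diagonal
    or np.all(np.fliplr(board).diagonal() == player)          # anti-diagonal
)
print(win)  # True: "X" occupies the main diagonal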
def wrap_http_exception(app: FastAPI): """ https://doc.acrobits.net/api/client/intro.html#web-service-responses """ @app.exception_handler(StarletteHTTPException) async def http_exception_handler(request, exc): return JSONResponse({'message': exc.detail}, exc.status_code)
b43eea9b59eb50eaefd3d52a5569d6952518f61b
19,074
import random def perm_2sample(group1, group2, nrand=10000, tail=0, paired=True): # Take from JW's functions """ non-parametric permutation test (Efron & Tibshirani, 1998) tail = 0 (test A~=B), 1 (test A>B), -1 (test A<B) """ a = group1 b = group2 ntra = len(a) ntrb = len(b) meana = np.mean(a) meanb = np.mean(b) triala = np.zeros(nrand) trialb = np.zeros(nrand) if paired: for i in range(nrand): alldat = np.vstack((a,b)).T for j in range(ntra): alldat[j,:] = alldat[j,np.argsort(np.random.rand(2))] triala[i] = alldat[:,0].mean() trialb[i] = alldat[:,1].mean() else: alldat = np.concatenate((a,b)) indices = np.arange(alldat.shape[0]) for i in range(nrand): random.shuffle(indices) triala[i] = np.mean(alldat[indices[:ntra]]) trialb[i] = np.mean(alldat[indices[ntra:]]) if tail == 0: p_value = sum(abs(triala-trialb)>=abs(meana-meanb)) / float(nrand) else: p_value = sum((tail*(triala-trialb))>=(tail*(meana-meanb))) / float(nrand) return(meana-meanb, p_value)
afb3c56d277c583eeb34089bcd808e7e6e662ec7
19,075
def softmax_op(node): """ This function computes its softmax along an axis. Parameters: ---- node : Node Input variable. Returns: ---- A new Node instance created by Op. """ return SoftmaxOp()(node)
42004658214b7b7c083d40fe43393f1cf450175b
19,076
def full_chain(): """ :return: Returns entire blockchain in memory (current_chain.blockchain) """ response = { 'chain': current_chain.blockchain, 'length': len(current_chain.blockchain), } return response, 200
0161195e3dc28b9157ee824f8d3103029f058498
19,077
def normalizer(x, mi, ma, eps=1e-20, dtype=np.float32): """ Number expression evaluation for normalization Parameters ---------- x : np array of Image patch mi : minimum input percentile value ma : maximum input percentile value eps: avoid dividing by zero dtype: type of numpy array, float 32 defaut """ if dtype is not None: x = x.astype(dtype, copy=False) mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy=False) ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy=False) eps = dtype(eps) x = (x - mi) / (ma - mi + eps) x = normalizeZeroOne(x) return x
0f39a4d02bc3f3897d0b2070567a45eb68ade2d4
19,078
def do(args): """ Main Entry Point. """ build_worktree = qibuild.parsers.get_build_worktree(args) sourceme = build_worktree.generate_sourceme() print(sourceme) return sourceme
d706fe7f8a277f58dd8f184212851d087b7890d1
19,079
import json import traceback def policy_action(module, state=None, policy_name=None, policy_arn=None, policy_document=None, path=None, description=None): """ Execute the actions needed to bring the policy into the specified state. Args: module (obj): Ansible module state (str): Ansible state - 'present' | 'absent' policy_name (str): Policy name. One and only one of policy name or policy ARN must be given. policy_arn (str): Policy ARN. One and only one of policy name or policy ARN must be given. policy_document(dict): JSON policy document path (str): Policy path description (str): Policy description. Defaults to 'policy_name' Returns: Success: (bool) changed, (dict) policy object (see boto3.get_policy docs) Failure: Invokes module.fail_json with suitable text at point of error """ changed = False policy = None error = {} if state == 'present': try: if isinstance(policy_document, dict): policy_document = json.dumps(policy_document) response = policy_m.create_policy( policy_name=policy_name, path=path, policy_document=policy_document, description=description) if 'error' in response: error = response['error'] else: if response['state'] == 'New': changed = True policy = response['policy'] except Exception as e: module.fail_json(msg='policy action {0} failed: {1} {2}'.format('present', e,traceback.format_exc())) elif state == 'absent': try: response = policy_m.delete_policy( policy_name=policy_name, path=path) if 'error' in response: error = response['error'] else: changed = True policy = response['policy'] except Exception as e: module.fail_json(msg='policy action {0} failed: {1} {2}'.format('absent', e,traceback.format_exc())) else: error = {"error": "state must be either 'present' or 'absent'"} if error: module.fail_json(msg='policy action failed: {0}'.format(error)) return changed, policy
5da4c4649170e81569cc3e77fa102fd0043cebd9
19,080
def GET_v1_metrics_location(days=1): """Return some data about the locations users have reported from. """ if days > 7: days = 7 from_time = f'-{days}d' locations = fetch_graphite_sum('*.geoip.*', from_time=from_time) return jsonify(locations=locations)
e9765f338c1adbbb46e203024b7637af45e2f217
19,081
def add_default_to_usage_help( usage_help: str, default: str or int or float or bool ) -> str: """Adds default value to usage help string. Args: usage_help (str): usage help for click option. default (str or int or float): default value as string for click option. Returns: str: New usage_help value. """ if default is not None: return f"{usage_help} default={default}" return usage_help
a40cf9a68f18beeafcb965c51e0329b4e8216fb4
19,082
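A quick standalone sketch of the helper above, re-declared inline so the snippet runs on its own.

def add_default_to_usage_help(usage_help, default):
    # Append the default value to a click option's help text, if one is set.
    return f"{usage_help} default={default}" if default is not None else usage_help

print(add_default_to_usage_help("Number of retries.", 3))      # Number of retries. default=3
print(add_default_to_usage_help("Optional API token.", None))  # Optional API token.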
def describe_deformation(el_disps, bfg): """ Describe deformation of a thin incompressible 2D membrane in 3D space, composed of flat finite element faces. The coordinate system of each element (face), i.e. the membrane mid-surface, should coincide with the `x`, `y` axes of the `x-y` plane. Parameters ---------- el_disps : array The displacements of element nodes, shape `(n_el, n_ep, dim)`. bfg : array The in-plane base function gradients, shape `(n_el, n_qp, dim-1, n_ep)`. Returns ------- mtx_c ; array The in-plane right Cauchy-Green deformation tensor :math:`C_{ij}`, :math:`i, j = 1, 2`. c33 : array The component :math:`C_{33}` computed from the incompressibility condition. mtx_b : array The discrete Green strain variation operator. """ sh = bfg.shape n_ep = sh[3] dim = el_disps.shape[2] sym2 = dim2sym(dim-1) # Repeat el_disps by number of quadrature points. el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1]) # Transformed (in-plane) displacement gradient with # shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a. du = dot_sequences(bfg, el_disps_qp) # Deformation gradient F w.r.t. in plane coordinates. # F_{ia} = dx_i / dX_a, # a \in {1, 2} (rows), i \in {1, 2, 3} (columns). mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype) # Right Cauchy-Green deformation tensor C. # C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}. mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT') # C_33 from incompressibility. c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1] - mtx_c[..., 0, 1]**2) # Discrete Green strain variation operator. mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64) mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1] mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2] mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3] mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1] mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2] mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3] mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \ + bfg[..., 0, :] * mtx_f[..., 1, 0:1] mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \ + bfg[..., 1, :] * mtx_f[..., 0, 1:2] mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \ + bfg[..., 1, :] * mtx_f[..., 0, 2:3] return mtx_c, c33, mtx_b
e748a8ecf3cc369fb03ba835b6f0c762eeafbf07
19,083
import re def remove_emails(text): """Returns A String with the emails removed """ result = re.sub(EMAIL_REGEX, "", text) return result
08d30119f5e32a92c3df3a7b5612ba99390e7df9
19,084
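A runnable sketch of the same idea; EMAIL_REGEX is defined elsewhere in the original module, so a simple illustrative pattern is substituted here.

import re

EMAIL_REGEX = r"[\w.+-]+@[\w-]+\.[\w.-]+"  # illustrative pattern, not the original constant

def remove_emails(text):
    return re.sub(EMAIL_REGEX, "", text)

print(remove_emails("Contact alice@example.com or bob@example.org for details."))
# "Contact  or  for details."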
def _fit_model_residual_with_radial(lmparams, star, self, interpfunc): """Residual function for fitting individual profile parameters :param lmparams: lmfit Parameters object :param star: A Star instance. :param self: PSF instance :param interpfunc: The interpolation function :returns chi: Chi of observed pixels to model pixels """ all_params = lmparams.valuesdict().values() flux, du, dv = all_params[:3] params = all_params[3:] prof = self.getProfile(params) image, weight, image_pos = star.data.getImage() # use for getting drawprofile star.fit.flux = flux star.fit.center = (du, dv) star_drawn = drawProfile(self, star, prof, params, use_fit=True, interpfunc=interpfunc) image_model = star_drawn.image chi = (np.sqrt(weight.array) * (image_model.array - image.array)).flatten() return chi
77f4031dbf7522236c36a8e096c965766df16ccb
19,085
def _whoami(): # type: () -> Tuple[str,str] """ Return the current operating system account as (username, fullname) """ username = getuser() fullname = username if GET_PW_NAM: pwnam = getpwnam(username) if pwnam: fullname = pwnam.pw_gecos.split(",", 1)[0] return (username, fullname)
508189e4798e83425b754ad6cde617a3b0d1ec9f
19,086
from typing import Sequence def text_set_class( set_class: Sequence, ) -> str: """Converts a set class into a string representing its interval vector. """ id_dict = {0: "one", 1: "two", 2: "three", 3: "four", 4: "five", 5: "six"} result = "" for i, el in enumerate(interval_vector(set_class)): for _ in range(el): result += id_dict[i] + " " return result.rstrip()
f430ddf4b32f64f37df5fb9ec984ce5a809b09a1
19,087
def Span_read(stream): """Read a span from an 88.1 protocol stream.""" start = Address_read(stream) width = Offset_read(stream) return Span(start, width)
0bbd5d62a1111dd056a939ee272ea200e1f7abd9
19,088
def temp_database(tmpdir_factory): """ Initalize the Database """ tmpdb = str(tmpdir_factory.mktemp('temp'))+"/testdb.sqlite" return tmpdb
5cfcb27e6ac76766e21a1612691dbe79d1713abd
19,089
import ast def get_classes(pyfile_path): """ Gets the classes defined inside a Python file :param str pyfile_path: name of the file to inspect :return: returns a list with all the classes inside the Python file :rtype: list .. code-block:: python >> get_classes('./data.py') ['Module', 'PythonFile'] """ with open(pyfile_path, 'r') as f: inspection = ast.parse(f.read()) return [class_.name for class_ in inspection.body if isinstance(class_, ast.ClassDef)]
72f376d10fd02574085a0236e10ea8901033ebd0
19,090
from typing import List def transpose_outer_dimensions(outer_dimensions: ST_Type, diff_dimensions: ST_Type, ports_to_transpose: List) -> Kind: """ Transpose the outer dimensions of a set of ports, move them inside the diff dimensions. The outer dimensions that are sseqs are the same for all elements, so treat as inner dimensions. :param outer_dimensions: The outer dimensions that need to be moved inside :param diff_dimensions: The dimensions that need to be moved outside :param ports_to_transpose: The ports :return: """ # always remove tseqs as they don't affect the magma types num_outer_dimensions = num_nested_layers(remove_tseqs(outer_dimensions)) num_diff_dimensions = num_nested_layers(remove_tseqs(diff_dimensions)) # these are the indexes of the dimensions on the untransposed type outer_dimensions_indexes_untransposed = list(range(num_outer_dimensions)) diff_dimensions_indexes_untransposed = list(range(num_outer_dimensions, num_outer_dimensions + num_diff_dimensions)) sseq_dims_transposed = diff_dimensions_indexes_untransposed + outer_dimensions_indexes_untransposed # performing the transpose with blockers added so right dimensions not converted ports_to_transpose_with_block = add_blocker(ports_to_transpose, len(sseq_dims_transposed)) orig_arr = np.asarray(ports_to_transpose_with_block) transposed_arr = orig_arr.transpose(sseq_dims_transposed) transposed_list_with_blocks = transposed_arr.tolist() return remove_blocker(transposed_list_with_blocks)
ca51943223bbca58f871a9cb4c6b296ae941e87d
19,091
def pad_or_clip_nd(tensor, output_shape): """Pad or Clip given tensor to the output shape. Args: tensor: Input tensor to pad or clip. output_shape: A list of integers / scalar tensors (or None for dynamic dim) representing the size to pad or clip each dimension of the input tensor. Returns: Input tensor padded and clipped to the output shape. """ tensor_shape = tf.shape(tensor) clip_size = [ tf.where(tensor_shape[i] - shape > 0, shape, -1) if shape is not None else -1 for i, shape in enumerate(output_shape) ] clipped_tensor = tf.slice( tensor, begin=tf.zeros(len(clip_size), dtype=tf.int32), size=clip_size) # Pad tensor if the shape of clipped tensor is smaller than the expected # shape. clipped_tensor_shape = tf.shape(clipped_tensor) trailing_paddings = [ shape - clipped_tensor_shape[i] if shape is not None else 0 for i, shape in enumerate(output_shape) ] paddings = tf.stack( [ tf.zeros(len(trailing_paddings), dtype=tf.int32), trailing_paddings ], axis=1) padded_tensor = tf.pad(clipped_tensor, paddings=paddings) output_static_shape = [ dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape ] padded_tensor.set_shape(output_static_shape) return padded_tensor
a22b6872b4af4424411d26232af0c21fda7c55df
19,092
def dynamic_lstm(x, n_neuron, act_fn=tanh, seq_len=None): """ assert x is batch_major, aka [batch, time, ...] """ cell_class = lstm with tf.variable_scope("fw"): cell_fw = cell_class(n_neuron, activation=act_fn, cell_clip=15.0) o, s = tf.nn.dynamic_rnn( cell_fw, x, seq_len, dtype=tf.float32) return o, s
4f2df8155281e3664d5d8521918af5c84a211a34
19,093
def ho2ax_single(ho): """Conversion from a single set of homochoric coordinates to an un-normalized axis-angle pair :cite:`rowenhorst2015consistent`. Parameters ---------- ho : numpy.ndarray 1D array of (x, y, z) as 64-bit floats. Returns ------- ax : numpy.ndarray 1D array of (x, y, z, angle) as 64-bit floats. Notes ----- This function is optimized with Numba, so care must be taken with array shapes and data types. """ # Constants stolen directly from EMsoft # fmt: off fit_parameters = np.array([ 0.9999999999999968, -0.49999999999986866, -0.025000000000632055, -0.003928571496460683, -0.0008164666077062752, -0.00019411896443261646, -0.00004985822229871769, -0.000014164962366386031, -1.9000248160936107e-6, -5.72184549898506e-6, 7.772149920658778e-6, -0.00001053483452909705, 9.528014229335313e-6, -5.660288876265125e-6, 1.2844901692764126e-6, 1.1255185726258763e-6, -1.3834391419956455e-6, 7.513691751164847e-7, -2.401996891720091e-7, 4.386887017466388e-8, -3.5917775353564864e-9 ]) # fmt: on ho_magnitude = np.sum(ho**2) if (ho_magnitude > -1e-8) and (ho_magnitude < 1e-8): ax = np.array([0, 0, 1, 0], dtype=np.float64) else: # Convert the magnitude to the rotation angle hom = ho_magnitude s = fit_parameters[0] + fit_parameters[1] * hom for i in nb.prange(2, 21): hom = hom * ho_magnitude s = s + fit_parameters[i] * hom hon = ho / np.sqrt(ho_magnitude) s = 2 * np.arccos(s) if np.abs(s - np.pi) < 1e-8: # pragma: no cover ax = np.append(hon, np.pi) else: ax = np.append(hon, s) return ax
50ec25eb488ea894f6a5c6a00feab27b93954200
19,094
import json def task_export_commit(request): """Submit an export task""" try: datas = json.loads(request.body.decode()) taskSetting = PlTaskSetting.objects.get(id=datas["params"]["id"]) try: exportJob = PlExportJob.objects.get(task_setting_id=taskSetting.id) except ObjectDoesNotExist: exportJob = PlExportJob(task_setting_id=taskSetting.id, run_time=timezone.now() - timedelta(weeks=100)) exportJob.save() # Save it once first so the row is guaranteed to exist in the database, because the conditional update statement below is used to update this job and avoid concurrency problems if 0 != exportJob.status: return response(-3, message="An export task has already been submitted; please stop it first") # Perform the update; a conditional update is used to prevent concurrency issues updateRows = PlExportJob.objects.filter(task_setting_id=taskSetting.id, status=0).update( status = 1, req_stop = 0, process = 0, worker_name = "", download_addr = "", task_setting_info = json.dumps(model_to_dict(taskSetting)), export_setting_info = json.dumps(datas["params"]["setting"]) ) if updateRows <= 0: return response(-4, message="Update failed") result = response() except ObjectDoesNotExist: result = response(-1, message="The monitoring task does not exist; it may have been deleted.") except DatabaseError: result = response(-2, message="Database query error") return result
4ec543af62e9abab9194b22cf54e22de2551ce24
19,095
def getExceptionMessage(exceptionDetails: dict) -> str: """Get exception message from `exceptionDetails` object.""" exception = exceptionDetails.get('exception') if exception: return exception.get('description') message = exceptionDetails.get('text', '') stackTrace = exceptionDetails.get('stackTrace', dict()) if stackTrace: for callframe in stackTrace.get('callFrames'): location = ( str(callframe.get('url', '')) + ':' + str(callframe.get('lineNumber', '')) + ':' + str(callframe.get('columnNumber')) ) functionName = callframe.get('functionName', '<anonymous>') message = message + f'\n at {functionName} ({location})' return message
ba3d15aa383de9f55600a72ba113c37fd042d3a4
19,096
def evaluate_by_net(net, input_fn, **kwargs): """encapsulate evaluate """ ret = evaluate( graph=net.graph, sess=net.session, fea_ph=net.features_ph, label_ph=net.labels_ph, outputs=net.outputs, input_fn=input_fn, **kwargs ) return ret
a58b80b5e93dbb4251eb71393f8a9f70ddff813b
19,097
import array def ordinate(values,maxrange,levels): """Ordinate values given a maximum data range and number of levels Parameters: 1. values: an array of continuous values to ordinate 2. maxrange: the maximum data range. Values larger than this will be saturated. 3. levels: the number of levels at which values are ordinated """ quantizer=lambda dist,maxrange,levels: int(1.0*max(1,dist-1)*levels/maxrange)+1 if isinstance(values, (list, tuple, array.array)): ordinated=[] for v in values: if v==0: ordinated.append(v) else: ordinated.append(quantizer(v,maxrange,levels)) return ordinated else: if values==0: return values else: return quantizer(values,maxrange,levels)
4db4a26579d9208cd90ec630cf82e54a4a7ec3fe
19,098
def is_start_state(state): """ Checks if the given state is a start state. """ return (state.g_pos.value == 0) and (state.theta.value == 'N')
0f58e7a193533ba3d5db15c4e79ed98e190fa1be
19,099
def days_in_month(year, month): """ return number of days in that month in that year """ if not 1 <= month <= 12: return 'Invalid Month' if month == 2 and is_leap(year): return 29 return month_days[month]
8e9e5878fcfb595518d33a38baaf5bdc1b45c8ed
19,100
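The function above relies on an is_leap helper and a month_days table defined elsewhere; below is a self-contained sketch with plausible stand-ins for both.

month_days = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
              7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}

def is_leap(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

def days_in_month(year, month):
    if not 1 <= month <= 12:
        return 'Invalid Month'
    if month == 2 and is_leap(year):
        return 29
    return month_days[month]

print(days_in_month(2020, 2))   # 29
print(days_in_month(2021, 2))   # 28
print(days_in_month(2021, 13))  # Invalid Month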
def tmm_normal(fPath, bFilter=True): """ Function to obtain the Voom normal Count Args: fPath string Path with the raw counts outPath string File output bFilter Bool Bool to FIlter low expression genes Returns: tmm dataframe DataFrame with the log2(TMM) counts """ tmm = tmm_normalization(fPath, str(bFilter)) return tmm
4741a6af490e24485bd4ad28e7289dd320abf77d
19,101
import base64 def np_to_base64(img_np): """ Convert numpy image (RGB) to base64 string """ img = Image.fromarray(img_np.astype("uint8"), "RGB") buffered = BytesIO() img.save(buffered, format="PNG") return "data:image/png;base64," + base64.b64encode( buffered.getvalue()).decode("ascii")
2856e8ccf5402b5f6615bc8b66a364cef3e3a01c
19,103
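A standalone version of the conversion above (requires Pillow); the 2x2 solid-red test image is arbitrary.

import base64
from io import BytesIO

import numpy as np
from PIL import Image

img_np = np.zeros((2, 2, 3), dtype=np.uint8)
img_np[..., 0] = 255  # solid red test image

img = Image.fromarray(img_np, "RGB")
buffered = BytesIO()
img.save(buffered, format="PNG")
data_uri = "data:image/png;base64," + base64.b64encode(buffered.getvalue()).decode("ascii")
print(data_uri[:50] + "...")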
def sample_unknown_parameters(_params, _n=None): """ AW - sample_unknown_parameters - Sample the parameters we do not fix and hence wish to marginalize over. :param _params: SimpNameSp: dot accessible simple name space of simulation parameters. :return: SimpNameSp: dot accessible simple name space of simulation parameters, where those parameters that are not fixed have been re-drawn from the prior. """ if _n is None: _n = len(_params.log_a) _params_from_unknown = dc(_params) _params_from_prior = sample_prior_parameters(_params_from_unknown, _n) for _k in _params.uncontrolled_parameters: setattr(_params_from_unknown, _k, getattr(_params_from_prior, _k)) return _params_from_unknown
700d4ab80cd3e798fa87f9249194015377d19cc7
19,104
import logging def vector2Table (hdu, xlabel='wavelength',ylabel='flux') : """ Reads a 1-D vector from a FITS HDU into a Table. If present, the wavelength scale is hopefully in a simple, linear WCS! """ hdr = hdu.header if hdr['NAXIS'] != 1 : logging.error ('vector2Table can only construct 1-D tables!') return None nw = hdr['NAXIS1'] pixl = np.arange(nw) wave = None # GET FLUX bscale = 1.0 bzero = 0.0 """ if 'BSCALE' in hdr and 'BZERO' in hdr : bscale = hdr['BSCALE'] bzero = hdr['BZERO'] """ flux = hdu.data*bscale+bzero # GET WAVELENGTH if 'CRVAL1' in hdr and 'CDELT1' in hdr : # SIMPLE WCS crpix1 = 1 if 'CRPIX1' in hdr : crpix1 = hdr['CRPIX1'] w0 = hdr['CRVAL1'] dwdx = hdr['CDELT1'] wave = w0+dwdx*(pixl+1-(crpix1-1)) # GET UNITS if 'CUNIT1' in hdr : cunit1 = hdr['CUNIT1'] elif wave is not None : # ASSUME ASTRONOMERS USE ANGSTROMS cunit1 = 'nm' wave /= 10. else : cunit1 = 'pix' # CONSTRUCT Table t = Table() if wave is not None : t[xlabel] = Column(wave,unit=cunit1, description=xlabel) else : t[xlabel] = Column(pixl,unit=cunit1, description=xlabel) t[ylabel] = Column(flux,unit='unknown', description=ylabel) t.meta = hdr return t
b0ff458f8cf6de660ae5c314f3e9db1f50aeaf3c
19,105
def get_zarr_size(fn): """Get size of zarr file excluding metadata""" # Open file grp = zarr.open_group(fn) # Collect size total = 0 for var in list(grp.keys()): total += grp[var].nbytes_stored return total
e2fe053bf239156e74038672a435144cf7bc5216
19,106
def rotation_matrix(a, b): """ Calculate rotation matrix M, such that Ma is aligned to b Args: a: Initial vector direction b: Target direction """ # np.allclose might be safer here if np.array_equal(a, b): return np.eye(3) # Allow cases where a,b are not unit vectors, so normalise a = a / np.linalg.norm(a) b = b / np.linalg.norm(b) # Anti-parallel - rotate 180 degrees about any axis. if np.array_equal(a, -b): # If vector is (anti)parallel to z, rotate around x if np.array_equal(np.abs(a), np.array([0, 0, 1])): return np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]]) # Otherwise rotate around z return np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]]) v = np.cross(a, b) s = np.linalg.norm(v) t = np.dot(a, b) vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]]) return np.eye(3) + vx + np.dot(vx, vx) * (1-t) / (s**2)
948eb08758b81a6b9f2cc0f518dbb74f04970a1c
19,107
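A quick numerical check of the Rodrigues-style construction used in the generic (non-parallel) branch above; the unit vectors are arbitrary.

import numpy as np

a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 0.0])

v = np.cross(a, b)
s = np.linalg.norm(v)
t = np.dot(a, b)
vx = np.array([[0, -v[2], v[1]],
               [v[2], 0, -v[0]],
               [-v[1], v[0], 0]])
M = np.eye(3) + vx + vx @ vx * (1 - t) / s**2

print(np.allclose(M @ a, b))  # True: M rotates a onto b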
def saveuserprefs(): """ Fetch the preferences of the current user in JSON form """ user = current_user() j = request.get_json(silent=True) # Return the user preferences in JSON form uf = UserForm() uf.init_from_dict(j) err = uf.validate() if err: return jsonify(ok=False, err=err) uf.store(user) return jsonify(ok=True)
0b2e893623432f0337014df3f0a67a4d2174a082
19,109
import torch def make_features(batch, side, data_type='text'): """ Args: batch (Tensor): a batch of source or target data. side (str): for source or for target. data_type (str): type of the source input. Options are [text|img|audio]. Returns: A sequence of src/tgt tensors with optional feature tensors of size (len x batch). """ assert side in ['src', 'conversation', 'tgt'] if isinstance(batch.__dict__[side], tuple): data = batch.__dict__[side][0] else: data = batch.__dict__[side] feat_start = side + "_feat_" keys = sorted([k for k in batch.__dict__ if feat_start in k]) features = [batch.__dict__[k] for k in keys] levels = [data] + features if data_type == 'text': return torch.cat([level.unsqueeze(2) for level in levels], 2) else: return levels[0]
6ffed5546ea35a7be559f58521aa119d576ed465
19,111
def page_dirs_to_file_name(page_dirs): """ Converts a page path given as a directory list such as [category1, category2, page] into a file name of the form 'category1_._category2_._page'. :param page_dirs: :return: """ file_name = "" for page_dir in page_dirs: if page_dir: file_name = file_name + page_dir.strip() + '_._' file_name = file_name[0:-len('_._')] file_name = _replace_windows_ng_word(file_name) return file_name
1e7bb5f04900440824e7a223fbb88599add86c07
19,112
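A behavior sketch for the helper above; _replace_windows_ng_word (which rewrites characters not allowed in Windows file names) is assumed to leave this input unchanged.

page_dirs = ["category1", "category2", "page"]
file_name = "_._".join(p.strip() for p in page_dirs if p)
print(file_name)  # category1_._category2_._page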
def has_field(feature_class, field_name): """Returns true if the feature class has a field named field_name.""" for field in arcpy.ListFields(feature_class): if field.name.lower() == field_name.lower(): return True return False
afe2352a1a17b9c0c48e68b68ab41595230343f9
19,113
from re import T def process_settings(settings: AttrDict, params: T.Optional[T.Set[str]] = None, ignore: T.Iterable[str]=()) -> AttrDict: """ Process an dict-like input parameters, according to the rules specified in the `Input parameter documentation <https://sqsgenerator.readthedocs.io/en/latest/input_parameters.html>`_. This function should be used for processing user input. Therefore, exports the parser functions defined in ``sqsgenerator.settings.readers``. To specify a specify subset of parameters the {params} argument is used. To {ignore} specifc parameters pass a list of parameter names :param settings: the dict-like user configuration :type settings: AttrDict :param params: If specified only the subset of {params} is processed (default is ``None``) :type params: Optional[Set[``None``]] :param ignore: a list/iterable of params to ignore (default is ``()``) :type ignore: Iterable[``str``] :return: the processed settings dictionary :rtype: AttrDict """ params = params if params is not None else set(parameter_list()) last_needed_parameter = max(params, key=parameter_index) ignore = set(ignore) for index, (param, processor) in enumerate(__parameter_registry.items()): if param not in params: # we can only skip this parameter if None of the other parameters depends on param if parameter_index(param) > parameter_index(last_needed_parameter): continue if param in ignore: continue settings[param] = processor(settings) return settings
0cd49f857fe2923d71fb4be46cac4eefa1fa11bf
19,114
def serialize_block(block: dict) -> Block: """Serialize raw block from dict to structured and filtered custom Block object Parameters ---------- block : dict Raw KV block data from gRPC response Returns ------- Block Structured, custom defined Block object for more controlled data access """ return Block( block.get("id", None), block.get("number", None), block.get("header", {}).get("timestamp", None), block.get("header", {}).get("producer", None), block.get("unfilteredTransactionCount", 0), block.get("unfilteredTransactionTraceCount", 0), block.get("unfilteredExecutedInputActionCount", 0), block.get("unfilteredExecutedTotalActionCount", 0), block.get("filteringIncludeFilterExpr", 0), block.get("filteredTransactionTraceCount", 0), block.get("filteredExecutedInputActionCount", 0), block.get("filteredExecutedTotalActionCount", 0), list( map( lambda tx_trace: TransactionTrace( tx_trace.get("id", None), tx_trace.get("blockNum", None), tx_trace.get("blockTime", None), tx_trace.get("receipt", {}).get("status", None), tx_trace.get("receipt", {}).get("cpuUsageMicroSeconds", None), tx_trace.get("netUsage", None), tx_trace.get("elapsed", None), list( map( lambda act_trace: ActionTrace( act_trace.get("transactionId", None), act_trace.get("blockNum", None), act_trace.get("actionOrdinal", None), Action( act_trace.get("action", {}).get("account", None), act_trace.get("action", {}).get("name", None), act_trace.get("action", {}).get( "jsonData", {"from": None, "to": None} ), ), act_trace.get("elapsed", None), act_trace.get("action", {}).get( "authorization", [{"actor": None}] )[0]["actor"], act_trace.get("receiver", None), ), tx_trace.get("actionTraces", None), ) ), ), block.get("filteredTransactionTraces", []), ) ), block.get("filteredTransactionCount", 0), )
05931685b970a562b108df134e26c6857bd9bb6a
19,115
from pandas import get_option def repr_pandas_Series(series, _): """ This function can be configured by setting the `max_rows` attributes. """ return series.to_string( max_rows=repr_pandas_Series.max_rows, name=series.name, dtype=series.dtype, length=get_option("display.show_dimensions"), )
86009d8fc1559dd97361a8c5e113c5477ff73de2
19,116
def convert_fmt(fmt): """rs.format to pyglet format string""" return { rs.format.rgb8: 'RGB', rs.format.bgr8: 'BGR', rs.format.rgba8: 'RGBA', rs.format.bgra8: 'BGRA', rs.format.y8: 'L', }[fmt]
b2f34498969d2e29d8c21367788ddcaebe205acf
19,117
import math def train_ALS(train_data, validation_data, num_iters, reg_param, ranks): """ Grid Search Function to select the best model based on RMSE of hold-out data """ # initial min_error = float('inf') best_rank = -1 best_regularization = 0 best_model = None for rank in ranks: for reg in reg_param: # train ALS model model = ALS.train( ratings=train_data, # (userID, productID, rating) tuple iterations=num_iters, rank=rank, lambda_=reg, # regularization param seed=99) # make prediction valid_data = validation_data.map(lambda p: (p[0], p[1])) predictions = model.predictAll(valid_data).map(lambda r: ((r[0], r[1]), r[2])) # get the rating result ratesAndPreds = validation_data.map(lambda r: ((r[0], r[1]), r[2])).join(predictions) # get the RMSE MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean() error = math.sqrt(MSE) print('{} latent factors and regularization = {}: validation RMSE is {}'.format(rank, reg, error)) if error < min_error: min_error = error best_rank = rank best_regularization = reg best_model = model print('\nThe best model has {} latent factors and regularization = {}'.format(best_rank, best_regularization)) return best_model
99d0584e9374a529632024caeadb88d85c681b81
19,118
import copy def response_ack(**kwargs): """ Policy-based provisioning of ACK value. """ try: tlv, code, policy, post_c2c = kwargs["tlv"], kwargs["code"], kwargs["policy"], kwargs["post_c2c"] new_tlv = copy.deepcopy(tlv) if post_c2c is not True: ret = policy.get_available_policy(new_tlv) if ret == None: new_tlv["notAvailable"] new_tlv['ope'] = 'info' return [new_tlv] except Exception as ex: print("Exception in response_ack()", ex) return None
ff34cf196e0d565ebac7700f5b412f615685ca37
19,121
def is_file(path, use_sudo=False): """ Check if a path exists, and is a file. """ func = use_sudo and sudo or run with settings(hide('running', 'warnings'), warn_only=True): return func('[ -f "%(path)s" ]' % locals()).succeeded
9b3402205fe972dbedfa582117b6d03bdb949122
19,122
def bounded_random_walk(minval, maxval, delta_min, delta_max, T, dtype=tf.float32, dim=1): """ Simulates a random walk with boundary conditions. Used for data augmentation along entire tube. Based on: https://stackoverflow.com/questions/48777345/vectorized-random- walk-in-python-with-boundaries Args: minval (int/float): Minimum value. maxval (int/float): Maximum value. delta_min (int/float): Minimum change. delta_max (int/float): Maximum change. T (int): Length of sequence. dtype (type): Data type of walk. dim (int): Dimension. Returns: Tensor (T x dim). """ if maxval <= minval: return tf.ones((T, dim)) * minval # Don't do this yet for consistency if minval == delta_min and maxval == delta_max: print('Using the old data augmentation!') walk = tf.random_uniform( shape=(T, dim), minval=minval, maxval=maxval, dtype=dtype, ) return walk start = tf.random_uniform( shape=(1, dim), minval=minval, maxval=maxval, dtype=dtype, ) size = maxval - minval walk = tf.cumsum(tf.random_uniform( shape=(T, dim), minval=delta_min, maxval=delta_max, dtype=dtype, )) return tf.abs((walk + start - minval + size) % (2 * size) - size) + minval
18bba29b9f0c320da04eb2419a49483ee301e178
19,123
def validate_photo_url(photo_url, required=False): """Parses and validates the given URL string.""" if photo_url is None and not required: return None if not isinstance(photo_url, str) or not photo_url: raise ValueError( 'Invalid photo URL: "{0}". Photo URL must be a non-empty ' 'string.'.format(photo_url)) try: parsed = parse.urlparse(photo_url) if not parsed.netloc: raise ValueError('Malformed photo URL: "{0}".'.format(photo_url)) return photo_url except Exception: raise ValueError('Malformed photo URL: "{0}".'.format(photo_url))
9c6d617d4b618f626c29977b0a7c4c9dc9b3f9ab
19,124
def to_flp(stipples, dpi=300, x_mm=0, y_mm=0, laser_pwr=35000, ticks=500, base=100): """" Converts a set of stipples into a list of FLP packets dpi is the image's DPI x_mm and y_mm are the corner location of the image (default 0,0) (where 0,0 is the center of the build platform) laser_power is the laser's power level in ticks ticks is the number of frames the laser spends a black point base is the number of frames the laser spends on a white point """ # Accumulated list of FLP packets packets = F.Packets() # Sort by X to reduce the amount of laser moves necessary stipples = sorted(stipples, key=lambda s: s[0]) # Draw stuff for every point for x, y, i in stipples: # Center position in mm x = mm_to_pos(x / float(dpi) * 25.4 + x_mm) y = mm_to_pos(y / float(dpi) * 25.4 + y_mm) # Decide how long to stay on this point (longer time = darker point) t = int(ceil((ticks - base) * (1 - i)) + base) if t == 0: continue # Move to this stipple's location with the laser off, then pause # briefly to let the controller stabilize packets.append(F.LaserPowerLevel(0)) packets.append(F.XYMove([[x, y, 200], [x, y, 100]])) # Draw the spot with the laser on packets.append(F.LaserPowerLevel(laser_pwr)) packets.append(F.XYMove([[x, y, t]])) return packets
9d826b6174478cbfb3d2033e05b9ccafb5dca79c
19,125
def GetSchema(component): """convience function for finding the parent XMLSchema instance. """ parent = component while not isinstance(parent, XMLSchema): parent = parent._parent() return parent
3445acb7bade3cca15d4eeeb3da4d548ea44a206
19,126
def ranked_bots_query(alias="ranked_bots"): """ Builds a query that ranks all bots. This is a function in case you need this as a subquery multiple times. """ return sqlalchemy.sql.select([ bots.c.user_id, bots.c.id.label("bot_id"), bots.c.mu, bots.c.sigma, bots.c.score, bots.c.games_played, bots.c.version_number, bots.c.language, bots.c.update_time, bots.c.compile_status, sqlalchemy.sql.func.rank().over( order_by=bots.c.score.desc() ).label("bot_rank"), sqlalchemy.sql.func.rank().over( partition_by=users.c.organization_id, order_by=bots.c.score.desc() ).label("bot_organization_rank"), ]).select_from( bots.join(users, bots.c.user_id == users.c.id) ).where( users.c.is_active == True ).order_by( bots.c.score.desc() ).alias(alias)
f6641efa611884721e33453f4fcc4af0503b4aaf
19,127
def marathon_deployments_check(service): """Checks for consistency between deploy.yaml and the marathon yamls""" the_return = True pipeline_deployments = get_pipeline_config(service) pipeline_steps = [step['instancename'] for step in pipeline_deployments] pipeline_steps = [step for step in pipeline_steps if step not in DEPLOY_PIPELINE_NON_DEPLOY_STEPS] marathon_steps = get_marathon_steps(service) in_marathon_not_deploy = set(marathon_steps) - set(pipeline_steps) if len(in_marathon_not_deploy) > 0: print "%s There are some instance(s) you have asked to run in marathon that" % x_mark() print " do not have a corresponding entry in deploy.yaml:" print " %s" % PaastaColors.bold(", ".join(in_marathon_not_deploy)) print " You should probably add entries to deploy.yaml for them so they" print " are deployed to those clusters." the_return = False in_deploy_not_marathon = set(pipeline_steps) - set(marathon_steps) if len(in_deploy_not_marathon) > 0: print "%s There are some instance(s) in deploy.yaml that are not referenced" % x_mark() print " by any marathon instance:" print " %s" % PaastaColors.bold((", ".join(in_deploy_not_marathon))) print " You should probably delete these deploy.yaml entries if they are unused." the_return = False if the_return is True: print success("All entries in deploy.yaml correspond to a marathon entry") print success("All marathon instances have a corresponding deploy.yaml entry") return the_return
3f2df53652efad4b731a05b3ecc17929d65982ac
19,128
def get_book_info(book_id, books): """Obtain meta data of certain books. :param book_id: Books to look up :type: int or list of ints :param books: Dataframe containing the meta data :type: pandas dataframe :return: Meta data for the book ids :rtype: List[str], List[str], List[str] """ if not isinstance(book_id, list): book_id = [book_id] book_authors, book_titles, book_img_urls = [], [], [] for i in book_id: book_info = books.loc[books["book_id"]==i].squeeze() if book_info.shape[0]==0: raise ValueError("Could not find book_id {} in the dataset.".format(book_id)) book_authors.append(book_info.authors) book_titles.append(book_info.title) book_img_urls.append(book_info.image_url) return book_authors, book_titles, book_img_urls
64a91a498f9bf9df918d256a7ce705e98dadbbd9
19,129
import functools import six def save_error_message(func): """ This function will work only if transition_entity is defined in kwargs and transition_entity is instance of ErrorMessageMixin """ @functools.wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except Exception as exception: message = six.text_type(exception) transition_entity = kwargs['transition_entity'] if message: transition_entity.error_message = message transition_entity.save(update_fields=['error_message']) raise exception return wrapped
9ac592100445a0232efc4afaa3807b050c8eddff
19,130
def EMV(data,n=20,m=23): """ """ def emv(high,low,vol,n=14): MID = np.zeros(len(high)) MID[1:] = (np.array(high[1:])+np.array(low[1:])-np.array(high[:-1])-np.array(low[:-1]))/2. BRO = np.array(vol)/(100000000.*(np.array(high)-np.array(low))) EM = MID/BRO return ta.SMA(EM,n) data['emv'] = emv(np.array(data.high),np.array(data.low),np.array(data.vol),n) data['maemv'] = ta.SMA(np.array(data['emv']),m) signal = pd.DataFrame(index=data.index) #strategy 1 """ Buy (signal 1) when EMV is above 0; sell (signal -1) when EMV is below 0. Common parameter: n=14 """ signal['1'] = (data['emv']>0)*2 - 1 #strategy 2 """ Buy (signal 1) when EMV is above MAEMV; sell (signal -1) when EMV is below MAEMV. Parameters: n=20, m=23 """ signal['2'] = (data['emv'] > data['maemv'])*2 - 1 signal = signal.fillna(0) return signal
a3555738c2f0c047ad4c21ae32dcfed460a9ec5b
19,131
from typing import Tuple def desired_directions(state: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """Given the current state and destination, compute desired direction.""" destination_vectors = state[:, 4:6] - state[:, 0:2] directions, dist = normalize(destination_vectors) return directions, dist
02088734bd3ef6ec2e1b009d5c43e6ea9f008aab
19,132
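A small NumPy illustration of the same computation; normalize() from the original module is assumed to return unit vectors and their norms, so it is inlined here.

import numpy as np

# Rows: [x, y, vx, vy, dest_x, dest_y]
state = np.array([[0.0, 0.0, 0.0, 0.0, 3.0, 4.0],
                  [1.0, 1.0, 0.0, 0.0, 1.0, 5.0]])

destination_vectors = state[:, 4:6] - state[:, 0:2]
dist = np.linalg.norm(destination_vectors, axis=-1)
directions = destination_vectors / dist[:, np.newaxis]

print(directions)  # [[0.6 0.8] [0.  1. ]]
print(dist)        # [5. 4.]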
import operator def binary_repr(number, max_length = 1025): """ Return the binary representation of the input *number* as a string. This is more efficient than using :func:`base_repr` with base 2. Increase the value of max_length for very large numbers. Note that on 32-bit machines, 2**1023 is the largest integer power of 2 which can be converted to a Python float. """ #assert number < 2L << max_length shifts = map (operator.rshift, max_length * [number], \ range (max_length - 1, -1, -1)) digits = map (operator.mod, shifts, max_length * [2]) if not digits.count (1): return 0 digits = digits [digits.index (1):] return ''.join (map (repr, digits)).replace('L','')
40d0198067722d8d4c1ef1e1a195ce6817ab0935
19,133
def df_fc_overlap_9(): """Scenario case with 3 sets of 2 overlapping fragments, bound to a common combination of 2 redundant fragments.""" mol = Chem.MolFromSmiles('NC1C(O)C(CCCC2CC2CCC2CC2)C1CCC1CC(C(N)C1O)C1CCC(O)C(N)C1') return DataFrame([ ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion', 'false_positive', 'overlap', (30, 29, 28, 27, 26, 33, 31), (32, 31, 29, 28, 27, 26, 33), 34, mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'], ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (25, 24, 22, 21, 20, 19), 34, mol, mol_o1, mol_o4, 'O1:0@4[cm]O4:0@3'], ['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (23, 22, 21, 20, 19, 24), 34, mol, mol_o1, mol_o5, 'O1:0@4[cm]O5:0@2'], ['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (25, 24, 22, 21, 20, 19), 34, mol, mol_o2, mol_o4, 'O2:0@5[cm]O4:0@3'], ['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (23, 22, 21, 20, 19, 24), 34, mol, mol_o2, mol_o5, 'O2:0@5[cm]O5:0@2'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O5', 0, 'O5:0', 'ffo', 'fusion', 'false_positive', 'overlap', (25, 24, 22, 21, 20, 19), (23, 22, 21, 20, 19, 24), 34, mol, mol_o4, mol_o5, 'O4:0@1,2,3,4,5[ffo]O5:0@1,2,3,4,5'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (1, 2, 4, 16), 34, mol, mol_o4, mol_o6, 'O4:0@5[cm]O6:0@3'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (0, 1, 2, 4, 16), 34, mol, mol_o4, mol_o8, 'O4:0@5[cm]O8:0@4'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (3, 2, 1, 16, 4), 34, mol, mol_o4, mol_o9, 'O4:0@5[cm]O9:0@3'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (1, 2, 4, 16), 34, mol, mol_o5, mol_o6, 'O5:0@4[cm]O6:0@3'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (0, 1, 2, 4, 16), 34, mol, mol_o5, mol_o8, 'O5:0@4[cm]O8:0@4'], ['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (3, 2, 1, 16, 4), 34, mol, mol_o5, mol_o9, 'O5:0@4[cm]O9:0@3'], ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O7', 0, 'O7:0', 'cm', 'connection', 'monopodal', '', (1, 2, 4, 16), (8, 9, 10), 34, mol, mol_o6, mol_o7, 'O6:0@2[cm]O7:0@0'], ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O8', 0, 'O8:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (0, 1, 2, 4, 16), 34, mol, mol_o6, mol_o8, 'O6:0@0,1,2,3[ffs]O8:0@1,2,3,4'], ['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O9', 0, 'O9:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o6, mol_o9, 'O6:0@0,1,2,3[ffs]O9:0@1,2,3,4'], ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O7', 1, 'O7:1', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (13, 14, 15), 34, mol, mol_o7, mol_o7, 'O7:0@2[cm]O7:1@0'], ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (0, 1, 2, 4, 16), 34, mol, mol_o7, mol_o8, 
'O7:0@0[cm]O8:0@3'], ['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (3, 2, 1, 16, 4), 34, mol, mol_o7, mol_o9, 'O7:0@0[cm]O9:0@4'], ['mol_fc_overlap_9', 'XXX', 'O8', 0, 'O8:0', 'O9', 0, 'O9:0', 'ffo', 'fusion', 'false_positive', 'overlap', (0, 1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o8, mol_o9, 'O8:0@1,2,3,4[ffo]O9:0@1,2,3,4'], ], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2', 'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac', 'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
5a86bce8741b76ac265b5a1865f7d3d1ad8970ea
19,134
import requests import json def check_cal(es_url, es_index, id): """Query for calibration file with specified input ID.""" query = { "query":{ "bool":{ "must": [ { "term": { "_id": id } }, ] } }, "fields": [], } if es_url.endswith('/'): search_url = '%s%s/_search' % (es_url, es_index) else: search_url = '%s/%s/_search' % (es_url, es_index) #logger.info("search_url: %s" % search_url) r = requests.post(search_url, data=json.dumps(query)) if r.status_code == 200: result = r.json() #logger.info(pformat(result)) total = result['hits']['total'] id = 'NONE' if total == 0 else result['hits']['hits'][0]['_id'] else: logger.error("Failed to query %s:\n%s" % (es_url, r.text)) logger.error("query: %s" % json.dumps(query, indent=2)) logger.error("returned: %s" % r.text) if r.status_code == 404: total, id = 0, 'NONE' else: r.raise_for_status() return total, id
10aab4dd6587b901cda543298c13662e6edeb0e1
19,135
def mountpoint_create(name, size): """Service Layer to create mountpoint""" mountpoint = MountPoint(name, size) return mountpoint
d52be1773b3cfad62423d2695b42d56ca83f7eaf
19,136
import requests import json def GetTSAWaitTimes(airportCode): """ Returns data from the TSA Wait Times API for a particular airport shortcode. :param airportCode: 3-letter shortcode of airport :return: Returns the full parsed json data from TSA Wait Times API """ base_url = "http://apps.tsa.dhs.gov/MyTSAWebService/GetTSOWaitTimes.ashx" params_tsa_d = {} params_tsa_d['ap'] = airportCode params_tsa_d['output'] = 'json' try: ## Uncomment this line if you want to get with caching for testing purposes #tsa_result_diction = json.loads(get_with_caching(base_url, params_tsa_d, saved_cache, cache_fname)) ## Comment out these two lines if you want to enable caching results_tsa = requests.get(base_url, params=params_tsa_d) tsa_result_diction = json.loads(results_tsa.text) return tsa_result_diction except Exception: print("Error: Unable to load TSA wait times. Please try again.") print("Exception: ") # sys.exit(1) quit()
bd03be14c95a3892ac75a0396da12ca04b52a59b
19,137
def False(context): """Function: <boolean> false()""" return boolean.false
93d1f1c9fbe9cf7bb02d5caac2c01ed7d0d9a2dc
19,138
def namedtuple_to_dict(model_params): """Transfers model specification from a named tuple class object to dictionary.""" init_dict = {} init_dict["GENERAL"] = {} init_dict["GENERAL"]["num_periods"] = model_params.num_periods init_dict["GENERAL"]["num_choices"] = model_params.num_choices init_dict["CONSTANTS"] = {} init_dict["CONSTANTS"]["delta"] = model_params.delta init_dict["CONSTANTS"]["mu"] = model_params.mu init_dict["CONSTANTS"]["benefits"] = model_params.benefits init_dict["INITIAL_CONDITIONS"] = {} init_dict["INITIAL_CONDITIONS"]["educ_max"] = model_params.educ_max init_dict["INITIAL_CONDITIONS"]["educ_min"] = model_params.educ_min init_dict["SIMULATION"] = {} init_dict["SIMULATION"]["seed_sim"] = model_params.seed_sim init_dict["SIMULATION"]["num_agents_sim"] = model_params.num_agents_sim init_dict["SOLUTION"] = {} init_dict["SOLUTION"]["seed_emax"] = model_params.seed_emax init_dict["SOLUTION"]["num_draws_emax"] = model_params.num_draws_emax init_dict["PARAMETERS"] = {} init_dict["PARAMETERS"]["optim_paras"] = model_params.optim_paras init_dict["DERIVED_ATTR"] = {} init_dict["DERIVED_ATTR"]["educ_range"] = model_params.educ_range init_dict["DERIVED_ATTR"]["shocks_cov"] = model_params.shocks_cov return init_dict
9ac2f23aff3b9c57599eb2c2c6cacd455ac711a5
19,139
def roberts(stream: Stream, *args, **kwargs) -> FilterableStream: """https://ffmpeg.org/ffmpeg-filters.html#roberts""" return filter(stream, roberts.__name__, *args, **kwargs)
ff58eaea65d536b47614050600c91136dc2d6f7e
19,140
import json def run_code(): """ codec api response { "error": { "decode:": "error message" }, "output": { "status_code": 0, "result": { "data_type": "event", "data": { "humidity": { "time": 1547660823, "value": 34 }, "temperature": { "time": 1547660823, "value": -3.7 } } } } } """ request_json = CodeRunSchema.validate_request() analog_type = request_json.get('analogType') protocol = db.session.query(Product.cloudProtocol) \ .filter(Product.productID == request_json.get('productID')) \ .scalar() if protocol is None: raise DataNotFound(field='productID') request_url = f"http://{current_app.config['CODEC_NODE']}/api/v1/codec" with SyncHttp() as sync_http: response = sync_http.post(request_url, json=request_json) if response.responseCode != 200: try: errors = json.loads(response.responseContent) except Exception: errors = { 'codec': response.responseContent } raise APIException(errors=errors) response_json = json.loads(response.responseContent) # return response if it has error if 'error' in response_json: return jsonify(response_json) output_data = response_json.get('output') status_code = output_data.get('status_code') # If status code is 1(ERROR) # or analog type is 2(encode) # return response without validate if status_code == 1 or analog_type == 2: return jsonify(response_json) result = output_data.get('result') error_dict = {} validate_data, validate_error = DecodeSchema().load(result) for key, value in validate_error.items(): error_dict[key] = value[0][:-1] data_stream = DataStream.query \ .filter(DataStream.productID == request_json.get('productID'), DataStream.tenantID == g.tenant_uid, DataStream.topic == request_json.get('topic'), DataStream.streamID == validate_data.get('stream_id')) \ .first() if not data_stream: raise DataNotFound(field='data_stream') error, passed_data = validate_decode_response(data_stream, validate_data) error_dict.update(error) record = { 'output': { 'status_code': status_code, 'result': passed_data } } if error_dict: record['error'] = error_dict return jsonify(record)
96118a1c74b027716a68d7c1f25eb3585e1a255c
19,141
def build_dense_conf_block(x, filter_size=32, dropout_rate=None): """ builds a dense block according to https://arxiv.org/pdf/1608.06993.pdf :param x: :param dropout_rate: :param filter_size :return: """ x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x) x = Activation('relu')(x) x = Conv2D(filter_size * 4, (1, 1), padding='same')(x) x = Conv2D(filter_size, (3, 3), padding='same')(x) if dropout_rate: x = Dropout(dropout_rate)(x) return x
2cb9639ed620d32c513ecbccf2c311360cc3cb9d
19,143
def _viz_flow(u, v, logscale=True, scaledown=6): """ Copied from @jswulff: https://github.com/jswulff/pcaflow/blob/master/pcaflow/utils/viz_flow.py top_left is zero, u is horizon, v is vertical red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12 """ color_wheel = _color_wheel() n_cols = color_wheel.shape[0] radius = np.sqrt(u ** 2 + v ** 2) if logscale: radius = np.log(radius + 1) radius = radius / scaledown rot = np.arctan2(-v, -u) / np.pi fk = (rot + 1) / 2 * (n_cols - 1) # -1~1 mapped to 0~n_cols k0 = fk.astype(np.uint8) # 0, 1, 2, ..., n_cols k1 = k0 + 1 k1[k1 == n_cols] = 0 f = fk - k0 n_colors = color_wheel.shape[1] img = np.zeros(u.shape + (n_colors,)) for i in range(n_colors): tmp = color_wheel[:, i] col0 = tmp[k0] col1 = tmp[k1] col = (1 - f) * col0 + f * col1 idx = radius <= 1 # increase saturation with radius col[idx] = 1 - radius[idx] * (1 - col[idx]) # out of range col[~idx] *= 0.75 img[:, :, i] = np.floor(255 * col).astype(np.uint8) return img.astype(np.uint8)
43901f227bc30367910bc41f9ba324bcc217bdbf
19,144
def submission_history(request, course_id, learner_identifier, location): """Render an HTML fragment (meant for inclusion elsewhere) that renders a history of all state changes made by this user for this problem location. Right now this only works for problems because that's all StudentModuleHistory records. """ found_user_name = get_learner_username(learner_identifier) if not found_user_name: return HttpResponse(escape(_('User does not exist.'))) course_key = CourseKey.from_string(course_id) try: usage_key = UsageKey.from_string(location).map_into_course(course_key) except (InvalidKeyError, AssertionError): return HttpResponse(escape(_('Invalid location.'))) course = get_course_overview_with_access(request.user, 'load', course_key) staff_access = bool(has_access(request.user, 'staff', course)) # Permission Denied if they don't have staff access and are trying to see # somebody else's submission history. if (found_user_name != request.user.username) and (not staff_access): raise PermissionDenied user_state_client = DjangoXBlockUserStateClient() try: history_entries = list(user_state_client.get_history(found_user_name, usage_key)) except DjangoXBlockUserStateClient.DoesNotExist: return HttpResponse(escape(_('User {username} has never accessed problem {location}').format( username=found_user_name, location=location ))) # This is ugly, but until we have a proper submissions API that we can use to provide # the scores instead, it will have to do. csm = StudentModule.objects.filter( module_state_key=usage_key, student__username=found_user_name, course_id=course_key) scores = BaseStudentModuleHistory.get_history(csm) if len(scores) != len(history_entries): log.warning( "Mismatch when fetching scores for student " "history for course %s, user %s, xblock %s. " "%d scores were found, and %d history entries were found. " "Matching scores to history entries by date for display.", course_id, found_user_name, location, len(scores), len(history_entries), ) scores_by_date = { score.created: score for score in scores } scores = [ scores_by_date[history.updated] for history in history_entries ] context = { 'history_entries': history_entries, 'scores': scores, 'username': found_user_name, 'location': location, 'course_id': str(course_key) } return render_to_response('courseware/submission_history.html', context)
dd0459844b4f30e653dacf474cdb5ddf186ed0dc
19,145
def tou(month, weekday, hour):
    """ Calculate the time-of-use (TOU) price tier for a given month, weekday and hour.

    Weekday values 0 and 6 are always priced off-peak; for the remaining days the
    tier depends on the season (May-October vs. the rest of the year) and the hour.
    """
    if weekday in [0, 6]:
        return OFFPEAK
    else:
        if month in [5, 6, 7, 8, 9, 10]:
            if hour in [11, 12, 13, 14, 15, 16]:
                return ONPEAK
            elif hour in [7, 8, 9, 10, 17, 18, 19, 20]:
                return MIDPEAK
            else:
                return OFFPEAK
        else:
            if hour in [11, 12, 13, 14, 15, 16]:
                return MIDPEAK
            elif hour in [7, 8, 9, 10, 17, 18, 19, 20]:
                return ONPEAK
            else:
                return OFFPEAK
31708916be97d52d229499053b0b3d29603fdfb9
19,146
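A few spot checks of the tou schedule above. This sketch assumes the ONPEAK/MIDPEAK/OFFPEAK constants are defined elsewhere in the module and that weekday values 0 and 6 denote the weekend, as the first guard implies.

print(tou(7, 0, 12))   # weekday 0 (weekend)        -> OFFPEAK
print(tou(7, 3, 12))   # summer weekday at noon     -> ONPEAK
print(tou(7, 3, 8))    # summer weekday, 8 a.m.     -> MIDPEAK
print(tou(1, 3, 12))   # winter weekday at noon     -> MIDPEAK
print(tou(1, 3, 2))    # winter weekday overnight   -> OFFPEAK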
import json


def get_job(request):
    """
    Retrieve a specific Job
    URL: /admin/Jobs/GetOne
    :param request: Django request object carrying the ``id`` GET parameter
    :return: HttpResponse containing the JSON-encoded job
    """
    job_id = request.GET.dict().get("id")
    response = {
        'status': 1,
        'status_message': 'Success',
        # QuerySets are not JSON serializable; convert the matching rows to plain dicts
        'job': list(job.objects.filter(id=job_id).values())
    }
    return HttpResponse(json.dumps(response))
82d4c981b48fb0274ae4f2f888149b11ed731b88
19,147
def cpt_lvq_merid_deriv(temp, sphum): """Meridional derivative of c_p*T + L_v*q on pressure coordinates.""" deriv_obj = LatCenDeriv(cpt_lvq(temp, sphum), LAT_STR) return deriv_obj.deriv()
629a630bb0663b16f20fb0f7d68ca54ebebc7e21
19,148
def hubert_pretrain_large( encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0, ) -> HuBERTPretrainModel: # Overriding the signature so that the return type is correct on Sphinx """hubert_pretrain_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0) -> torchaudio.models.HuBERTPretrainModel Build HuBERTPretrainModel model for pre-training with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`] Args: encoder_projection_dropout (float): See :py:func:`hubert_pretrain_model`. encoder_attention_dropout (float): See :py:func:`hubert_pretrain_model`. encoder_ff_interm_dropout (float): See :py:func:`hubert_pretrain_model`. encoder_dropout (float): See :py:func:`hubert_pretrain_model`. encoder_layer_drop (float): See :py:func:`hubert_pretrain_model`. Returns: HuBERTPretrainModel: The resulting model. """ # noqa: E501 return hubert_pretrain_model( extractor_mode="layer_norm", extractor_conv_layer_config=None, extractor_conv_bias=False, encoder_embed_dim=1024, encoder_projection_dropout=encoder_projection_dropout, encoder_pos_conv_kernel=128, encoder_pos_conv_groups=16, encoder_num_layers=24, encoder_num_heads=16, encoder_attention_dropout=encoder_attention_dropout, encoder_ff_interm_features=4096, encoder_ff_interm_dropout=encoder_ff_interm_dropout, encoder_dropout=encoder_dropout, encoder_layer_norm_first=True, encoder_layer_drop=encoder_layer_drop, mask_prob=0.80, mask_selection="static", mask_other=0.0, mask_length=10, no_mask_overlap=False, mask_min_space=1, mask_channel_prob=0.0, mask_channel_selection="static", mask_channel_other=0.0, mask_channel_length=10, no_mask_channel_overlap=False, mask_channel_min_space=1, skip_masked=False, skip_nomask=False, num_classes=500, final_dim=768, )
dd57cfcb803424ed46fcb597a71aa8e88de3ad32
19,150
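A small instantiation sketch for hubert_pretrain_large, assuming a torchaudio installation that provides hubert_pretrain_model; it only builds the model and reports its parameter count.

model = hubert_pretrain_large(encoder_layer_drop=0.1)
n_params = sum(p.numel() for p in model.parameters())
print(f"HuBERT Large pre-training model: {n_params / 1e6:.1f}M parameters")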
from math import cos, sin

import numpy as np
import numpy.random as npr


def random_scaled_rotation(ralpha=(-0.2, 0.2), rscale=((0.8, 1.2), (0.8, 1.2))):
    """Compute a random transformation matrix for a scaled rotation.

    :param ralpha: range of rotation angles
    :param rscale: range of scales for x and y
    :returns: random transformation

    """
    affine = np.eye(2)
    if rscale is not None:
        (x0, x1), (y0, y1) = rscale
        affine = np.diag([npr.uniform(x0, x1), npr.uniform(y0, y1)])
    if ralpha is not None:
        a0, a1 = ralpha
        a = npr.uniform(a0, a1)
        c = cos(a)
        s = sin(a)
        m = np.array([[c, -s], [s, c]], 'f')
        affine = np.dot(m, affine)
    return affine
f6216486e94fa7eac0be75b2a420fc1f251987c2
19,151
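A short illustration of applying the matrix returned by random_scaled_rotation to a couple of points; the seed is only there to make the sketch reproducible.

import numpy as np
import numpy.random as npr

npr.seed(0)
affine = random_scaled_rotation()
points = np.array([[1.0, 0.0], [0.0, 1.0]])
print(affine.shape)        # (2, 2)
print(points @ affine.T)   # row vectors transformed by the random scaled rotation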
import time def time_as_int() -> int: """ Syntactic sugar for >>> from time import time >>> int(time()) """ return int(time.time())
f7f6d037d156c09a01c0ff13f8b43418133ab1b0
19,152
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch

from pyhap import hap_server


def test_end_response_is_one_send():
    """Test that ``HAPServerHandler`` sends the whole response at once."""

    class ConnectionMock:
        sent_bytes = []

        def sendall(self, bytesdata):
            self.sent_bytes.append([bytesdata])
            return 1

        def getsent(self):
            return self.sent_bytes

    amock = Mock()

    with patch("pyhap.hap_server.HAPServerHandler.setup"), patch(
        "pyhap.hap_server.HAPServerHandler.handle_one_request"
    ), patch("pyhap.hap_server.HAPServerHandler.finish"):
        handler = hap_server.HAPServerHandler(
            "mocksock", "mockclient_addr", "mockserver", amock
        )
        handler.request_version = "HTTP/1.1"
        handler.connection = ConnectionMock()
        handler.requestline = "GET / HTTP/1.1"
        handler.send_response(200)
        handler.wfile = MagicMock()
        handler.end_response(b"body")
        assert handler.connection.getsent() == [
            [b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nbody"]
        ]
        assert handler._headers_buffer == []  # pylint: disable=protected-access
        assert handler.wfile.called_once()
7c28c6b6fb8f123daa75f9710c26d5345810160b
19,153
def compute_norm_cond_entropy_corr(data_df, attrs_from, attrs_to): """ Computes the correlations between attributes by calculating the normalized conditional entropy between them. The conditional entropy is asymmetric, therefore we need pairwise computation. The computed correlations are stored in a dictionary in the format: { attr_a: { cond_attr_i: corr_strength_a_i, cond_attr_j: corr_strength_a_j, ... }, attr_b: { cond_attr_i: corr_strength_b_i, ...} } :return a dictionary of correlations """ corr = {} # Compute pair-wise conditional entropy. for x in attrs_from: corr[x] = {} for y in attrs_to: # Set correlation to 1 for same attributes. if x == y: corr[x][y] = 1.0 continue xy_df = data_df[[x, y]] xy_df = xy_df.loc[~(xy_df[x] == NULL_REPR) & ~(xy_df[y] == NULL_REPR)] x_vals = xy_df[x] x_domain_size = x_vals.nunique() # Set correlation to 0.0 if entropy of x is 1 (only one possible value). if x_domain_size == 1 or len(xy_df) == 0: corr[x][y] = 0.0 continue # Compute the conditional entropy H(x|y) = H(x,y) - H(y). # H(x,y) denotes H(x U y). # If H(x|y) = 0, then y determines x, i.e., y -> x. # Use the domain size of x as a log base for normalization. y_vals = xy_df[y] x_y_entropy = drv.entropy_conditional(x_vals, y_vals, base=x_domain_size).item() # The conditional entropy is 0 for strongly correlated attributes and 1 for # completely independent attributes. We reverse this to reflect the correlation. corr[x][y] = 1.0 - x_y_entropy return corr
12dafa7ecb941c008ab2bb7c93ed6e0c8b1302ad
19,154
def should_retry_http_code(status_code): """ :param status_code: (int) http status code to check for retry eligibility :return: (bool) whether or not responses with the status_code should be retried """ return status_code not in range(200, 500)
69acb5bd34b06e1ff1e29630ac93e60a3ccc835c
19,155
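Quick examples of what should_retry_http_code allows through: anything outside the 200-499 range (5xx server errors as well as sub-200 codes) is considered retryable.

print(should_retry_http_code(503))  # True  -- server errors are retried
print(should_retry_http_code(404))  # False -- client errors are not
print(should_retry_http_code(200))  # False -- success needs no retry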
def softmax(x): """Calculates the softmax for each row of the input x. Your code should work for a row vector and also for matrices of shape (n, m). Argument: x -- A numpy matrix of shape (n,m) Returns: s -- A numpy matrix equal to the softmax of x, of shape (n,m) """ # Apply exp() element-wise to x. Use np.exp(...). x_exp = np.exp(x) # Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True). x_sum = np.sum(x_exp, axis=1, keepdims=True) # Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting. s = x_exp / x_sum # print("x_exp: {}, x_sum: {}".format(x_exp.shape, x_sum.shape)) return s
d4905ec1a145aae47532b43a66a00a29180a37e4
19,156
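A small check of the softmax implementation above; the input values are arbitrary, and each row of the result should sum to 1.

import numpy as np

x = np.array([[9, 2, 5, 0, 0],
              [7, 5, 0, 0, 0]])
s = softmax(x)
print(s.shape)         # (2, 5)
print(s.sum(axis=1))   # [1. 1.]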
def inertia_tensor_eigvals(image, mu=None, T=None): """Compute the eigenvalues of the inertia tensor of the image. The inertia tensor measures covariance of the image intensity along the image axes. (See `inertia_tensor`.) The relative magnitude of the eigenvalues of the tensor is thus a measure of the elongation of a (bright) object in the image. Parameters ---------- image : array The input image. mu : array, optional The pre-computed central moments of ``image``. T : array, shape ``(image.ndim, image.ndim)`` The pre-computed inertia tensor. If ``T`` is given, ``mu`` and ``image`` are ignored. Returns ------- eigvals : list of float, length ``image.ndim`` The eigenvalues of the inertia tensor of ``image``, in descending order. Notes ----- Computing the eigenvalues requires the inertia tensor of the input image. This is much faster if the central moments (``mu``) are provided, or, alternatively, one can provide the inertia tensor (``T``) directly. """ if T is None: T = inertia_tensor(image, mu) eigvals = np.linalg.eigvalsh(T) # Floating point precision problems could make a positive # semidefinite matrix have an eigenvalue that is very slightly # negative. This can cause problems down the line, so set values # very near zero to zero. eigvals = np.clip(eigvals, 0, None, out=eigvals) return sorted(eigvals, reverse=True)
af48827b709b48cdae7b2a8fe7ad3723845ee6cd
19,157
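A toy example for inertia_tensor_eigvals, assuming the companion inertia_tensor function from the same module is available; an elongated bright bar should produce one clearly dominant eigenvalue.

import numpy as np

image = np.zeros((15, 15))
image[7, 2:13] = 1.0           # a thin horizontal bar
ev = inertia_tensor_eigvals(image)
print(ev[0] > ev[1])           # True: strong elongation along one axis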
def extract_values(inst): """ :param inst: the instance :return: python values extracted from the instance """ # inst should already be python return inst
087bb00ee6e3666b4a9e682ca420623982a12102
19,158
def voc_ap(rec, prec, use_07_metric=False): """ ap = voc_ap(rec, prec, [use_07_metric]) Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11 point method (default:False). """ if use_07_metric: # 11 point metric ap = 0. for t in np.arange(0., 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11. else: # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.], rec, [1.])) mpre = np.concatenate(([0.], prec, [0.])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap
e9a4ebec8908e306bcf12e2e9538a8de8b74e84b
19,159
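A worked example for voc_ap on a tiny monotone recall curve; the numbers are made up, but they show how the 2010+ area computation and the older 11-point VOC07 approximation can differ slightly.

import numpy as np

rec = np.array([0.1, 0.2, 0.4, 0.7, 1.0])
prec = np.array([1.0, 1.0, 0.8, 0.6, 0.5])
print(voc_ap(rec, prec))                      # exact area under the interpolated PR curve
print(voc_ap(rec, prec, use_07_metric=True))  # 11-point interpolated approximation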
def is_on_path(prog):
    """Check whether a given executable is on the current PATH.

    Returns False if ``which`` fails, otherwise the (truthy) command result.
    """
    r = runcmd("which %s" % prog)
    if r.failed:
        return False
    else:
        return r
1019ab3b08ef97c307588f8902a7884a89039998
19,160
def validate_entry(new_text) -> bool:
    """Callback function used to validate input in the fields of the ExperimentPCR window.

    It is called every time the user tries to insert a value into the entry field.
    A valid entry must satisfy the following requirements:
    - It is composed only of integer digits.
    - It has no more than 3 characters.

    :param new_text: Passed in by the entry widget itself.
    :return: boolean - Tells the widget whether or not the entry is valid.
    """
    if new_text == '':  # If "backspace" cleared the field
        return True
    try:
        int(new_text)
        # Return a boolean instead of falling through to None for long inputs
        return len(new_text) <= 3
    except ValueError:
        return False
8e0f5f126d0688279fc28a8be287fda00d346a59
19,161
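Expected behaviour of the corrected validate_entry above on a handful of inputs:

print(validate_entry("42"))    # True  -- digits only, 3 characters or fewer
print(validate_entry("1234"))  # False -- too long
print(validate_entry("a1"))    # False -- not an integer
print(validate_entry(""))      # True  -- backspace leaves an empty field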
import io def eia_cbecs_land_call(*, resp, url, **_): """ Convert response for calling url to pandas dataframe, begin parsing df into FBA format :param resp: df, response from url call :param url: string, url :return: pandas dataframe of original source data """ # Convert response to dataframe df_raw_data = pd.read_excel(io.BytesIO(resp.content), sheet_name='data') df_raw_rse = pd.read_excel(io.BytesIO(resp.content), sheet_name='rse') if "b5.xlsx" in url: # skip rows and remove extra rows at end of dataframe df_data = pd.DataFrame(df_raw_data.loc[15:32]).reindex() df_rse = pd.DataFrame(df_raw_rse.loc[15:32]).reindex() df_data.columns = ["Name", "All buildings", "New England", "Middle Atlantic", "East North Central", "West North Central", "South Atlantic", "East South Central", "West South Central", "Mountain", "Pacific"] df_rse.columns = ["Name", "All buildings", "New England", "Middle Atlantic", "East North Central", "West North Central", "South Atlantic", "East South Central", "West South Central", "Mountain", "Pacific"] df_rse = df_rse.melt(id_vars=["Name"], var_name="Location", value_name="Spread") df_data = df_data.melt(id_vars=["Name"], var_name="Location", value_name="FlowAmount") if "b12.xlsx" in url: # skip rows and remove extra rows at end of dataframe df_data1 = pd.DataFrame(df_raw_data[4:5]).reindex() df_data2 = pd.DataFrame(df_raw_data.loc[46:50]).reindex() df_data = pd.concat([df_data1, df_data2], ignore_index=True) df_rse1 = pd.DataFrame(df_raw_rse[4:5]).reindex() df_rse2 = pd.DataFrame(df_raw_rse.loc[46:50]).reindex() df_rse = pd.concat([df_rse1, df_rse2], ignore_index=True) # drop the empty columns at end of df df_data = df_data.iloc[:, 0:9] df_rse = df_rse.iloc[:, 0:9] df_data.columns = ["Description", "All buildings", "Office", "Warehouse and storage", "Service", "Mercantile", "Religious worship", "Education", "Public assembly"] df_rse.columns = ["Description", "All buildings", "Office", "Warehouse and storage", "Service", "Mercantile", "Religious worship", "Education", "Public assembly"] df_rse = df_rse.melt(id_vars=["Description"], var_name="Name", value_name="Spread") df_data = df_data.melt(id_vars=["Description"], var_name="Name", value_name="FlowAmount") if "b14.xlsx" in url: # skip rows and remove extra rows at end of dataframe df_data = pd.DataFrame(df_raw_data.loc[27:31]).reindex() df_rse = pd.DataFrame(df_raw_rse.loc[27:31]).reindex() # drop the empty columns at end of df df_data = df_data.iloc[:, 0:8] df_rse = df_rse.iloc[:, 0:8] df_data.columns = ["Description", "All buildings", "Food service", "Food sales", "Lodging", "Health care In-Patient", "Health care Out-Patient", "Public order and safety"] df_rse.columns = ["Description", "All buildings", "Food service", "Food sales", "Lodging", "Health care In-Patient", "Health care Out-Patient", "Public order and safety"] df_rse = df_rse.melt(id_vars=["Description"], var_name="Name", value_name="Spread") df_data = df_data.melt(id_vars=["Description"], var_name="Name", value_name="FlowAmount") df = pd.merge(df_rse, df_data) return df
396079863ecc2faa6420e90f3d608ff997a3fb39
19,162
def add_l2_interface(interface_name, interface_desc=None, interface_admin_state="up", **kwargs):
    """
    Perform a POST call to create an Interface table entry for a physical L2 interface.

    :param interface_name: Alphanumeric Interface name
    :param interface_desc: Optional description for the interface. Defaults to nothing if not specified.
    :param interface_admin_state: Optional administratively-configured state of the interface.
        Defaults to "up" if not specified.
    :param kwargs:
        keyword s: requests.session object with loaded cookie jar
        keyword url: URL in main() function
    :return: True if successful, False otherwise
    """
    if kwargs["url"].endswith("/v1/"):
        return port.add_l2_port(interface_name, interface_desc, interface_admin_state, **kwargs)
    else:
        # Updated else for when version is v10.04
        return _add_l2_interface(interface_name, interface_desc, interface_admin_state, **kwargs)
d27b4b5ec738a5a508a3fc9d8852ecf5df56debe
19,163
import pkg_resources def language_descriptions(): """ Return a dict of `LanguageDesc` instances keyed by language name. """ global languages if languages is None: languages = {} for language in pkg_resources.WorkingSet().iter_entry_points( group='textx_languages'): register_language_with_project(language.load(), language.dist.project_name, language.dist.version) return languages
236b8fd595f1b4754eeca2b8b17a88fa36090ca5
19,164
import six def load_fixtures(fixtures_dict=None): """ Loads fixtures specified in fixtures_dict. This method must be used for fixtures that don't have associated data models. We simply want to load the meta into dict objects. fixtures_dict should be of the form: { 'actionchains': ['actionchain1.json', 'actionchain2.json'], 'workflows': ['workflow.yaml'] } :param fixtures_dict: Dictionary specifying the fixtures to load for each type. :type fixtures_dict: ``dict`` :rtype: ``dict`` """ if fixtures_dict is None: fixtures_dict = {} all_fixtures = {} fixtures_base_path = get_fixtures_base_path() for fixture_type, fixtures in six.iteritems(fixtures_dict): loaded_fixtures = {} for fixture in fixtures: fixture_path = fixtures_base_path + '/' + fixture fixture_dict = load_content(fixture_path) loaded_fixtures[fixture] = fixture_dict all_fixtures[fixture_type] = loaded_fixtures return all_fixtures
b43c1303a7c54a571a0e3ddf7881d7113371e293
19,165
def generate_sbm(sizes, probs, maxweight=1): """Generate a Stochastic Block Model graph. Assign random values drawn from U({1, ..., maxw}) to the edges. sizes : list of sizes (int) of the blocks probs : matrix of probabilities (in [0, 1]) of edge creation between nodes depending on the blocks they belong to maxweight : maximum value of the weights to randomly assign (default 1, resulting in weights all equal to 1) """ graph = nx.stochastic_block_model(sizes, probs) weights = 1 + np.random.choice(maxweight, len(graph.edges)) weights = dict(zip(graph.edges, weights)) nx.set_edge_attributes(graph, weights, 'weight') return graph
c6b0a106d88016afc99bf45abeb7c60af2981d77
19,166
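An illustrative call to generate_sbm, assuming networkx is imported as nx and numpy as np as the function body requires; two dense blocks with a sparse bridge between them.

import numpy as np
import networkx as nx

sizes = [10, 10]
probs = [[0.8, 0.05],
         [0.05, 0.8]]
g = generate_sbm(sizes, probs, maxweight=5)
print(g.number_of_nodes())                 # 20
print(list(g.edges(data="weight"))[:3])    # edge weights drawn from {1, ..., 5}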
import re def eq_portions(actual: str, expected: str): """ Compare whether actual matches portions of expected. The portions to ignore are of two types: - ***: ignore anything in between the left and right portions, including empty - +++: ignore anything in between left and right, but non-empty :param actual: string to test :param expected: expected string, containing at least one of the two patterns :return: a list of the portions ignored; if empty, it means there is no match. >>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++') () >>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++') ('_1__', '__2__', '_3__', '_4_') >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++') ('_1__', '', '_3__', '_4_') >>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++') () >>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee') () >>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***') ('', '_1__', '__2_', '') >>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa') () >>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa') Traceback (most recent call last): ... ValueError: The 'expected' argument must contain at least one *** OR +++ """ re_expect = re.escape(expected) ANYTHING = re.escape('\\*' * 3) SOMETHING = re.escape('\\+' * 3) if not re.search(ANYTHING, re_expect) and not re.search(SOMETHING, re_expect): raise ValueError("The 'expected' argument must contain at least one *** OR +++") re_expect = re.sub(SOMETHING, '(.+)', re_expect) re_expect = re.sub(ANYTHING, '(.*)', re_expect) matches = re.fullmatch(re_expect, actual) if not matches: return () return matches.groups()
704b2a83575347c5143c2dc0aca5227a8fc5bd4b
19,167
def _get_encoder(
    in_features: int,
    embed_dim: int,
    dropout_input: float,
    pos_conv_kernel: int,
    pos_conv_groups: int,
    num_layers: int,
    num_heads: int,
    attention_dropout: float,
    ff_interm_features: int,
    ff_interm_dropout: float,
    dropout: float,
    layer_norm_first: bool,
    layer_drop: float,
) -> Encoder:
    """
    Args:
        in_features (int): The number of input features.
        embed_dim (int):
            The dimension of embedding.
            This option corresponds to "encoder_embed_dim" from fairseq.
            Expected values are 768 for Base arch, and 1024 for Large arch.
        dropout_input (float):
            The dropout probability applied after the input feature is projected to ``embed_dim``.
            This option corresponds to "dropout_input" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        pos_conv_kernel (int):
            The kernel size of convolutional positional embeddings.
            This option corresponds to "conv_pos" from fairseq.
            Expected values are 128 for both Base and Large arch.
        pos_conv_groups (int):
            The number of groups of convolutional positional embeddings.
            This option corresponds to "conv_pos_groups" from fairseq.
            Expected values are 16 for both Base and Large arch.
        num_layers (int):
            The number of self attention layers in transformer block.
            This option corresponds to "encoder_layers" from fairseq.
            Expected values are 12 for Base and 24 for Large arch.
        num_heads (int):
            The number of heads in self attention layers.
            This option corresponds to "encoder_attention_heads" from fairseq.
            Expected values are 12 for Base and 16 for Large arch.
        attention_dropout (float):
            The dropout probability applied after softmax in self-attention layer.
            This option corresponds to "attention_dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        ff_interm_features (int):
            The dimension of hidden features in feed forward layer.
            This option corresponds to "encoder_ffn_embed_dim" from fairseq.
            Expected values are 3072 for Base and 4096 for Large arch.
        ff_interm_dropout (float):
            The dropout probability applied in feedforward layer.
            This option corresponds to "activation_dropout" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
        dropout (float):
            The dropout probability applied at the end of feed forward layer.
            This option corresponds to "dropout" from fairseq.
            Expected values are 0.1 for Base and 0.0 for Large arch.
        layer_norm_first (bool):
            Control the order of layer norm in transformer layer and each encoder layer.
            If True, in transformer layer, layer norm is applied before features are fed
            to encoder layers. In encoder layer, two layer norms are applied before and after
            self attention.
            If False, in transformer layer, layer norm is applied after features are fed
            to encoder layers. In encoder layer, two layer norms are applied after self
            attention, before and after feed forward.
            This option corresponds to "layer_norm_first" from fairseq.
            Expected values are False for Base and True for Large arch.
        layer_drop (float):
            Probability to drop each encoder layer during training.
            This option corresponds to "layerdrop" from fairseq.
            Expected values are 0.1 for both Base and Large arch.
See Also: * "encoder_embed_dim" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64 * "dropout_input" - Def, base and large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78 * "conv_pos" - Def, base and large NOTE: The description is wrong. https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207 - Usage https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756 * "conv_pos_groups" - Def, base and large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211 * "encoder_layers" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63 * "encoder_attention_heads" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66 * "attention_dropout" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60 * "encoder_ffn_embed_dim" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65 * "activation_dropout" - Def https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71 - Base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55 * "dropout" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59 * "layer_norm_first" - Def and base https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53 * "layerdrop" - Def https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74 - Base 
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54 - Large https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54 """ feature_projection = FeatureProjection(in_features, embed_dim, dropout_input) pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups) # Original impl # https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782 encoder_layers = nn.ModuleList() for _ in range(num_layers): attention = SelfAttention( embed_dim=embed_dim, num_heads=num_heads, dropout=attention_dropout, ) feed_forward = FeedForward( io_features=embed_dim, intermediate_features=ff_interm_features, intermediate_dropout=ff_interm_dropout, output_dropout=dropout, ) encoder_layers.append( EncoderLayer( attention=attention, dropout=dropout, layer_norm_first=layer_norm_first, feed_forward=feed_forward, ) ) transformer = Transformer( pos_conv_embed=pos_conv, dropout=dropout, layers=encoder_layers, layer_norm_first=not layer_norm_first, layer_drop=layer_drop, ) return Encoder(feature_projection, transformer)
72ff9887575905172db0b095b3d6822ee6b51411
19,168
def _soft_threshold(a, b): """Soft-threshold operator for the LASSO and elastic net.""" return np.sign(a) * np.clip(np.abs(a) - b, a_min=0, a_max=None)
34f28c1154cf9eefecc19e1ece8dfa3ca82e677e
19,169
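A quick numerical check of _soft_threshold: entries with magnitude below the threshold collapse to zero, and everything else is shrunk toward zero by exactly the threshold.

import numpy as np

a = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])
print(_soft_threshold(a, 1.0))   # [-2. -0.  0.  0.  2.]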
def predict(input_tokens): """register predict method in pangu-alpha""" token_ids, valid_length = register.call_preprocess(preprocess, input_tokens) ############# two output ################### # p, p_args = register.call_servable(token_ids) # add_token = register.call_postprocess(postprocess, p, p_args, valid_length) ############################################# ################# one output #################### logits = register.call_servable(token_ids) add_token = register.call_postprocess(postprocess_topk, logits, valid_length) return add_token
f2de6ff2ba78c3cac47a823bfe7a201a9a6b93ad
19,170