content: string (length 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def find_keyword(URL, title, keywords):
    """Find keyword: helper function of history_list."""
    for keyword in keywords:
        # case insensitive; a non-empty keyword must occur in the URL or the title
        if len(keyword) > 0 and ((URL is not None and keyword.lower() in URL.lower())
                                 or (title is not None and keyword.lower() in title.lower())):
            return True
    return False
b956cc3744411a409a227cb80423dcf52ca9d248
33,756
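A quick usage sketch of find_keyword above, with hypothetical inputs; the added parentheses make the URL/title check properly case-insensitive and None-safe:

find_keyword("https://Example.com/Docs", "Home page", ["docs"])  # True: "docs" matches the URL, case-insensitively
find_keyword(None, "Release notes", ["", "notes"])               # True: empty keywords are skipped, "notes" matches the title
find_keyword(None, None, ["docs"])                               # False: nothing to match against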
def parsetypes(dtype):
    """
    Parse the types from a structured numpy dtype object.

    Return list of string representations of types from a structured numpy
    dtype object, e.g. ['int', 'float', 'str'].

    Used by :func:`tabular.io.saveSV` to write out type information in the header.

    **Parameters**

        **dtype** : numpy dtype object

            Structured numpy dtype object to parse.

    **Returns**

        **out** : list of strings

            List of strings corresponding to numpy types::

                [dtype[i].name.strip('1234567890').rstrip('ing') for i in range(len(dtype))]

    """
    return [dtype[i].name.strip('1234567890').rstrip('ing')
            for i in range(len(dtype))]
6f373135f751b243104cc7222326d995048d7c93
33,757
import random

import cv2


def random_flip(image):
    """50% chance to flip the image for some variation."""
    if random.random() < 0.5:
        image = cv2.flip(image, 1)  # flipCode 1 = horizontal flip (around the y-axis)
    return image
8e9898dd0a505f4db4a56e6cea8fa8631790e6c4
33,758
def sites_only(exclude_isoforms=False):
    """Return PhosphositePlus data as a flat list of proteins and sites.

    Parameters
    ----------
    exclude_isoforms : bool
        Whether to exclude sites for protein isoforms.
        Default is False (includes isoforms).

    Returns
    -------
    list of tuples
        Each tuple consists of (uniprot_id, residue, position).
    """
    sites = []
    (data_by_up, data_by_site_grp) = _get_phospho_site_dataset()
    for up_id, respos_dict in data_by_up.items():
        if exclude_isoforms and '-' in up_id:
            continue
        for respos in respos_dict.keys():
            res = respos[0]
            pos = respos[1:]
            sites.append((up_id, res, pos))
    return sites
b3c021e7e4332274c875b189b0f73f7ad3e43e0d
33,759
from urllib.parse import parse_qsl, urlencode, urlsplit, urlunsplit


def transform_url(url, qparams=None, **kwargs):
    """
    Modify url

    :param url: url to transform (can be relative)
    :param qparams: additional query params to add to end of url
    :param kwargs: pieces of URL to modify - e.g. netloc=localhost:8000
    :return: Modified URL

    .. versionadded:: 3.2.0
    """
    if not url:
        return url
    link_parse = urlsplit(url)
    if qparams:
        current_query = dict(parse_qsl(link_parse.query))
        current_query.update(qparams)
        link_parse = link_parse._replace(query=urlencode(current_query))
    return urlunsplit(link_parse._replace(**kwargs))
d19d5845e6ebe4d14579849ed937160dc71a0421
33,760
import numpy as np


def coords_in_bbox(pose, bbox):
    """
    Return coords normalized to interval's bbox as dict of np arrays.

    Origin at box center, (.5, .5).
    """
    x1, x2, y1, y2 = bbox
    width = x2 - x1
    height = y2 - y1
    origin = np.array([x1, y1])
    scale = np.array([width, height])
    normalized_pose = {}
    for joint in pose:
        normalized_pose[joint] = np.array(pose[joint][:-1])
        normalized_pose[joint] = (normalized_pose[joint] - origin) / scale - np.array([.5, .5])
    return normalized_pose
96f98fe26bff8095311966fc640f887464c7cd4f
33,761
import cv2
import numpy as np


def display_instances(image, boxes, masks, ids, names, scores):
    """
    Take the image and results and apply the mask, box, and label.
    """
    n_instances = boxes.shape[0]
    colors = random_colors(n_instances)

    if not n_instances:
        print('NO INSTANCES TO DISPLAY')
    else:
        assert boxes.shape[0] == masks.shape[-1] == ids.shape[0]

    for i, color in enumerate(colors):
        if not np.any(boxes[i]):
            continue

        y1, x1, y2, x2 = boxes[i]
        label = names[ids[i]]
        score = scores[i] if scores is not None else None
        caption = '{} {:.2f}'.format(label, score) if score else label
        mask = masks[:, :, i]

        image = apply_mask(image, mask, color)
        image = cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        image = cv2.putText(
            image, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, color, 2
        )

    return image
60bdc9b8500dc0004837a630c72c361204820328
33,762
import numpy as np
from numpy import pi


def _find_xy(ll, T, M, maxiter, atol, rtol, low_path):
    """Computes all x, y for given number of revolutions."""
    # For abs(ll) == 1 the derivative is not continuous
    assert abs(ll) < 1

    M_max = np.floor(T / pi)
    T_00 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2)  # T_xM

    # Refine maximum number of revolutions if necessary
    if T < T_00 + M_max * pi and M_max > 0:
        _, T_min = _compute_T_min(ll, M_max, maxiter, atol, rtol)
        if T < T_min:
            M_max -= 1

    # Check if a feasible solution exist for the given number of revolutions
    # This departs from the original paper in that we do not compute all solutions
    if M > M_max:
        raise ValueError("No feasible solution, try lower M!")

    # Initial guess
    x_0 = _initial_guess(T, ll, M, low_path)

    # Start Householder iterations from x_0 and find x, y
    x, numiter, tpi = _householder(x_0, T, ll, M, atol, rtol, maxiter)
    y = _compute_y(x, ll)

    return x, y, numiter, tpi
09bfe02e08e7b9975f65a99cb1e706ba96ed71f9
33,764
def ligne_vivante(ligne, x_1, x_2):
    """
    Return a boolean indicating whether the given row contains
    at least one living cell.
    """
    for colonne in range(x_1, x_2 + 1):
        if plateau[ligne][colonne] != CELLULE_MORTE:
            return True
    return False
db9be50c14458eb11ee853d15fe5997ea857d55a
33,765
def changeMatchingReciprocity(G, i, j):
    """Change statistic for categorical matching reciprocity."""
    return G.catattr[i] == G.catattr[j] and G.isArc(j, i)
30ece553661b71e8cb2674d88b0daa4905dd9039
33,766
def compare_letters(letter1, letter2, table=ambiguity_code_to_nt_set):
    """Compare two extended nucleotide letters and return True if they match."""
    set1 = table[letter1]
    set2 = table[letter2]
    # The letters match if their nucleotide sets intersect.
    return bool(set1 & set2)
190d00cb52890e52f58e07192227f9af7173ee35
33,767
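A hedged usage sketch for compare_letters; the table below is an illustrative stand-in for the module's own ambiguity_code_to_nt_set mapping of IUPAC codes to nucleotide sets, passed explicitly:

iupac = {'A': {'A'}, 'C': {'C'}, 'R': {'A', 'G'}, 'N': {'A', 'C', 'G', 'T'}}

compare_letters('A', 'R', table=iupac)  # True: {'A'} & {'A', 'G'} is non-empty
compare_letters('A', 'C', table=iupac)  # False: the sets are disjoint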
import numpy as np


def get_swarm_yspans(coll, round_result=False, decimals=12):
    """
    Given a matplotlib Collection, will obtain the y spans
    for the collection. Will return None if this fails.

    Modified from `get_swarm_spans` in plot_tools.py.
    """
    _, y = np.array(coll.get_offsets()).T
    try:
        if round_result:
            return np.around(y.min(), decimals), np.around(y.max(), decimals)
        else:
            return y.min(), y.max()
    except ValueError:
        return None
2561f04243e63dfa87896e891ae337ab9be310a7
33,768
def secondes(heure):
    """Take a time in `H:M:S` format and return the corresponding
    number of seconds (integer).

    The time is assumed to be well formatted: the hour, minute
    and second counts are always valid.
    """
    H, M, S = heure.split(":")
    return (3600 * int(H)) + (60 * int(M)) + int(S)
33d380005479d66041e747130a4451c555baf497
33,769
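A worked example for secondes: "1:02:03" is 1*3600 + 2*60 + 3 seconds:

secondes("1:02:03")  # 3723
secondes("0:00:45")  # 45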
import tensorflow as tf


def central_crop(image, crop_height, crop_width, channels=3):
    """Performs central crops of the given image list.

    Args:
        image: a 3-D image tensor
        crop_height: the height of the image following the crop.
        crop_width: the width of the image following the crop.
        channels: the number of channels in the image.

    Returns:
        3-D tensor with cropped image.
    """
    shape = tf.shape(image)
    height, width = shape[0], shape[1]

    amount_to_be_cropped_h = height - crop_height
    crop_top = amount_to_be_cropped_h // 2
    amount_to_be_cropped_w = width - crop_width
    crop_left = amount_to_be_cropped_w // 2

    # return tf.image.crop_to_bounding_box(image, crop_top, crop_left, crop_height, crop_width)
    size_assertion = tf.Assert(
        tf.logical_and(
            tf.greater_equal(height, crop_height),
            tf.greater_equal(width, crop_width)),
        ['Crop size greater than the image size.']
    )
    with tf.control_dependencies([size_assertion]):
        if channels == 1:
            image = tf.squeeze(image)
            crop_start = [crop_top, crop_left, ]
            crop_shape = [crop_height, crop_width, ]
        elif channels >= 3:
            crop_start = [crop_top, crop_left, 0]
            crop_shape = [crop_height, crop_width, -1]
        image = tf.slice(image, crop_start, crop_shape)
    return tf.reshape(image, [crop_height, crop_width, -1])
fc9ad33ad5fc9150a299328fb3c77bb662c25339
33,770
import enum


def mapper_or_checker(container):
    """Callable to map the function parameter values.

    Parameters
    ----------
    container : dict-like object

    Raises
    ------
    TypeError
        If the unit argument cannot be interpreted.

    Example
    -------
    >>> conv = mapper_or_checker({True: 42})
    >>> conv(True)
    42
    """
    if isinstance(container, dict):
        return mapper(container)
    if isinstance(container, enum.EnumMeta):
        return enum_mapper(container)
    if isinstance(container, set):
        return membership_checker(container)
    raise TypeError('to_mapper argument must be a dict, '
                    'not {}'.format(container))
7c8b22fd3ef7fa52b7ebb94f7d5b5fda48a1a683
33,771
from gtts import gTTS


def cmd_konesyntees(bot, update, args):
    """Use superior Estonian technology to express your feelings like you've never before!"""
    chatid = update.message.chat_id
    text = ' '.join(args)
    try:
        tts = gTTS(text=text, lang='et')
        tts.save('bot/konesyntees/konesyntees.mp3')
        with open('bot/konesyntees/konesyntees.mp3', 'rb') as file:
            bot.send_document(chatid, file)
    except AttributeError:
        return update.message.reply_text('The Konesyntees TTS API seems to be offline '
                                         'at the moment. Please try again later.')
368ebd43191c219f4f0d7b3b362bb94d117d238e
33,772
def tinynet_a(pretrained=False, **kwargs):
    """TinyNet"""
    r, c, d = TINYNET_CFG['a']
    default_cfg = default_cfgs['tinynet_a']
    assert default_cfg['input_size'] == (3, 224, 224)
    channel, height, width = default_cfg['input_size']
    height = int(r * height)
    width = int(r * width)
    default_cfg['input_size'] = (channel, height, width)
    print("input_size:", default_cfg['input_size'])
    print("channel multiplier:%s, depth multiplier:%s, resolution multiplier:%s" % (c, d, r))
    model = _gen_tinynet(
        default_cfgs['tinynet_a'], channel_multiplier=c, depth_multiplier=d,
        pretrained=pretrained, **kwargs)
    return model
08bb62c45f8ed5b3eab3ec3ad2aab2d0bcb917ea
33,774
import requests


def reserve_offer_request(offer_id: int, adult_count: int, children_count: int):
    """Request order offer."""
    # call to API Gateway for getting offers
    response = requests.post(
        ORDER_RESERVE_REQUEST_ENDPOINT,
        data={
            "offer_id": offer_id,
            "customer_email": session["email"],
            "adult_count": adult_count,
            "children_count": children_count,
        },
    )
    if response.status_code != 200:
        raise NotImplementedError
    return response.json()
d3c2b1d62c7f64229dfbce3b0688f135b0c8ca1a
33,775
def evaluate_nccl(baseline: dict, results: dict, failures: int, tolerance: int) -> int:
    """
    Evaluate the NCCL test results against the baseline.

    Determine if the NCCL test results meet the expected threshold and
    display the outcome with appropriate units.

    Parameters
    ----------
    baseline : dict
        A ``dictionary`` of the baseline to compare results against.
    results : dict
        A ``dictionary`` of the parsed results.
    failures : int
        An ``integer`` of the number of results that have not met the threshold.
    tolerance : int
        An ``integer`` of the percentage below the threshold to still mark as passing.

    Returns
    -------
    int
        Returns an ``integer`` of the number of results that have not met the threshold.
    """
    if 'max_bus_bw' not in baseline.keys():
        return failures
    print(' NCCL Max Bus Bandwidth (GB/s)')
    expected = baseline['max_bus_bw']
    got = results['nccl']['max_bus_bw']
    text = f' Expected: {expected}, Got: {got}'
    result = metric_passes(expected, got, tolerance)
    output, failures = result_text(result, failures)
    text += f', Result: {output}'
    print(text)
    return failures
3fdf3d75def46f98f971a1740f397db0b786fd7c
33,776
def calculate_AUC(x_axis, y_axis):
    """
    Calculates the Area Under Curve (AUC) for the supplied x/y values.
    It is assumed that the x axis data is either monotonically increasing
    or decreasing.

    Input:
        x_axis: list/numpy array of values
        y_axis: list/numpy array of values
    Output:
        AUC
    """
    return auc(x_axis, y_axis)
fa474fd210ee9bb86738e9f93795dd580680c2db
33,777
def next_nibble(term, nibble, head, worm):
    """
    Provide the next nibble.

    Continuously generate a random new nibble so long as the current
    nibble hits any location of the worm. Otherwise, return a nibble
    of the same location and value as provided.
    """
    loc, val = nibble.location, nibble.value
    while hit_vany([head] + worm, nibble_locations(loc, val)):
        loc = Location(x=randrange(1, term.width - 1),
                       y=randrange(1, term.height - 1))
        val = nibble.value + 1
    return Nibble(loc, val)
a8861672b0dcc22e5aad2736d87a50f31eb0b864
33,779
import numpy as np


def line_pattern_matrix(wl, wlc, depth, weight, vels):
    """
    Function to calculate the line pattern matrix M given in Eq (4) of paper
    Donati et al. (1997), MNRAS 291, 658-682

    :param wl: numpy array (1D), input wavelength data (size n = spectrum size)
    :param wlc: numpy array (1D), central wavelengths (size = number of lines)
    :param depth: numpy array (1D), line depths (size = number of lines)
    :param weight: numpy array (1D), line polar weights (size = number of lines)
    :param vels: numpy array (1D), LSD profile velocity vector (size = m)
    :return mmf, mmp
        mmf: numpy array (2D) of size n x m, line pattern matrix for flux LSD.
        mmp: numpy array (2D) of size n x m, line pattern matrix for polar LSD.
    """
    # set number of points and velocity (km/s) limits in LSD profile
    mnum, vinit, vfinal = len(vels), vels[0], vels[-1]

    # set number of spectral points
    num = len(wl)

    # initialize line pattern matrix for flux LSD
    mmf = np.zeros((num, mnum))

    # initialize line pattern matrix for polar LSD
    mmp = np.zeros((num, mnum))

    # set first i=0 -> trick to improve speed
    i0 = 0

    # set values of line pattern matrix M
    for lt in range(len(wlc)):
        noi0 = True
        for it in range(i0, num):
            # Calculate line velocity: v = c Δλ / λ
            velocity = speed_of_light * (wl[it] - wlc[lt]) / wlc[lt]
            if vinit <= velocity <= vfinal:
                # below is a trick to improve speed
                if noi0:
                    # next spectral line starts with first i of previous line
                    # warning: list of CCF lines must be sorted by wavelength
                    i0 = it
                    noi0 = False
                for jt in range(mnum - 1):
                    if vels[jt] <= velocity < vels[jt + 1]:
                        mmp[it][jt] += weight[lt]
                        mmf[it][jt] += depth[lt]
                        if mmf[it][jt] > 1.0:
                            mmf[it][jt] = 1.0
                        break
            elif velocity > vfinal:
                break

    # return the line pattern matrix for flux and for polar
    return mmf, mmp
fc5e50000ccbd7cad539e7ee2acd5767dd81c8be
33,780
def pandas_join_string_list(row, field, sep=";"):
    """
    This function checks if the value for field in the row is a list.
    If so, it is replaced by a string in which each value is separated
    by the given separator.

    Args:
        row (pd.Series or similar): the row to check
        field (string): the name of the field
        sep (string): the separator to use in joining the values
    """
    raise_deprecation_warning("pandas_join_string_list", "misc.pandas_utils",
                              "0.3.0", "misc")
    s = wrap_string_in_list(row[field])
    return sep.join(s)
dd185fc0aad5a6247f8ea3280b18a3c910fbd723
33,781
def OpenClipboardCautious(nToTry=4, waiting_time=0.1):
    """Sometimes, wait a little before you can open the clipboard..."""
    for i in range(nToTry):
        try:
            win32clipboard.OpenClipboard()
        except Exception:
            time.sleep(waiting_time)
            continue
        else:
            wait = (i + 2) * waiting_time
            print('extra wait OpenClipboardCautious: %s' % wait)
            time.sleep(wait)
            return True
ac67eb130b2564508eb5b77176736df06032dd9b
33,782
def data_change_url_to_name(subject_data: tuple, column_name: str) -> tuple:
    """
    Substitute names for URLs in cell data (later displayed on the buttons).
    """
    return tuple(dh.change_url_to_name(list(subject_data), column_name))
43030eab85d087c6c09b7154342e60e90f97b28f
33,783
import datetime

import requests


def get_run_ids(release_id, request_url, auth_token):
    """
    Get the test run IDs for the given release ID - each feature will have a unique run ID

    :param release_id: the release ID from Azure DevOps
    :param request_url: the URL for the Microsoft API
    :param auth_token: the personal access token used to authenticate against Azure DevOps
    :return: list of run IDs which were found
    """
    max_last_updated_date = datetime.datetime.now()
    min_last_updated_date = max_last_updated_date - datetime.timedelta(days=1)
    response = requests.get(
        request_url,
        params={"minLastUpdatedDate": min_last_updated_date,
                "maxLastUpdatedDate": max_last_updated_date,
                "releaseIds": release_id,
                "api-version": AZURE_API_VERSION_GET
                },
        auth=("", auth_token)
    )
    print(f"Get run IDs for release {release_id} - response {response.status_code}")
    if not response.status_code == 200:
        print_azure_error(f"Could not get run IDs for release {release_id}")
        print_response_info(response)
        return []
    response_json = response.json()
    run_ids = []
    for item in response_json["value"]:
        run_ids.append(item["id"])
    return run_ids
ee8d99fbe0bdc1a937e889f5184bc854a21ab2a7
33,784
def operGet(fn):
    """Decorator for "get" operations."""
    def _new(self, *args, **kws):
        try:
            obj = args[0]
            if hasattr(obj, "id") or hasattr(obj, "_id"):
                key = operKey(obj, self.name)
                args = args[1:]
                kws["obj"] = obj
                return fn(self, key, *args, **kws)
            else:
                raise RuntimeError("object must have an 'id' (or '_id') attribute")
        except Exception as e:
            print(e)
    return wraps(fn)(_new)
8395496d99fc4e88599c77a30836c1d49944240d
33,785
def latent_iterative_pca(layer, batch, conv_method: str = 'median'):
    """Get the eigenvalues of the covariance matrix of a layer's output.

    Args:
        layer       : the layer whose output is analysed
        batch       : batch of outputs for this layer
        conv_method : reduction method used for convolutional layers

    Returns:
        eig_vals : list of eigenvalues, sorted in descending order
    """
    cov = _get_iterative_cov(layer, batch, conv_method=conv_method)
    eig_vals = np.linalg.eigvalsh(cov)

    # Sort the eigenvalues from high to low
    eig_vals = sorted(eig_vals, reverse=True)
    return eig_vals
320408f3eec33075e808b9c92de951b7bc30c6ae
33,786
import scipy.ndimage


def filter_max(data, s=(1, 1), m="wrap", c=0.0):
    """
    Apply maximum filter to data (real and imaginary separately).

    Parameters:

    * data  Array of spectral data.
    * s     Tuple defining shape or size taken for each step of the filter.
    * m     Defines how edges are determined ('reflect', 'constant',
            'nearest', 'mirror', 'wrap'). Filter mode parameter.
    * c     Constant value for use in 'constant' mode.

    """
    data.real = scipy.ndimage.maximum_filter(data.real, size=s, mode=m, cval=c)
    data.imag = scipy.ndimage.maximum_filter(data.imag, size=s, mode=m, cval=c)
    return data
2e701d2751f394e5a1d25355a8c5b4533972d92d
33,787
def update_alarms(_):
    """
    Entry point for the CloudWatch scheduled task to discover and cache services.
    """
    return periodic_handlers.update_alarms()
9e5f9bf94ed051194e502524420a0e47ac7e75f2
33,788
def create_mapping(alphabet):
    """
    Change list of chars to list of ints, assigning sequential natural numbers.

    :param alphabet: list of char
    :return: dictionary with keys that are letters from alphabet and ints as values
    """
    mapping = {}
    for i, letter in enumerate(alphabet):
        mapping[letter] = i
    return mapping
20ef12101597206e08ca0ea399d97af0f5c8b760
33,790
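For illustration, create_mapping assigns consecutive indices in input order:

create_mapping(['a', 'b', 'c'])  # {'a': 0, 'b': 1, 'c': 2}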
async def async_setup_entry(hass, config_entry, async_add_devices):
    """Set up the Alexa alarm control panel platform by config_entry."""
    return await async_setup_platform(
        hass, config_entry.data, async_add_devices, discovery_info=None
    )
0a80a5d9e2cd5ff8e39200def1d86f30cdc552aa
33,792
def admin_get_all_requests(current_user):
    """Gets all requests."""
    requests = admin_get_all(current_user['id'])
    return jsonify(requests), 200
a3e7202e73894ab05a4575f98460d1b012c0243e
33,793
import json


def git_drv_info(url, version):
    """
    Retrieve the necessary git info to create a nix expression
    using `fetchgit`.
    """
    rev = []
    if version != "master":
        rev = ["--rev", version]
    ret = run_cmd(
        ["nix-prefetch-git", "--no-deepClone", "--quiet", "--url", url] + rev
    )
    return json.loads(ret.stdout)
d08d40da42fa24c3f50ac2069d8a4b73b4d09b37
33,794
import colorsys
import random

import cv2
import numpy as np


def draw_bbox(image, bboxes, classes):
    """
    Draw bounding boxes with class labels on the image.

    bboxes: [cls_id, probability, x_min, y_min, x_max, y_max] format coordinates.
    (The original function from Yun-Yuan used the
    [x_min, y_min, x_max, y_max, probability, cls_id] format instead.)
    """
    num_classes = len(classes)
    image_h, image_w, _ = image.shape
    hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
    colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))

    random.seed(0)
    random.shuffle(colors)
    # random.seed(None)

    for i, bbox in enumerate(bboxes):
        coor = np.array(bbox[2:], dtype=np.int32)
        fontScale = 0.5
        score = bbox[1]
        class_ind = int(bbox[0])
        bbox_color = colors[class_ind]
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)

        bbox_mess = '%s: %.2f' % (classes[class_ind], score)
        t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
        cv2.rectangle(image, c1, (c1[0] + t_size[0], c1[1] - t_size[1] - 3), bbox_color, -1)  # filled
        cv2.putText(image, bbox_mess, (c1[0], c1[1] - 2), cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)

    return image
8f479e8ed5ecef60195df5edc82c2b0e2c3e261c
33,795
def is_admin(user):
    """
    Test if a user has the admin group.

    This function is meant to be used by the user_passes_test decorator to control
    access to views. It uses the is_member function with a predefined list of groups.

    Parameters
    ----------
    user : django.contrib.auth.models.User
        The user being checked for membership of the Admin group.

    Returns
    -------
    bool
        True if the user has Admin as a group.
    """
    return is_member(user, [ADMIN_GROUP])
044550ec2f3aaf8f748fb823b64ba1fb099f71fd
33,796
def Grid(gridtype, *args, **kwargs):
    """Return an instance of the GridBase child class based on type.

    Parameters
    ----------
    gridtype : string
        Type of grid; choices: ['cell-centered', 'x-face', 'y-face'].
    var_names : list of strings
        List of names for the variables to create.
    nx : integer
        Number of cells in the x-direction.
    ny : integer
        Number of cells in the y-direction.
    xmin : float
        Domain limit at the left side.
    xmax : float
        Domain limit at the right side.
    ymin : float
        Domain limit at the bottom side.
    ymax : float
        Domain limit at the top side.
    user_bc_type : dictionary of (string, list) items
        User-defined boundary types to overwrite default ones.
    user_bc_val : dictionary of (string, list) items
        User-defined boundary values to overwrite default ones.

    Returns
    -------
    obj : instance of the GridBase child
        The grid object.
    """
    for cls in GridBase.__subclasses__():
        if cls.check_gridtype(gridtype):
            return cls(*args, **kwargs)
    raise ValueError('Parameter "gridtype" should be either '
                     '"cell-centered", "x-face", or "y-face"')
29fc912d7cfece5a290950eb9925e99001c8a231
33,798
def read_tess_lightcurve(filename, flux_column="pdcsap_flux", quality_bitmask="default"):
    """Returns a `TessLightCurve`.

    Parameters
    ----------
    filename : str
        Local path or remote url of a TESS light curve FITS file.
    flux_column : 'pdcsap_flux' or 'sap_flux'
        Which column in the FITS file contains the preferred flux data?
    quality_bitmask : str or int
        Bitmask (integer) which identifies the quality flag bitmask that should
        be used to mask out bad cadences. If a string is passed, it has the
        following meaning:

            * "none": no cadences will be ignored (`quality_bitmask=0`).
            * "default": cadences with severe quality issues will be ignored
              (`quality_bitmask=1130799`).
            * "hard": more conservative choice of flags to ignore
              (`quality_bitmask=1664431`). This is known to remove good data.
            * "hardest": removes all data that has been flagged
              (`quality_bitmask=2096639`). This mask is not recommended.

        See the :class:`KeplerQualityFlags` class for details on the bitmasks.
    """
    lc = _read_lightcurve_fits_file(filename, flux_column=flux_column, time_format='btjd')

    # Filter out poor-quality data
    # NOTE: Unfortunately Astropy Table masking does not yet work for columns
    # that are Quantity objects, so for now we remove poor-quality data instead
    # of masking. Details: https://github.com/astropy/astropy/issues/10119
    quality_mask = TessQualityFlags.create_quality_mask(
        quality_array=lc['quality'], bitmask=quality_bitmask)
    lc = lc[quality_mask]

    lc.meta['targetid'] = lc.meta.get('ticid')
    lc.meta['quality_bitmask'] = quality_bitmask
    lc.meta['quality_mask'] = quality_mask
    return TessLightCurve(data=lc)
85dec44f2bab3724e5201110d1ec48c283468843
33,799
async def fix_issue(number: int, *, reason: str = None) -> dict:
    """Asynchronously labels the specified issue as fixed, or closed.

    :param number: The issue number on GitHub to fix or consider, if a suggestion
    :param reason: The reason the issue was not considered.
        Note: this parameter only applies to suggestions
    """
    return await loop.run_in_executor(
        None, partial(
            fix_issue_sync, number,
            reason=reason))
aef20cc669267eabc441ca7b5102f41d3767c482
33,800
import re
from collections import OrderedDict
from typing import List, Set, Tuple


def _parse_schema(s: str) -> Schema:
    """Instantiate schema dict from a string."""
    tables: List[Tuple[str, Fields]] = []
    seen: Set[str] = set()
    current_table = ''
    current_fields: List[Field] = []
    lines = list(reversed(s.splitlines()))  # to pop() in right order
    while lines:
        line = lines.pop().strip()
        table_m = re.match(r'^(?P<table>\w.+):$', line)
        field_m = re.match(r'\s*(?P<name>\S+)'
                           r'(\s+(?P<flags>[^#]+))?'
                           r'(\s*#\s*(?P<comment>.*)$)?',
                           line)
        if table_m is not None:
            table_name = table_m.group('table')
            if table_name in seen:
                raise TSDBSchemaError(f'table {table_name} redefined')
            current_table = table_name
            current_fields = []
            tables.append((current_table, current_fields))
            seen.add(table_name)
        elif field_m is not None and current_table:
            name = field_m.group('name')
            flags = field_m.group('flags').split()
            datatype = flags.pop(0)
            comment = field_m.group('comment')
            current_fields.append(
                Field(name, datatype, flags, comment)
            )
        elif line != '':
            raise TSDBSchemaError('invalid line in schema file: ' + line)
    return OrderedDict(tables)
0f84b794d8ddf563083d603de578c5c646be97de
33,801
import numpy as np


def mom2(data, axis=0):
    """
    Intensity-weighted coordinate dispersion (function version). Pixel units.
    """
    shp = list(data.shape)
    n = shp.pop(axis)
    x = np.zeros(shp)
    x2 = np.zeros(shp)
    w = np.zeros(shp)

    # build up slice-by-slice, to avoid big temporary cubes
    for loc in range(n):
        slc = tuple(loc if j == axis else slice(None) for j in range(data.ndim))
        val = data[slc]
        val = np.maximum(val, 0)
        x += val * loc
        x2 += val * loc * loc
        w += val
    return np.sqrt(x2 / w - (x / w) ** 2)
6b47cf9e9486b6631462c9dca2f4079361b4c057
33,802
import numpy as np


def get_rdf_bin_labels(bin_distances, cutoff):
    """
    Common function for getting bin labels given the distances at which each
    bin begins and the ending cutoff.

    Args:
        bin_distances (np.ndarray): The distances at which each bin begins.
        cutoff (float): The final cutoff value.

    Returns:
        [str]: The feature labels for the *RDF
    """
    bin_dists_complete = np.concatenate((bin_distances, np.asarray([cutoff])))
    flabels = [""] * len(bin_distances)
    for i, _ in enumerate(bin_distances):
        lower = "{:.5f}".format(bin_dists_complete[i])
        higher = "{:.5f}".format(bin_dists_complete[i + 1])
        flabels[i] = f"[{lower} - {higher}]"
    return flabels
81e8e3dc217e69c504eb1b916e0d09a499228d34
33,803
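A small example of the labels get_rdf_bin_labels produces for three bins starting at 0, 1 and 2 with a cutoff of 3:

import numpy as np
get_rdf_bin_labels(np.array([0.0, 1.0, 2.0]), 3.0)
# ['[0.00000 - 1.00000]', '[1.00000 - 2.00000]', '[2.00000 - 3.00000]']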
import math


def tangent_point(circle, circle_radius, point, angle_sign=1):
    """Circle tangent passing through point; angle sign + if clockwise, else -."""
    circle2d, point2d = a2(circle), a2(point)
    circle_distance = dist2d(circle2d, point2d) + 1e-9
    relative_angle = math.acos(Range(circle_radius / circle_distance, 1))
    point_angle = angle(point2d, circle2d)
    tangent_angle = (point_angle - relative_angle * sign(angle_sign))
    tangentx = math.cos(tangent_angle) * circle_radius + circle2d[0]
    tangenty = math.sin(tangent_angle) * circle_radius + circle2d[1]
    return a2([tangentx, tangenty])
e3cbf843e893e436df1e7ad492e2bfb8a63a0bf2
33,804
import numpy as np


def range_projection(current_vertex, fov_up=3.0, fov_down=-25.0, proj_H=64, proj_W=900, max_range=50):
    """Project a pointcloud into a spherical projection (range image).

    Args:
        current_vertex: raw point clouds
        fov_up: sensor field of view up, in degrees
        fov_down: sensor field of view down, in degrees
        proj_H: height of the projected range image, in pixels
        proj_W: width of the projected range image, in pixels
        max_range: maximum range at which points are kept

    Returns:
        proj_range: projected range image with depth; each pixel contains the corresponding depth
        proj_vertex: each pixel contains the corresponding point (x, y, z, 1)
        proj_intensity: each pixel contains the corresponding intensity
        proj_idx: each pixel contains the corresponding index of the point in the raw point cloud
    """
    # laser parameters
    fov_up = fov_up / 180.0 * np.pi  # field of view up in radians
    fov_down = fov_down / 180.0 * np.pi  # field of view down in radians
    fov = abs(fov_down) + abs(fov_up)  # get field of view total in radians

    # get depth of all points
    depth = np.linalg.norm(current_vertex[:, :3], 2, axis=1)
    current_vertex = current_vertex[(depth > 0) & (depth < max_range)]  # get rid of [0, 0, 0] points
    depth = depth[(depth > 0) & (depth < max_range)]

    # get scan components
    scan_x = current_vertex[:, 0]
    scan_y = current_vertex[:, 1]
    scan_z = current_vertex[:, 2]
    intensity = current_vertex[:, 3]

    # get angles of all points
    yaw = -np.arctan2(scan_y, scan_x)
    pitch = np.arcsin(scan_z / depth)

    # get projections in image coords
    proj_x = 0.5 * (yaw / np.pi + 1.0)  # in [0.0, 1.0]
    proj_y = 1.0 - (pitch + abs(fov_down)) / fov  # in [0.0, 1.0]

    # scale to image size using angular resolution
    proj_x *= proj_W  # in [0.0, W]
    proj_y *= proj_H  # in [0.0, H]

    # round and clamp for use as index
    proj_x = np.floor(proj_x)
    proj_x = np.minimum(proj_W - 1, proj_x)
    proj_x = np.maximum(0, proj_x).astype(np.int32)  # in [0, W-1]

    proj_y = np.floor(proj_y)
    proj_y = np.minimum(proj_H - 1, proj_y)
    proj_y = np.maximum(0, proj_y).astype(np.int32)  # in [0, H-1]

    # order in decreasing depth
    order = np.argsort(depth)[::-1]
    depth = depth[order]
    intensity = intensity[order]
    proj_y = proj_y[order]
    proj_x = proj_x[order]

    scan_x = scan_x[order]
    scan_y = scan_y[order]
    scan_z = scan_z[order]

    indices = np.arange(depth.shape[0])
    indices = indices[order]

    proj_range = np.full((proj_H, proj_W), -1, dtype=np.float32)  # [H,W] range (-1 is no data)
    proj_vertex = np.full((proj_H, proj_W, 4), -1, dtype=np.float32)  # [H,W] vertex (-1 is no data)
    proj_idx = np.full((proj_H, proj_W), -1, dtype=np.int32)  # [H,W] index (-1 is no data)
    proj_intensity = np.full((proj_H, proj_W), -1, dtype=np.float32)  # [H,W] intensity (-1 is no data)

    proj_range[proj_y, proj_x] = depth
    proj_vertex[proj_y, proj_x] = np.array([scan_x, scan_y, scan_z, np.ones(len(scan_x))]).T
    proj_idx[proj_y, proj_x] = indices
    proj_intensity[proj_y, proj_x] = intensity

    return proj_range, proj_vertex, proj_intensity, proj_idx
aa7efa9ab365cb3e9aaf531c799a9e9615934aac
33,805
def _get_blobs(im, rois, mask=False):
    """Convert an image and RoIs within that image into network inputs."""
    blobs = {'data': None, 'rois': None}
    blobs['data'], im_scale_factors = _get_image_blob(im)
    if not cfg.TEST.HAS_RPN or mask:
        blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
    return blobs, im_scale_factors
3e7dea823fd3471c939d779ee57067e0e856e069
33,806
from typing import List


def urls() -> List[str]:
    """Returns all URLs contained in the pasteboard as strings."""
    urls = general_pasteboard().URLs
    str_urls = []
    if urls is not None:
        for url in urls:
            str_urls.append(str(url.absoluteString))
    return str_urls
0ddf0e88ae588a5ee0ac7dbf40dc8e261296cfe7
33,807
def eager_no_dists(cls, *args):
    """
    This interpretation is like eager, except it skips special distribution patterns.

    This is necessary because we want to convert distribution expressions to
    normal form in some tests, but do not want to trigger eager patterns that
    rewrite some distributions (e.g. Normal to Gaussian) since these tests are
    specifically intended to exercise funsor.distribution.Distribution.
    """
    if issubclass(cls, funsor.distribution.Distribution) and not isinstance(
        args[-1], funsor.Tensor
    ):
        return reflect.interpret(cls, *args)
    result = eager.dispatch(cls, *args)(*args)
    if result is None:
        result = normalize.dispatch(cls, *args)(*args)
    if result is None:
        result = lazy.dispatch(cls, *args)(*args)
    if result is None:
        result = reflect.interpret(cls, *args)
    return result
6f3b4e279001c929348ba7c07bdc34c45d0b32ae
33,808
import numpy as np


def euclid_dist(arr1, arr2, unsigned=True):
    """
    Calculate the element-wise euclidean distance from the identity line.

    This is the distance D between the identity line and the point given
    by the intensities:

    \\[D = abs(A2 - A1) / sqrt(2)\\]

    Args:
        arr1 (ndarray): The first array
        arr2 (ndarray): The second array
        unsigned (bool): If True, return the unsigned (absolute) distance

    Returns:
        arr (ndarray): The resulting array

    Examples:
        >>> arr1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
        >>> arr2 = np.array([-1.0, -2.0, -3.0, -4.0, -5.0, -6.0])
        >>> euclid_dist(arr1, arr2)
        array([1.41421356, 2.82842712, 4.24264069, 5.65685425, 7.07106781,
               8.48528137])
        >>> euclid_dist(arr1, arr2, False)
        array([-1.41421356, -2.82842712, -4.24264069, -5.65685425, -7.07106781,
               -8.48528137])
    """
    arr = (arr2 - arr1) / 2.0 ** 0.5
    if unsigned:
        arr = np.abs(arr)
    return arr
44f242ac21b97dea8d8e5c933e51cbbbaae80b7b
33,810
import numpy as np


def compute_redshift_path_per_line(Wobs, wa, fl, er, sl=3., R=20000, FWHM=10,
                                   wa0=1215.67, fl_th=0, zmin=None, zmax=None):
    """Compute the total redshift path for a given line with Wobs (could be a
    np.array), in a given spectrum fl (with error er)."""
    # define N, number of lines
    try:
        N = len(Wobs)
    except TypeError:  # Wobs is a scalar
        N = 1

    # redshift path to return
    Dz = []

    # compute Wmin for given spectrum, and dz
    z, Wmin = compute_Wmin(wa, fl, er, sl=sl, R=R, FWHM=FWHM, wa0=wa0, fl_th=fl_th)

    # Here, sort out zlims
    if zmax is None:
        zmax = 1000.
    if zmin is None:
        zmin = 0.

    if N == 1:
        # Obtain zgood (1 or 0 depending if Wobs>Wmin)
        zgood = (Wobs >= Wmin) & (z >= zmin) & (z < zmax)
        zgood = zgood * 1
        if np.sum(zgood) == 0:
            Dz = 0
        # find edges where zgood=1
        lower_edges, upper_edges = find_edges(zgood)
        # Final redshift path is given by the difference between edges
        Dz = np.sum(z[upper_edges] - z[lower_edges])
    else:
        for i in range(N):
            # Obtain zgood (1 or 0 depending if Wobs>Wmin)
            zgood = (Wobs[i] >= Wmin) & (z >= zmin) & (z < zmax)
            zgood = zgood * 1
            if np.sum(zgood) == 0:
                Dz += [0]
                continue
            # find edges where zgood=1
            lower_edges, upper_edges = find_edges(zgood)
            # Final redshift path is given by the difference between edges
            Dz_aux = np.sum(z[upper_edges] - z[lower_edges])
            Dz += [Dz_aux]
        Dz = np.array(Dz)
    return Dz
f15a24e0414bb03280d197abde14073b1466cab7
33,811
import sympy


def guard(clusters):
    """
    Split Clusters containing conditional expressions into separate Clusters.
    """
    processed = []
    for c in clusters:
        free = []
        for e in c.exprs:
            if e.conditionals:
                # Expressions that need no guarding are kept in a separate Cluster
                if free:
                    processed.append(Cluster(free, c.ispace, c.dspace))
                    free = []
                # Create a guarded Cluster
                guards = {}
                for d in e.conditionals:
                    condition = guards.setdefault(d.parent, [])
                    if d.condition is None:
                        condition.append(CondEq(d.parent % d.factor, 0))
                    else:
                        condition.append(d.condition)
                guards = {k: sympy.And(*v, evaluate=False) for k, v in guards.items()}
                processed.append(Cluster(e, c.ispace, c.dspace, guards))
            else:
                free.append(e)
        # Leftover
        if free:
            processed.append(Cluster(free, c.ispace, c.dspace))

    return processed
9acb1aecb8423e670cb0b60747bac2b32d56dbe8
33,812
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Hello World from a config entry."""
    # Store an instance of the "connecting" class that does the work of speaking
    # with your actual devices.
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = hub.Hub(hass, entry.data["host"])

    # This creates each HA object for each platform your device requires.
    # It's done by calling the `async_setup_entry` function in each platform module.
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
9a3005c02a2d665ab5cab96a15fee8d3e353f5c3
33,813
def make_viewnames(pathnames, tfm_unique_only=False, order_func=default_viewname_order):
    """
    Make all view names from the paths given as arguments.

    Parameters
    ----------
    pathnames : list[str]
    tfm_unique_only : bool
        Default: False. If True, returns only the views that give *different* imaging
        results with TFM (AB-CD and DC-BA give the same imaging result).
    order_func : func
        Function for sorting the views.

    Returns
    -------
    list[tuple[str, str]]
    """
    viewnames = []
    for tx in pathnames:
        for rx in pathnames:
            viewnames.append((tx, rx))
    if order_func is not None:
        viewnames = list(sorted(viewnames, key=order_func))
    if tfm_unique_only:
        viewnames = filter_unique_views(viewnames)
    return viewnames
26558877ffed5d06a66b459bce64385f116fa952
33,814
def installed_packages():
    """Returns a list of all installed packages

    Returns:
        :obj:`list` of :obj:`str`: List of all the currently installed packages
    """
    _check_for_old_versions()
    return [x["name"] for x, _ in _iter_packages()]
11aca5f8830d66961ac0af351eb5b4633a82808c
33,815
def get_hponeview_client(args):
    """Generate an instance of the HPE OneView client.

    :returns: an instance of the HPE OneView client.
    :raises: OneViewConnectionError if trying a secure connection without a CA
        certificate file path in the Ironic OneView CLI configuration file.
    """
    ssl_certificate = args.ov_cacert
    insecure = True if args.ov_insecure.lower() == "true" else False

    if not (insecure or ssl_certificate):
        raise exceptions.OneViewConnectionError(
            "Failed to start Ironic OneView CLI. Attempting to open secure "
            "connection to OneView but CA certificate file is missing. Please "
            "check your configuration file.")

    if insecure:
        print("Ironic OneView CLI is opening an insecure connection to "
              "HPE OneView. We recommend you to configure secure connections "
              "with a CA certificate file.")
        if ssl_certificate:
            print("Insecure connection to OneView, the CA certificate: %s "
                  "will be ignored." % ssl_certificate)
            ssl_certificate = None

    config = {
        "ip": args.ov_auth_url,
        "credentials": {
            "userName": args.ov_username,
            "password": args.ov_password
        },
        "ssl_certificate": ssl_certificate
    }
    try:
        client = oneview_client.OneViewClient(config)
    except oneview_exceptions.HPOneViewException as ex:
        print("Ironic OneView CLI could not open a connection to HPE OneView. "
              "Check credentials and/or CA certificate file. See details on "
              "error below:\n")
        raise ex
    return client
6a667caab302815c17e84edd52cb08a375ed587b
33,816
import networkx as nx


def random_binary_tree(depth, p=.8):
    """
    Constructs a random binary tree with given maximal depth
    and probability p of bifurcating.

    Parameters:
        depth: The maximum depth the tree can have (might not be reached
               due to the probabilistic nature of the algorithm)
        p: Probability that any node bifurcates.

    Returns:
        G: Random binary tree NetworkX DiGraph
    """
    G = nx.DiGraph()
    G.add_node(0)
    G.graph['root'] = 0
    _random_binary_tree(G, 0, depth, p)
    return G
ec44c0fa4d47f1e50ddbdaf4a2aacca988e5d57f
33,817
def make_uid_setter(**kwargs):
    """
    Create a function that sets the UID of a known resource;
    nothing new is created.
    """
    if 'catalog' not in kwargs:
        context = kwargs.pop('context')
        catalog = getToolByName(context, 'portal_catalog')
    else:
        catalog = kwargs.pop('catalog')
    logger = kwargs.pop('logger')
    find_one = make_distinct_finder(catalog=catalog, logger=logger)
    # Default values:
    optional = kwargs.pop('optional', False)
    shortcircuit = kwargs.pop('shortcircuit', False)
    if kwargs:
        logger.error('make_uid_setter: unused arguments! (%(kwargs)r)', locals())

    if 'portal' not in kwargs:
        if 'context' in kwargs:  # might have been popped above already
            context = kwargs.pop('context')
        site = getToolByName(context, 'portal_url')
    else:
        site = kwargs.pop('portal')

    def visible_path(s):
        """
        >>> visible_path('/gkz/meine-kurse')
        '/meine-kurse'
        """
        assert s.startswith('/')
        liz = s.split('/')
        del liz[1]
        return '/'.join(liz)

    def set_uid(uid_new, path, uid_old=None,
                optional=optional,
                shortcircuit=shortcircuit):
        """
        uid_new -- the new UID (required in any case)
        path -- a path, or a sequence of paths (required)
        uid_old -- optional; if given, must not equal uid_new
        optional -- the given object is optional; it is fine if it is missing
                    (but if it exists, it should get the given UID)
        shortcircuit -- if several paths were given, stop searching after the
                        first hit; otherwise, also check whether the others
                        exist, and log this if so
        """
        if isinstance(path, six_string_types):
            paths = [path]
        elif path:
            paths = tuple(path)
        else:
            paths = []
        brain_new = find_one(uid=uid_new)
        if uid_old:
            if uid_old == uid_new:
                raise ValueError('Different UIDs expected, got: %(uid_new)r'
                                 % locals())
            brain_old = find_one(uid=uid_old)
            if brain_old and brain_new:
                logger.fatal('set_uid: Both old (%(uid_old)r) and new '
                             '(%(uid_new)r) UIDs found!', locals())
                return False
            o_old = brain_old.getObject()
            logger.info('Setting UID of %(o_old)r to %(uid_new)r'
                        ' (was: %(uid_old)r)', locals())
            o_old._setUID(uid_new)
            return True

        if brain_new:
            path_new = visible_path(brain_new.getPath())
            if paths:
                if path_new in paths:
                    logger.info('Found UID %(uid_new)r in expected path'
                                ' %(path_new)r', locals())
                else:
                    logger.warn('Found UID %(uid_new)r in UNEXPECTED path'
                                ' %(path_new)r (expected: %(paths)s)', locals())
            else:
                logger.info('Found UID %(uid_new)r in path'
                            ' %(path_new)r (no expectations specified)',
                            locals())
            return True

        # no hits via UID; now search by path:
        done = False
        for pa in paths:
            logger.info('seeking %(pa)r ...', locals())
            o = site.restrictedTraverse(pa)
            if o:
                if done:
                    logger.warn('ignoring %(o)r', locals())
                else:
                    logger.info('setting UID of %(o)r to %(uid_new)r', locals())
                    o._setUID(uid_new)
                    done = True
                    if shortcircuit:
                        break
        if done:
            return True
        elif optional:
            logger.info('Nothing found (new uid: %(uid_new)r,'
                        ' paths: %(paths)r)', locals())
            return False
        else:
            logger.error('Nothing found! (new uid: %(uid_new)r,'
                         ' paths: %(paths)r)', locals())
            return False

    return set_uid
2676ca013fcd376cf4df4a8d314cc52cd91210e8
33,818
from gi.repository import Gtk


def build_action_group(obj, name=None):
    """
    Build actions and a Gtk.ActionGroup for each Action instance found in obj()
    (that's why Action is a class ;) ). This function requires GTK+.

    >>> class A:
    ...     @action(name='bar')
    ...     def bar(self): print('Say bar')
    ...     @toggle_action(name='foo')
    ...     def foo(self, active): print('Say foo', active)
    ...     @radio_action(names=('baz', 'beer'), labels=('Baz', 'Beer'))
    ...     def baz(self, value):
    ...         print('Say', value, (value and "beer" or "baz"))
    >>> group = build_action_group(A())
    Say 0 baz
    >>> len(group.list_actions())
    4
    >>> a = group.get_action('bar')
    >>> a.activate()
    Say bar
    >>> group.get_action('foo').activate()
    Say foo True
    >>> group.get_action('beer').activate()
    Say 1 beer
    >>> group.get_action('baz').activate()
    Say 0 baz
    """
    group = Gtk.ActionGroup.new(name or str(obj))
    objtype = type(obj)
    for attrname in dir(obj):
        try:
            # Fetch the methods from the object's type instead of the object
            # itself. This prevents some descriptors from executing.
            # Otherwise stuff like dependency resolving may kick in
            # too early.
            method = getattr(objtype, attrname)
        except Exception:
            continue
        act = getattr(method, "__action__", None)
        if isinstance(act, radio_action):
            actgroup = None
            if not act.labels:
                act.labels = [None] * len(act.names)
            if not act.tooltips:
                act.tooltips = [None] * len(act.names)
            if not act.stock_ids:
                act.stock_ids = [None] * len(act.names)
            if not act.accels:
                act.accels = [None] * len(act.names)
            assert len(act.names) == len(act.labels)
            assert len(act.names) == len(act.tooltips)
            assert len(act.names) == len(act.stock_ids)
            assert len(act.names) == len(act.accels)
            for i, n in enumerate(act.names):
                gtkact = Gtk.RadioAction.new(
                    n, act.labels[i], act.tooltips[i], act.stock_ids[i], value=i
                )
                if not actgroup:
                    actgroup = gtkact
                else:
                    gtkact.props.group = actgroup
                group.add_action_with_accel(gtkact, act.accels[i])
            actgroup.connect("changed", _radio_action_changed, obj, attrname)
            actgroup.set_current_value(act.active)
        elif isinstance(act, toggle_action):
            gtkact = Gtk.ToggleAction.new(
                act.name, act.label, act.tooltip, act.stock_id
            )
            gtkact.set_property("active", act.active)
            gtkact.connect("activate", _toggle_action_activate, obj, attrname)
            group.add_action_with_accel(gtkact, act.accel)
        elif isinstance(act, action):
            gtkact = Gtk.Action.new(act.name, act.label, act.tooltip, act.stock_id)
            gtkact.connect("activate", _action_activate, obj, attrname)
            group.add_action_with_accel(gtkact, act.accel)
        elif act is not None:
            raise TypeError(f"Invalid action type: {act}")

    return group
26abe527d4e0d1e98080480e60d51e4a23be5d80
33,819
def afftdn(stream: Stream, *args, **kwargs) -> FilterableStream:
    """https://ffmpeg.org/ffmpeg-filters.html#afftdn"""
    return filter(stream, afftdn.__name__, *args, **kwargs)
e4a878b29f47fec06b0c6c63ae305c17361f522c
33,820
def sh2vap(q, p):
    """Specific Humidity to Water vapor pressure,
    formula derived from the ratio of humid air vs. total air.

    Parameters
    ----------
    q       specific humidity [kg/kg]
    p       total air pressure [Pa]

    Returns
    -------
    water vapor pressure [Pa]
    """
    c = eps()  # Rd / Rv = 0.622
    return (q * p) / (q + (1 - q) * c)
e205499ba1cf36a2521875f341029ae11ba55fc7
33,821
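A worked example for sh2vap, assuming the eps() helper returns Rd/Rv ≈ 0.622: for q = 0.01 kg/kg at p = 101325 Pa,

e = (0.01 * 101325.0) / (0.01 + (1 - 0.01) * 0.622)  # ≈ 1619 Pa of water vapor pressure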
def triplets(a, b, c):
    """
    Time:  O(n lg n), dominated by sorting; the merge-style scan is O(n)
    Space: O(n)
    - n = a_len + b_len + c_len
    """
    a = list(sorted(set(a)))
    b = list(sorted(set(b)))
    c = list(sorted(set(c)))

    ai = bi = ci = 0
    a_len, c_len = len(a), len(c)
    answer = 0
    while bi < len(b):
        while ai < a_len and a[ai] <= b[bi]:
            ai += 1
        while ci < c_len and b[bi] >= c[ci]:
            ci += 1
        answer += ai * ci
        bi += 1
    return answer
d15d340d0a4b870124bbfd8ca6f40358a27f7555
33,822
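A small check of triplets: for a=[1, 3, 5], b=[2, 4], c=[1, 2, 3], b=2 pairs with 1 choice from a and 2 from c, and b=4 pairs with 2 and 3, giving 2 + 6 = 8:

triplets([1, 3, 5], [2, 4], [1, 2, 3])  # 8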
import collections.abc


def flatten_dict(d, parent_key='', joinchar='.'):
    """
    Returns a flat dictionary from nested dictionaries.

    The flattened structure is identified by longer keys coding the tree.
    A value is identified by the joined list of node names, e.g.:

        {'a': {'b': 0,
               'c': {'r': 10,
                     's': 20}},
         'd': 3}

    becomes

        {'a.b': 0, 'a.c.r': 10, 'a.c.s': 20, 'd': 3}

    Parameters
    ----------
    d: dict
        nested dictionary
    parent_key: str, optional (default=empty)
        optional parent key used during the recursion
    joinchar: str, optional (default='.')
        joining character between levels

    Returns
    -------
    fd: dict
        flattened dictionary
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + joinchar + k if parent_key else k
        if isinstance(v, collections.abc.MutableMapping):
            items.extend(flatten_dict(v, new_key, joinchar).items())
        else:
            items.append((new_key, v))
    return dict(items)
5567d880287af3641dc96c7e79b81f93e9d3fbfd
33,823
import torch


def face_vertices(vertices, faces):
    """
    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of faces, 3, 3]
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)

    bs, nv = vertices.shape[:2]
    bs, nf = faces.shape[:2]
    device = vertices.device
    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
    vertices = vertices.reshape((bs * nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return vertices[faces.long()]
ecf99dd157044034abcc6bdf12d307a8f560bd9e
33,824
def get_results(tickers=None):
    """
    Given a list of stock tickers, print formatted results to the terminal.
    """
    if tickers == ['']:
        return
    stocks = QuarterlyReport.get_stocks(tickers)
    if verbose:
        string_header = '{:<10}'.format('Ticker') + '{:<17}'.format('Earnings Date')
        string_to_line = ''
        if peg_ratio:
            string_header += '{:<13}'.format('Peg Ratio')
        if pe_ratio:
            string_header += '{:<13}'.format('PE Ratio')
        if rsi:
            string_header += '{:<13}'.format('RSI')
        if fifty_two:
            string_header += '{:<20}'.format('52Wk H&L')
        string_header += '{:<12}'.format('Price')
        print(string_header)
        for stock in stocks:
            string_to_line = '{:<10}'.format(stock.ticker) + '{:<17}'.format(stock.earnings_date)
            if peg_ratio:
                string_to_line += '{:<13}'.format(str(stock.peg_ratio))
            if pe_ratio:
                string_to_line += '{:<13}'.format(str(stock.pe_ratio))
            if rsi:
                string_to_line += '{:<13}'.format(str(stock.rsi))
            if fifty_two:
                string_to_line += '{:<20}'.format(str(stock.fifty_two))
            string_to_line += '{:<12}'.format(str(stock.price))
            print(string_to_line)
    return stocks
21d298df4c8fe34d055f6978cf96306fa31fcce1
33,825
def reverse_mapper_or_checker(container):
    """Callable to REVERSE map the function parameter values.

    Parameters
    ----------
    container : dict-like object

    Raises
    ------
    TypeError
        If the unit argument cannot be interpreted.

    Example
    -------
    >>> conv = reverse_mapper_or_checker({True: 42})
    >>> conv(42)
    True
    """
    if isinstance(container, dict):
        return mapper({value: key for key, value in container.items()})
    if isinstance(container, set):
        return membership_checker(container)
    raise TypeError('reverse_mapper argument must be a dict or set, '
                    'not {}'.format(container))
616de1fc536af944f6a1bce23b3bdc7c867aa5d8
33,826
import numpy as np
import tensorflow as tf


def smallest_boundary_value(fun, discretization):
    """Determine the smallest value of a function on its boundary.

    Parameters
    ----------
    fun : callable
        A tensorflow function that we want to evaluate.
    discretization : instance of `GridWorld`
        The discretization. If None, then the function is assumed to be
        defined on a discretization already.

    Returns
    -------
    min_value : float
        The smallest value on the boundary.
    """
    min_value = np.inf
    feed_dict = get_feed_dict(tf.get_default_graph())

    # Check boundaries for each axis
    for i in range(discretization.ndim):
        # Use boundary values only for the ith element
        tmp = list(discretization.discrete_points)
        tmp[i] = discretization.discrete_points[i][[0, -1]]

        # Generate all points
        columns = (x.ravel() for x in np.meshgrid(*tmp, indexing='ij'))
        all_points = np.column_stack(columns)

        # Update the minimum value
        smallest = tf.reduce_min(fun(all_points))
        min_value = min(min_value, smallest.eval(feed_dict=feed_dict))
    return min_value
6a20935d86506cb4bd5d1405301edee02562615b
33,828
def read_ids(idtype, idfile):
    """Read ids from idfile of type idtype."""
    print("Using %s file: %s" % (idtype, idfile))
    ids = read_dot_name(idfile)
    print("%d %s read in." % (len(ids), idtype))
    return ids
815381ce374026c119a4789674de899439e7d76b
33,829
def get_pseudo_class_checker(pseudo_class):
    """
    Takes a pseudo-class, like "first-child" or "last-child", and returns
    a function that will check if the element satisfies that pseudo-class.
    """
    return {
        'first-child': lambda el: is_first_content_node(getattr(el, 'previousSibling', None)),
        'last-child': lambda el: is_last_content_node(getattr(el, 'nextSibling', None))
    }.get(pseudo_class, lambda el: False)
ad52b55d37f58c2628db88d7f94dcca85ef7e730
33,830
def gammaincinv(y, s):
    r"""
    Calculates the inverse of the regularized lower incomplete gamma
    function, i.e.::

        \gamma(x; s) = 1/\Gamma(s) \int_0^x t^{s-1} e^{-t} \mathrm{d}t

    NOTE:
        Inspired by:
        https://github.com/minrk/scipy-1/blob/master/scipy/special/c_misc/gammaincinv.c

    Parameters
    ----------
    y : float or array_like, shape (n,)
        Variable to be evaluated
    s : float or array_like, shape (n,)
        Gamma function parameter

    Returns
    -------
    y : array_like, shape (n,)
        Resulting value
    """
    return gammainccinv(1. - y, s)
5b4aa9de4c8b81a1876aafc7f3e7d49384decee4
33,831
import numpy as np


def divide_with_zero_divisor(dividend, divisor):
    """Returns 0 when divisor is zero.

    Args:
        dividend: Numpy array or scalar.
        divisor: Numpy array or scalar.

    Returns:
        Scalar if both inputs are scalar, numpy array otherwise.
    """
    # NOTE(leeley): The out argument should have the broadcasting shape of
    # (dividend, divisor) instead of shape of dividend solely.
    # Thus, in case dividend is scalar and divisor is numpy array, if there is a
    # zero element in the divisor array, the output will still have the shape of
    # the divisor array.
    # The out argument cannot be omitted. The default np.true_divide will output
    # empty instead of zero at ~where.
    broadcast = np.ones(np.broadcast(dividend, divisor).shape)
    return np.true_divide(
        dividend, divisor,
        out=np.zeros_like(broadcast),
        where=divisor * broadcast != 0)
80d6acae02da5155ab9d2c0c9b97d12597f79806
33,833
def str_strip(x):
    """
    Apply .strip() string method to a value.
    """
    if isnull(x):
        return x
    else:
        return str(x).strip()
1767625aaee859d506d936d3fb471f13647b9121
33,834
import mimetypes


def _guess_filetype(filename):
    """Return a (filetype, encoding) tuple for a file."""
    mimetypes.init()
    filetype = mimetypes.guess_type(filename)
    if not filetype[0]:
        textchars = bytearray([7, 8, 9, 10, 12, 13, 27]) + bytearray(
            range(0x20, 0x100))
        # bytes.translate() with a deletechars argument requires a binary read
        with open(filename, 'rb') as fd:
            if fd.read(1024).translate(None, textchars):
                filetype = ('application/unknown', None)
            else:
                filetype = ('text/plain', None)
    return filetype
356ed4180b00101a77c4424c5b75ad8a8a473559
33,835
import re


def inline_anchor_check(stripped_file):
    """Check if the in-line anchor directly follows the level 1 heading."""
    if re.findall(Regex.INLINE_ANCHOR, stripped_file):
        return True
f0817856f0bb4848470b6179ee59222cce3745d5
33,836
import numpy


def u_inverse(U, check=False, verbose=False):
    """Invert a row-reduced U."""
    m, n = U.shape

    # Find the leading (pivot) column of each row.
    leading = []
    for row in range(m):
        cols = numpy.where(U[row, :])[0]
        if not len(cols):
            break
        col = cols[0]
        leading.append(col)

    U1 = zeros2(n, m)

    # Work backwards from the last pivot.
    i = len(leading) - 1  # <= m
    while i >= 0:
        j = leading[i]
        U1[j, i] = 1
        k = i - 1
        while k >= 0:
            if dot2(U[k, :], U1[:, i]):
                j = leading[k]
                U1[j, i] = 1
                assert dot2(U[k, :], U1[:, i]) == 0
            k -= 1
        i -= 1

    return U1
896bf3d79a790e4bae143c06dea2590f3348b800
33,837
def get_filters(component):
    """
    Get the set of filters for the given datasource.

    Filters added to a ``RegistryPoint`` will be applied to all datasources that
    implement it. Filters added to a datasource implementation apply only to
    that implementation.

    For example, a filter added to ``Specs.ps_auxww`` will apply to
    ``DefaultSpecs.ps_auxww``, ``InsightsArchiveSpecs.ps_auxww``,
    ``SosSpecs.ps_auxww``, etc. But a filter added to ``DefaultSpecs.ps_auxww``
    will only apply to ``DefaultSpecs.ps_auxww``. See the modules in
    ``insights.specs`` for those classes.

    Args:
        component (a datasource): The target datasource

    Returns:
        set: The set of filters defined for the datasource
    """
    def inner(c, filters=None):
        filters = filters or set()
        if not ENABLED:
            return filters
        if not plugins.is_datasource(c):
            return filters
        if c in FILTERS:
            filters |= FILTERS[c]
        for d in dr.get_dependents(c):
            filters |= inner(d, filters)
        return filters

    if component not in _CACHE:
        _CACHE[component] = inner(component)
    return _CACHE[component]
d6252d5ecc12ba7cc24f0f2d233d41014e34b637
33,839
import collections


def parse_traffic_congestion(traffic):
    """Return parsed traffic congestion by regions."""
    regions_distances = collections.defaultdict(list)
    regions_polygons = parse_regions_bounds()
    for route in traffic:
        trip_distance = route["trip_distance"]
        point = Point((route["trip_latitude"], route["trip_longitude"]))
        for name, poly in regions_polygons.items():
            if poly.contains(point):
                regions_distances[name].append(trip_distance)

    def get_congestion_percentage(distances):
        """Return region congestion in percentage."""
        distances = list(filter(lambda x: x != 0, distances))
        try:
            avg_distance = sum(distances) / len(distances)
            return 100 / avg_distance / MIN_DISTANCE
        except ZeroDivisionError:
            return 0

    timestamp = traffic[0]["timestamp"] if traffic else None
    traffic_congestion = [{
        "id": region,
        "value": get_congestion_percentage(distances),
        "timestamp": timestamp
    } for region, distances in regions_distances.items()]
    return traffic_congestion
298e8d8b7f77e9054ff5036184e8ac2bf219b139
33,840
from typing import Callable

import torch


def batch_mean_metric_torch(
    base_metric: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
    predictions: torch.Tensor,
    ground_truth: torch.Tensor,
) -> torch.Tensor:
    """During training, we may wish to produce a single prediction for each
    prediction request (i.e., just sample once from the posterior predictive;
    similar to standard training of an MC Dropout model). Then, we simply
    average over the batch dimension.

    For a Torch model we would expect a Torch base metric (e.g.,
    `average_displacement_error_torch`), Torch tensor inputs, and a
    torch.Tensor return type for backpropagation.

    Args:
        base_metric: Callable, function such as `average_displacement_error_torch`
        predictions: shape (B, T, 2) where B is the number of prediction
            requests in the batch.
        ground_truth: shape (T, 2), there is only one ground truth trajectory
            for each prediction request.
    """
    return torch.mean(
        base_metric(predicted=predictions, ground_truth=ground_truth))
7ad19c3ccaa49ea572131ae19ce47af23ad8c821
33,842
def aggregate_tree(l_tree):
    """Walk a py-radix tree and aggregate it.

    Arguments
    l_tree -- radix.Radix() object
    """

    def _aggregate_phase1(tree):
        # phase1 removes any supplied prefixes which are superfluous because
        # they are already included in another supplied prefix. For example,
        # 2001:67c:208c:10::/64 would be removed if 2001:67c:208c::/48 was
        # also supplied.
        n_tree = radix.Radix()
        for prefix in tree.prefixes():
            if tree.search_worst(prefix).prefix == prefix:
                n_tree.add(prefix)
        return n_tree

    def _aggregate_phase2(tree):
        # phase2 identifies adjacent prefixes that can be combined under a
        # single, shorter-length prefix. For example, 2001:67c:208c::/48 and
        # 2001:67c:208d::/48 can be combined into the single prefix
        # 2001:67c:208c::/47.
        n_tree = radix.Radix()
        for rnode in tree:
            p = text(ip_network(text(rnode.prefix)).supernet())
            r = tree.search_covered(p)
            if len(r) == 2:
                if r[0].prefixlen == r[1].prefixlen == rnode.prefixlen:
                    n_tree.add(p)
                else:
                    n_tree.add(rnode.prefix)
            else:
                n_tree.add(rnode.prefix)
        return n_tree

    l_tree = _aggregate_phase1(l_tree)

    if len(l_tree.prefixes()) == 1:
        return l_tree

    while True:
        r_tree = _aggregate_phase2(l_tree)
        if l_tree.prefixes() == r_tree.prefixes():
            break
        else:
            l_tree = r_tree
    del r_tree

    return l_tree
20b73cb24d6989b3ec27706065da0a32eb1aef6c
33,843
def is_dag_acyclic(root_vertex):
    """Perform an acyclicity check for a given DAG.

    Returns:
        True -- If the DAG does not contain cycles.
        False -- If the DAG contains cycles.
    """
    visited = set()
    for vertex in topological_traverse(root_vertex):
        if vertex in visited:
            # DAG has cycles
            return False
        else:
            visited.add(vertex)
    return True
f15104e4c515b9a7ad65e6505b717e4a5fa4624d
33,844
import numpy as np


def nextPow2(length):
    """ Find the next power of 2 >= length """
    return int(2**np.ceil(np.log2(length)))
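A few worked values:

print(nextPow2(5))     # 8
print(nextPow2(8))     # 8   (exact powers of 2 map to themselves)
print(nextPow2(1000))  # 1024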
3cab0a91795035358ce0c759f7659089e09bd1e8
33,845
def convert_parameter_to_model_parameter(parameter, value, meta=None):
    """Convert a Cosmology Parameter to a Model Parameter.

    Parameters
    ----------
    parameter : `astropy.cosmology.parameter.Parameter`
    value : Any
    meta : dict or None, optional
        Information from the Cosmology's metadata.
        This function will use any of: 'getter', 'setter', 'fixed', 'tied',
        'min', 'max', 'bounds', 'prior', 'posterior'.

    Returns
    -------
    `astropy.modeling.Parameter`
    """
    # Get from meta the information relevant to the Model
    extra = {k: v for k, v in (meta or {}).items()
             if k in ('getter', 'setter', 'fixed', 'tied', 'min', 'max',
                      'bounds', 'prior', 'posterior')}

    return ModelParameter(description=parameter.__doc__,
                          default=value,
                          unit=getattr(value, "unit", None),
                          **extra)
90029124d46514d12b554491c2430ac81a351768
33,846
import torch


def check_decoder_output(decoder_output):
    """we expect the output from a decoder to be a tuple with the following constraints:
    - the first element is a torch.Tensor
    - the second element can be anything (reserved for future use)
    """
    if not isinstance(decoder_output, tuple):
        msg = "FairseqDecoder output must be a tuple" + _current_postion_info()
        return False, msg
    if len(decoder_output) != 2:
        msg = "FairseqDecoder output must be a 2-elem tuple" + _current_postion_info()
        return False, msg
    if not isinstance(decoder_output[0], torch.Tensor):
        msg = (
            "FairseqDecoder output[0] must be a torch.Tensor" + _current_postion_info()
        )
        return False, msg
    return True, None
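A usage sketch, assuming the module-level ``_current_postion_info`` helper is in scope:

import torch

ok, msg = check_decoder_output((torch.zeros(2, 5), None))
assert ok and msg is None

ok, msg = check_decoder_output([torch.zeros(2, 5), None])  # list, not tuple
assert not ok  # msg explains the 2-element tuple requirement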
06728dc055c3487511ee5a0f645eccb89c412ba7
33,847
def _write_segmented_read( model, read, segments, do_simple_splitting, delimiters, bam_out ): """Split and write out the segments of each read to the given bam output file. NOTE: Assumes that all given data are in the forward direction. :param model: The model to use for the array segment information. :param read: A pysam.AlignedSegment object containing a read that has been segmented. :param segments: A list of SegmentInfo objects representing the segments of the given reads. :param do_simple_splitting: Flag to control how reads should be split. If True, will use simple delimiters. If False, will require reads to appear as expected in model.array_element_structure. :param delimiters: A list of tuples containing the names of delimiter sequences to use to split the given read. :param bam_out: An open pysam.AlignmentFile ready to write out data. :return: the number of segments written. """ if do_simple_splitting: segment_bounds_tuples = segment_read_with_simple_splitting(read, delimiters, segments) for prev_delim_name, delim_name, start_coord, end_coord in segment_bounds_tuples: # Write our segment here: _write_split_array_element( bam_out, start_coord, end_coord, read, segments, delim_name, prev_delim_name, ) return len(segment_bounds_tuples) else: # Here we're doing bounded region splitting. # This requires each read to conform to the expected read structure as defined in the model. # The process is similar to what is done above for simple splitting. delimiter_found, delimiter_segments = segment_read_with_bounded_region_algorithm(read, model, segments) # Now we have our segments as described by our model. # We assume they don't overlap and we write them out: for i, seg_list in enumerate(delimiter_segments): if delimiter_found[i]: start_seg = seg_list[0] end_seg = seg_list[-1] start_coord = start_seg.start end_coord = end_seg.end start_delim_name = seg_list[0].name end_delim_name = seg_list[-1].name # Write our segment here: _write_split_array_element( bam_out, start_coord, end_coord, read, seg_list, end_delim_name, start_delim_name, ) # Return the number of array elements. # NOTE: this works because booleans are a subset of integers in python. return sum(delimiter_found)
e92f3d258653c3cca8a379b4f6a3f2f72d4a54d9
33,848
def sphinx(**kwargs): """ Run sphinx """ prog = _shell.frompath('sphinx-build') if prog is None: _term.red("sphinx-build not found") return False env = dict(_os.environ) argv = [ prog, '-a', '-d', _os.path.join(kwargs['build'], 'doctrees'), '-b', 'html', kwargs['source'], kwargs['target'], ] return not _shell.spawn(*argv, **{'env': env})
353479427fb434bd63af44dbfecd621973e694cf
33,849
def on_launch(launch_request, session): """ Called when the user launches the skill without specifying what they want """ print("on_launch requestId=" + launch_request['requestId'] + ", sessionId=" + session['sessionId']) # Dispatch to your skill's launch return get_welcome_response(launch_request, session)
64d3854459b2a5f454607bf4de12154b342f2ace
33,850
def generate_all_overhangs(overhang_length=4):
    """Generate a list of Overhang class instances for all overhangs of given length.


    **Parameters**

    **overhang_length**
    > Length of overhangs (`int`).
    """
    overhang_pairs = generate_overhang_pairs(overhang_length=overhang_length)
    overhang_strings = [next(iter(overhang_pair)) for overhang_pair in overhang_pairs]
    overhang_strings.sort()

    overhangs = []
    for overhang_string in overhang_strings:
        overhang_class = Overhang(overhang_string)
        overhangs += [overhang_class]

    return overhangs
04268fd00fb5718d224e477180f6794b0144bfee
33,851
import csv


def csv_to_list(filename: str) -> list:
    """Receive a CSV filename and return the rows of the file as a list of dicts."""
    with open(filename) as csv_file:
        reader = csv.DictReader(csv_file)
        csv_data = [line for line in reader]
    return csv_data
d7344496271de6edcb3fc1df30bb78dd00980c30
33,852
import resource def create_rlimits(): """ Create a list of resource limits for our jailed processes. """ rlimits = [] # No subprocesses. rlimits.append((resource.RLIMIT_NPROC, (0, 0))) # CPU seconds, not wall clock time. cpu = LIMITS["CPU"] if cpu: # Set the soft limit and the hard limit differently. When the process # reaches the soft limit, a SIGXCPU will be sent, which should kill the # process. If you set the soft and hard limits the same, then the hard # limit is reached, and a SIGKILL is sent, which is less distinctive. rlimits.append((resource.RLIMIT_CPU, (cpu, cpu+1))) # Total process virtual memory. vmem = LIMITS["VMEM"] if vmem: rlimits.append((resource.RLIMIT_AS, (vmem, vmem))) # Size of written files. Can be zero (nothing can be written). fsize = LIMITS["FSIZE"] rlimits.append((resource.RLIMIT_FSIZE, (fsize, fsize))) return rlimits
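A sketch of applying these limits to a sandboxed child process; the ``LIMITS`` values shown in the comment are hypothetical, and the module-level ``LIMITS`` dict is assumed to be configured:

import resource
import subprocess

# e.g. LIMITS = {"CPU": 2, "VMEM": 512 * 1024 * 1024, "FSIZE": 0}

def set_limits():
    # runs in the child between fork() and exec()
    for limit, value in create_rlimits():
        resource.setrlimit(limit, value)

subprocess.call(["python", "-c", "print('sandboxed')"],
                preexec_fn=set_limits)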
f3cf9589f9784295d620f2ff2c2b17d09a56c8df
33,853
def sqrt(x):
    """
    Calculate the square root of argument x.
    """
    # Initial guess for the square root
    z = x/2.0

    # Continuously improve the guess using Newton's method.
    # Adapted from https://tour.golang.org/flowcontrol/8
    while abs(x - (z*z)) > 0.0001:
        z = z - (z*z - x) / (2*z)

    return z
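A couple of worked values (Newton's method converges quadratically near the root):

print(round(sqrt(2), 4))  # 1.4142
print(round(sqrt(9), 4))  # 3.0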
5598eb37bc56e3f514f75be0deae0b6a94c3831e
33,854
def create_french_dict_from(file_path):
    """Transform a text file containing (weight, word) tuples into a Python dictionary."""
    with open(file_path, 'r', encoding="ISO-8859-1") as file:
        lines = file.readlines()

    french_dict = {}
    for line in lines:
        couple = line.strip().replace('\n', '').split(' ')
        word = remove_accent_from(couple[1].lower())
        weight = int(couple[0])
        if word in french_dict:
            french_dict[word] += weight
        else:
            french_dict[word] = weight
        # Drop entries containing anything other than the letters a-z.
        for c in word:
            if ord(c) < ord('a') or ord(c) > ord('z'):
                del french_dict[word]
                break

    return french_dict
435be928dc8e3e03e55b4e0b485ee633b21c1b3c
33,855
def construct(data_dir, fname, Y=None, normalize=False, _type='sparse'):
    """Construct label class based on given parameters

    Arguments
    ----------
    data_dir: str
        data directory
    fname: str
        load data from this file
    Y: csr_matrix or None, optional, default=None
        data is already provided
    normalize: boolean, optional, default=False
        Normalize the labels or not
        Useful in case of non binary labels
    _type: str, optional, default='sparse'
        one of 'sparse', 'dense' or 'Graph'
    """
    if fname is None and Y is None:  # No labels are provided
        return LabelsBase(data_dir, fname, Y)
    else:
        if _type == 'sparse':
            return SparseLabels(data_dir, fname, Y, normalize)
        elif _type == 'dense':
            return DenseLabels(data_dir, fname, Y, normalize)
        elif _type == 'Graph':
            return GraphLabels(data_dir, fname, Y, normalize)
        else:
            raise NotImplementedError("Unknown label type")
397101b14b33d92921a14f43f7f4184e759a33a1
33,856
def test_NN_MUL_REDC1(op): """ Generate tests for NN_MUL_REDC1 """ # Odd modulus nn_mod = get_random_bigint(wlen, MAX_INPUT_PARAM_WLEN) | 1 nn_r, nn_r_square, mpinv = compute_monty_coef(nn_mod, getwlenbitlen(nn_mod, wlen)) # random value for input numbers modulo our random mod nn_in1 = get_random_bigint(wlen, MAX_INPUT_PARAM_WLEN) % nn_mod nn_in2 = get_random_bigint(wlen, MAX_INPUT_PARAM_WLEN) % nn_mod # Montgomery multiplication computes in1 * in2 * r^-1 (mod) out = (nn_in1 * nn_in2 * modinv(nn_r, nn_mod)) % nn_mod fmt = "%s nnnnu %s %s %s %s %d\n" s = fmt % (op, format_int_string(out, wlen), format_int_string(nn_in1, wlen), format_int_string(nn_in2, wlen), format_int_string(nn_mod, wlen), mpinv) return [ s ]
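The identity the test exercises, checked with small numbers; ``pow(r, -1, m)`` stands in for the modinv helper (available in Python 3.8+):

m = 101                 # odd modulus
r = 1 << 7              # R = 2^7, coprime to m
a, b = 57, 88
aR, bR = a * r % m, b * r % m
# the Montgomery product of two residues is itself a residue:
# (aR)(bR)R^-1 = (ab)R  (mod m)
abR = aR * bR * pow(r, -1, m) % m
assert abR == a * b * r % m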
cc45a7970079d0f0579f1a9e97f773c9202e5674
33,857
import json def GetAzureStorageConnectionString(storage_account_name, resource_group_args): """Get connection string.""" stdout, _ = vm_util.IssueRetryableCommand( [AZURE_PATH, 'storage', 'account', 'show-connection-string', '--name', storage_account_name] + resource_group_args + AZURE_SUFFIX) response = json.loads(stdout) return response['connectionString']
625fe1d0302a6ab4a29b1ec98a1cffccaabdeb93
33,858
def clean_data(answers, dupes, min_dupes, min_text, questions, show_output):
    """
    :param answers: DataFrame of answers, indexed by AnswerId
    :param dupes: DataFrame of duplicate questions with AnswerId and Text columns
    :param min_dupes: minimum number of dupes per answer required to keep a question
    :param min_text: minimum text length for questions and dupes
    :param questions: DataFrame of questions with AnswerId and Text columns
    :param show_output: whether to print statistics while cleaning
    :return: tuple of (dupes, label_column, questions)
    """
    for dataframe in (questions, dupes, answers):
        dataframe["Text"] = dataframe.Text0.apply(clean_text).str.lower()

    questions = questions[questions.Text.str.len() > 0]
    answers = answers[answers.Text.str.len() > 0]
    dupes = dupes[dupes.Text.str.len() > 0]

    if show_output:
        print(questions.iloc[0, 1])
        print(questions.iloc[0, 3])

    # First, remove dupes that are questions, then remove duplicated questions and dupes.
    dupes = dupes[~dupes.index.isin(questions.index)]
    questions = questions[~questions.index.duplicated(keep="first")]
    dupes = dupes[~dupes.index.duplicated(keep="first")]

    # Keep only questions with answers and dupes, answers to questions, and dupes of questions.
    questions = questions[
        questions.AnswerId.isin(answers.index) & questions.AnswerId.isin(dupes.AnswerId)
    ]
    answers = answers[answers.index.isin(questions.AnswerId)]
    dupes = dupes[dupes.AnswerId.isin(questions.AnswerId)]

    verify_data_integrity(answers, dupes, questions)

    # Report on the data.
    if show_output:
        print("Text statistics:")
        print(
            pd.DataFrame(
                [
                    questions.Text.str.len().describe().rename("questions"),
                    answers.Text.str.len().describe().rename("answers"),
                    dupes.Text.str.len().describe().rename("dupes"),
                ]
            )
        )
        print("\nDuplication statistics:")
        print(
            pd.DataFrame(
                [dupes.AnswerId.value_counts().describe().rename("duplications")]
            )
        )
        print(
            "\nLargest class: {:.2%}".format(
                dupes.AnswerId.value_counts().max() / dupes.shape[0]
            )
        )

    # Reset each dataframe's index.
    questions.reset_index(inplace=True)
    answers.reset_index(inplace=True)
    dupes.reset_index(inplace=True)

    # Apply the minimum text length to questions and dupes.
    questions = questions[questions.Text.str.len() >= min_text]
    dupes = dupes[dupes.Text.str.len() >= min_text]

    # Keep only questions with dupes, and dupes of questions.
    label_column = "AnswerId"
    questions = questions[questions[label_column].isin(dupes[label_column])]
    dupes = dupes[dupes[label_column].isin(questions[label_column])]

    # Restrict the questions to those with a minimum number of dupes.
    answerid_count = dupes.groupby(label_column)[label_column].count()
    answerid_min = answerid_count.index[answerid_count >= min_dupes]
    questions = questions[questions[label_column].isin(answerid_min)]
    dupes = dupes[dupes[label_column].isin(answerid_min)]

    # Verify data integrity.
    assert questions[label_column].isin(dupes[label_column]).all()
    assert dupes[label_column].isin(questions[label_column]).all()

    # Report on the data.
    if show_output:
        print("Restrictions: min_text={}, min_dupes={}".format(min_text, min_dupes))
        print("Restricted text statistics:")
        print(
            pd.DataFrame(
                [
                    questions.Text.str.len().describe().rename("questions"),
                    dupes.Text.str.len().describe().rename("dupes"),
                ]
            )
        )
        print("\nRestricted duplication statistics:")
        print(
            pd.DataFrame(
                [dupes[label_column].value_counts().describe().rename("duplications")]
            )
        )
        print(
            "\nRestricted largest class: {:.2%}".format(
                dupes[label_column].value_counts().max() / dupes.shape[0]
            )
        )

    return dupes, label_column, questions
6d9d753bf2d8267fb517c21de2dc2a34357dfb90
33,859
def on_intent(intent_request, session): """ Called when the user specifies an intent for this skill """ print("on_intent requestId=" + intent_request['requestId'] + ", sessionId=" + session['sessionId']) intent_name = intent_request['intent']['name'] print(intent_request['intent']['name']) # Dispatch to your skill's intent handlers if intent_name == "HowManyAstronautsinISS": return countAstronauts() elif intent_name == "AMAZON.HelpIntent": return get_welcome_response() elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent": return handle_session_end_request() else: raise ValueError("Invalid intent")
8d7a54b5a3f83a7dc3d9923e0631646b9fc27ab6
33,860
from typing import Type

import inspect

import pandas as pd
from ta.utils import IndicatorMixin


def ta_adaptor(indicator_mixin: Type[IndicatorMixin], function_name: str, **kwargs) -> callable:
    """Wraps strategies from ta to make them compatible with infertrade's interface."""
    indicator_parameters = inspect.signature(indicator_mixin.__init__).parameters
    allowed_keys = ["close", "open", "high", "low", "volume"]
    column_strings = []
    parameter_strings = {}

    for ii_parameter_index in range(len(indicator_parameters)):
        if list(indicator_parameters.items())[ii_parameter_index][0] in allowed_keys:
            # This is an input column that needs to be mapped to a Pandas Series.
            column_strings.append(list(indicator_parameters.items())[ii_parameter_index][0])
        elif list(indicator_parameters.items())[ii_parameter_index][0] != "self":
            # This is a parameter that needs to be mapped to a default value.
            name_of_parameter = list(indicator_parameters.items())[ii_parameter_index][0]
            default_value_of_parameter = list(indicator_parameters.items())[ii_parameter_index][1].default
            if not isinstance(default_value_of_parameter, (float, int)):
                # Where empty we set to 10.
                default_value_of_parameter = DEFAULT_VALUE_FOR_MISSING_DEFAULTS
            parameter_strings.update({name_of_parameter: default_value_of_parameter})

    # We override with any supplied arguments.
    parameter_strings.update(kwargs)

    def func(df: pd.DataFrame) -> pd.DataFrame:
        """Inner function to create a Pandas -> Pandas interface."""
        column_inputs = {column_name: df[column_name] for column_name in column_strings}
        indicator = indicator_mixin(**column_inputs, **parameter_strings)
        indicator_callable = getattr(indicator, function_name)
        df[PandasEnum.SIGNAL.value] = indicator_callable()
        return df

    return func
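A usage sketch with ta's SMAIndicator; the exact constructor signature depends on the installed ta version, and ``PandasEnum.SIGNAL.value`` is infertrade's output column name:

import pandas as pd
from ta.trend import SMAIndicator

df = pd.DataFrame({"close": [10.0, 11.0, 12.0, 11.5, 12.5, 13.0]})
adapted = ta_adaptor(SMAIndicator, "sma_indicator", window=3)
out = adapted(df)  # adds the signal column computed from the 3-period SMA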
abf0f7851f1a47263aca4bb2e032af48d0ab5b0a
33,861
def flops(program, only_conv=True, detail=False):
    """Get FLOPs of target graph.

    Args:
        program(Program): The program used to calculate FLOPs.
        only_conv(bool): Just return the number of mul-adds in convolution
            and FC layers if `only_conv` is true.
            default: True.
        detail(bool): Whether to return the detail of each convolution layer.

    Returns:
        int|tuple: If `detail` is true, then return a tuple in format
        `(FLOPs, details)`, otherwise it will just return `FLOPs`. The
        details is a dict whose key is the parameter name of a convolution
        layer and whose value is the FLOPs of that layer.
    """
    graph = GraphWrapper(program)
    return _graph_flops(graph, only_conv=only_conv, detail=detail)
25cdfa159addfe2ef52be8aa6d3f1122df1332e0
33,863
import math


def eq11(A):
    """Chemsep equation 11

    :param A: Equation parameter A
    """
    return math.exp(A)
354b33a14f17de2862e5674edc421045c3dd21a9
33,864
def extractor_to_question(extractor: str): """ return questions for a extractor in a tuple :param extractor: :return: """ if extractor == 'action': return ('who', 'what') elif extractor == 'cause': return ('why',) elif extractor == 'environment': return ('where', 'when') elif extractor == 'method': return ('how',) else: return ('no_mapping',)
9f32562b426b59c4e44efab32064045796ec27ed
33,865
import httplib2


def getpage(url):
    """
    Downloads the html page

    :rtype: tuple
    :param url: the page address
    :return: the response headers and content (bytes) of the page
    """
    http = httplib2.Http('.cache', disable_ssl_certificate_validation=True)
    headers = {
        'User-agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36'}
    response, content = http.request(url, headers=headers)
    return response, content
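A usage sketch (performs a real HTTP request; httplib2's response object exposes the status code):

response, content = getpage("https://example.com")
print(response.status)            # e.g. 200
html = content.decode("utf-8")    # content arrives as bytes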
1bb8fa3bdc1a083826dc509723b4855d0ec41990
33,866
from typing import Union import contextlib def path_to_xpath(node_or_path: Union[str, ncs.maagic.Node]) -> str: """Get the XPath to a node (keypath or maagic Node) The input can be either: - string keypath (/devices/device{foo}/config/alu:port...), - maagic Node instance. The helper will start a new MAAPI read transaction if necessary. :param node_or_path: string keypath or Node instance :returns: a string XPath to the node """ with contextlib.ExitStack() as stack: if isinstance(node_or_path, str): # for a string input, we need a MAAPI session and read transaction t_read = stack.enter_context(ncs.maapi.single_read_trans('python-path-to-xpath', 'system')) t_read.pushd(node_or_path) elif isinstance(node_or_path, ncs.maagic.Node): # for a Node input, we first attempt to retrieve the backend transaction try: t_read = ncs.maagic.get_trans(node_or_path) except ncs.maagic.BackendError: try: # if there is no backend transaction, try to get the backend MAAPI # and start a new transaction maapi = ncs.maagic.get_maapi(node_or_path) t_read = stack.enter_context(maapi.start_read_trans()) except ncs.maagic.BackendError: # if the node has no compatible backend, we're back to square one # and need to set up MAAPI and read transaction t_read = stack.enter_context(ncs.maapi.single_read_trans('python-path-to-xpath', 'system')) t_read.pushd(node_or_path._path) xpath = _ncs.xpath_pp_kpath(t_read.getcwd_kpath()) # pylint: disable=no-member t_read.popd() return xpath
5f91fccb3d32c780712e7487f06d05b55044073d
33,867