Dataset columns:
content: string, length 35 to 762k characters
sha1: string, length 40 characters
id: int64, range 0 to 3.66M
def gather_inputs(headers, test_suites, inputs_class=Inputs):
    """Read the list of inputs to test psa_constant_names with."""
    inputs = inputs_class()
    for header in headers:
        inputs.parse_header(header)
    for test_cases in test_suites:
        inputs.parse_test_cases(test_cases)
    inputs.gather_arguments()
    return inputs
18300cab225f817a7a09f73e4b957713ee45d0c8
10,800
def key_create(adapter_id):
    """Creates a key using a certain adapter."""
    adapter = get_adapter(adapter_id)
    if not adapter:
        return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
    if not adapter.do_verify(request.headers):
        return output.failure("Credential verification failed. Please check your credentials and try again.", 401)
    result = adapter.do_key_create(request.headers, request.json)
    if 'error' in result:
        return output.failure(result['error'], result['status'])
    return output.success(result['data'], result['status'])
ec07091f3bb96f469338643f36b63ade50de3205
10,801
import math

def is_right(side1, side2, side3):
    """
    Takes three side lengths and returns true if triangle is right

    :param side1: int or float
    :param side2: int or float
    :param side3: int or float
    :return: bool
    """
    # The original body was a stub that always returned False; the docstring
    # implies a Pythagorean check, implemented here. Sort the sides so the
    # candidate hypotenuse comes last; math.isclose tolerates float rounding.
    a, b, c = sorted((side1, side2, side3))
    return math.isclose(a ** 2 + b ** 2, c ** 2)
2d22bbc7d0d363b360f578002a6380a4ae5f5b63
10,802
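A few spot checks for the completed `is_right` above (values chosen purely for illustration):

```python
# Classic 3-4-5 right triangle; the order of the sides should not matter.
print(is_right(3, 4, 5))   # True
print(is_right(5, 3, 4))   # True
# An equilateral triangle is not right-angled.
print(is_right(2, 2, 2))   # False
```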
def parser_electron_number(electron_line):
    """
    function of parser for electron information

    Args:
        electron_line (str): line

    Returns:
        list: electron information
    """
    electron_list = parser_split_line_by_length(electron_line.rstrip(),
                                                CPF_FORMAT["ELECTRON"]["length"],
                                                "int")
    return electron_list
3a444aa0cb062ea5cfaac3e7686ff762e42ebf4c
10,803
def summary(t, rtol=1e-5, atol=1e-8):
    """Summarise the time steps of a time vector.

    Parameters
    ----------
    t : array-like
        Time stamps.
    rtol : float
        Relative tolerance for treating steps as equal.
    atol : float
        Absolute tolerance for treating steps as equal.

    Returns
    -------
    The representative time step, the (unique) steps, and an info string
    that is empty when the steps are constant.
    """
    deltas = np.diff(t)

    if np.allclose(deltas, deltas[0], rtol, atol):
        # constant time steps
        return deltas[0], deltas, ''

    # non-constant time steps!
    unqdt = np.unique(deltas)
    mode = stats.mode(deltas)
    dt = mode.mode
    if len(unqdt) > 5:
        info = f'{len(unqdt)} unique values between {deltas.min()} and {deltas.max()}'
    else:
        info = str(unqdt)
    return dt, unqdt, f'Non-constant time steps: {info}'
0f1a5a65d832be8db35b8bdf145e6240d6072f71
10,804
def masked_huber(input, target, lengths):
    """
    Always mask the first (non-batch dimension) -> usually time

    :param input: predicted tensor, shape (batch, time, ...)
    :param target: ground-truth tensor, same shape as input
    :param lengths: valid length of each sequence, used to build the mask
    :return: smooth-L1 loss averaged over the unmasked elements
    """
    m = mask(input.shape, lengths, dim=1).float().to(input.device)
    return F.smooth_l1_loss(input * m, target * m, reduction='sum') / m.sum()
c4eab136b73ffc92034a217252ac290848f77982
10,805
def calcProbabilisticResiduals(coords_actual, coords_desired, covariances_actual):
    """
    Calculate the probabilistic residual.

    Parameters
    ----------
    coords_actual : `~numpy.ndarray` (N, M)
        Actual N coordinates in M dimensions.
    coords_desired : `~numpy.ndarray` (N, M)
        The desired N coordinates in M dimensions.
    covariances_actual : list of N `~numpy.ndarray`s (M, M)
        The covariance matrix in M dimensions for each
        actual observation if available.

    Returns
    -------
    p : `~numpy.ndarray` (N)
        The probability that the actual coordinates given their uncertainty
        belong to the same multivariate normal distribution as the desired
        coordinates.
    d : `~numpy.ndarray` (N)
        The Mahalanobis distance of each coordinate compared to the desired
        coordinates.
    """
    d = np.zeros(len(coords_actual))
    p = np.zeros(len(coords_actual))

    for i, (actual, desired, covar) in enumerate(zip(coords_actual, coords_desired, covariances_actual)):
        # Calculate the degrees of freedom
        k = len(actual)

        # Calculate the mahalanobis distance between the two coordinates
        d_i = mahalanobis(actual, desired, np.linalg.inv(covar))

        # Calculate the probability that both sets of coordinates are drawn from
        # the same multivariate normal
        p_i = 1 - chi2.cdf(d_i, k)

        # Append results
        d[i] = d_i
        p[i] = p_i

    return p, d
c5bdc4048d9fef2e6b40e3bc48c80e6f6e2fcca7
10,806
import re

def split_words_and_quoted_text(text):
    """Split string text by space unless it is wrapped inside double quotes,
    returning a list of the elements.

    For example, if text = 'Should give "3 elements only"',
    the resulting list would be: ['Should', 'give', '3 elements only']
    """
    # using shlex
    # return shlex.split(text)

    # using re
    result = list()
    pattern = re.findall(r'\w+\s*|\".+?\"', text)
    for match in pattern:
        result.append(match.strip().replace('"', ''))
    return result
befb31949d4c52fac96765fd78bc1b9d644282ba
10,807
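A usage sketch mirroring the docstring's own example:

```python
text = 'Should give "3 elements only"'
print(split_words_and_quoted_text(text))
# ['Should', 'give', '3 elements only']
```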
def scheduler(epoch):
    """Generating learning rate value for a given epoch.

    inputs:
        epoch = number of current epoch

    outputs:
        learning_rate = float learning rate value
    """
    if epoch < 100:
        return 1e-3
    elif epoch < 125:
        return 1e-4
    else:
        return 1e-5
916cbc12ff76b8d022a96c89083b8bd2a3078c69
10,808
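Spot checks of the schedule boundaries; the commented wiring assumes a Keras-style `LearningRateScheduler` callback, which is only a guess about the intended consumer:

```python
assert scheduler(0) == 1e-3
assert scheduler(99) == 1e-3
assert scheduler(100) == 1e-4
assert scheduler(125) == 1e-5

# Typical wiring (assumes a compiled Keras model named `model`):
# model.fit(x, y, epochs=150,
#           callbacks=[tf.keras.callbacks.LearningRateScheduler(scheduler)])
```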
def external_search(query, feature_type, url):
    """Makes an external search request to a specified URL.
    The url will have the search text appended to it.
    Returns geojson matches with extra data for the geocoder.
    """
    logger.info("using external API for feature lookup: %s", url + query)

    req = ExternalAPIRequest(
        url=url + query,
        layer=feature_type,
        q={},
        paginate=False
    )

    # Fetch features.
    feature_collection = fetch_geojson_features([req])
    features = feature_collection[0].geojson['features']

    geocoder_features = []
    for feature in features:
        feature['layer'] = feature_type
        feature['center'] = (feature.geometry.coordinates[0],
                             feature.geometry.coordinates[1])
        feature['place_name'] = str(feature.properties['well_tag_number'])
        geocoder_features.append(feature)

    return geocoder_features
f90ea54dd8036b4237a74dd398cf3f2698ab4d0f
10,809
import os

def has_supervisor() -> bool:
    """Return true if supervisor is available."""
    return "SUPERVISOR" in os.environ
5af98347acfdcc50c1b4ca80e01597c584e3a45a
10,810
def setup(*args, **kwds):
    """
    Compatibility wrapper: prefer setuptools, fall back to distutils.
    """
    # The original had both imports at module level and an empty try body;
    # the evident intent is the classic setuptools-or-distutils fallback.
    try:
        from setuptools import setup
    except ImportError:
        from distutils.core import setup
    return setup(*args, **kwds)
174fd60c91c661e9c104c2b62a4966097d4faa57
10,811
def joinpath(base, end):
    """Like Path.joinpath(), but ensures the result is inside `base`.

    Should be used for user-supplied `end`.
    """
    result = (base / end).resolve()
    if base not in result.parents:
        print(base, end, result)
        raise ValueError(end)
    return result
1b4f5afcdca21ceb6e676385602dd07b252db3ad
10,812
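An illustrative sketch of the traversal guard. The paths are hypothetical, and since `resolve()` follows symlinks, real results depend on the filesystem:

```python
from pathlib import Path

base = Path("/srv/app/uploads")

safe = joinpath(base, "2021/report.csv")   # -> /srv/app/uploads/2021/report.csv
try:
    joinpath(base, "../../etc/passwd")     # escapes base
except ValueError as exc:
    print("rejected:", exc)
```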
import numpy as np
from matplotlib.colors import ListedColormap, BoundaryNorm
from matplotlib.collections import LineCollection

def multicolored_line_collection(x, y, z, colors):
    """Color a 2D line based on which state it is in

    :param x: data x-axis values
    :param y: data y-axis values
    :param z: values that determine the color of each (x, y) pair
    :param colors: array of colors, one row per state
    """
    nstates = colors.shape[0]
    # come up with color map and normalization (i.e. boundaries of colors)
    cmap = ListedColormap(colors)
    bounds = np.arange(-1, nstates) + 0.1
    norm = BoundaryNorm(bounds, cmap.N)

    # create line segments to color individually
    points = np.array([x, y]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)

    # Set the values used for colormapping
    lc = LineCollection(segments, cmap=cmap, norm=norm)
    lc.set_array(z)
    lc.set_linewidth(2)

    return lc
6d9438a58547d4be253ca2a505e05da259c73118
10,813
def foldr(fn,
          elems,
          initializer=None,
          parallel_iterations=10,
          back_prop=True,
          swap_memory=False,
          name=None):
  """foldr on the list of tensors unpacked from `elems` on dimension 0.

  This foldr operator repeatedly applies the callable `fn` to a sequence
  of elements from last to first. The elements are made of the tensors
  unpacked from `elems`. The callable fn takes two tensors as arguments.
  The first argument is the accumulated value computed from the preceding
  invocation of fn. If `initializer` is None, `elems` must contain at least
  one element, and its first element is used as the initializer.

  Suppose that `elems` is unpacked into `values`, a list of tensors. The shape
  of the result tensor is `fn(initializer, values[0]).shape`.

  This method also allows multi-arity `elems` and output of `fn`. If `elems`
  is a (possibly nested) list or tuple of tensors, then each of these tensors
  must have a matching first (unpack) dimension. The signature of `fn` may
  match the structure of `elems`. That is, if `elems` is
  `(t1, [t2, t3, [t4, t5]])`, then an appropriate signature for `fn` is:
  `fn = lambda (t1, [t2, t3, [t4, t5]]):`.

  Args:
    fn: The callable to be performed.
    elems: A tensor or (possibly nested) sequence of tensors, each of which
      will be unpacked along their first dimension. The nested sequence of the
      resulting slices will be the first argument to `fn`.
    initializer: (optional) A tensor or (possibly nested) sequence of tensors,
      as the initial value for the accumulator.
    parallel_iterations: (optional) The number of iterations allowed to run in
      parallel.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A tensor or (possibly nested) sequence of tensors, resulting from applying
    `fn` consecutively to the list of tensors unpacked from `elems`, from last
    to first.

  Raises:
    TypeError: if `fn` is not callable.

  Example:
    ```python
    elems = [1, 2, 3, 4, 5, 6]
    sum = foldr(lambda a, x: a + x, elems)
    # sum == 21
    ```
  """
  if not callable(fn):
    raise TypeError("fn must be callable.")

  def create_ta(elem):
    return tensor_array_ops.TensorArray(
        dtype=elem.dtype, size=n, dynamic_size=False,
        infer_shape=True).unstack(elem)

  in_graph_mode = not context.executing_eagerly()
  with ops.name_scope(name, "foldr", [elems]):
    # TODO(akshayka): Remove the in_graph_mode check once caching devices are
    # supported in Eager
    if in_graph_mode:
      # Any get_variable calls in fn will cache the first call locally and not
      # issue repeated network I/O requests for each iteration.
      varscope = vs.get_variable_scope()
      varscope_caching_device_was_none = False
      if varscope.caching_device is None:
        # TODO(ebrevdo): Change to using colocate_with here and in other
        # methods.
        varscope.set_caching_device(lambda op: op.device)
        varscope_caching_device_was_none = True

    # Convert elems to tensor array. n may be known statically.
    elems_flat = [
        ops.convert_to_tensor(elem, name="elem") for elem in nest.flatten(elems)
    ]
    n = (
        tensor_shape.dimension_value(elems_flat[0].shape[0]) or
        array_ops.shape(elems_flat[0])[0])

    elems_ta = nest.map_structure(create_ta, elems)

    if initializer is None:
      i = n - 1
      a = nest.map_structure(lambda elem: elem.read(i), elems_ta)
    else:
      i = n
      a = initializer

    def compute(i, a):
      i -= 1
      elem = nest.map_structure(lambda elem: elem.read(i), elems_ta)
      a_out = fn(a, elem)
      return [i, a_out]

    _, r_a = control_flow_ops.while_loop(
        lambda i, a: i > 0,
        compute, [i, a],
        parallel_iterations=parallel_iterations,
        back_prop=back_prop,
        swap_memory=swap_memory,
        maximum_iterations=n)

    # TODO(akshayka): Remove the in_graph_mode check once caching devices are
    # supported in Eager
    if in_graph_mode and varscope_caching_device_was_none:
      varscope.set_caching_device(None)

    return r_a
db88c9a7e4690af92067801b5e8c42c587c59f12
10,814
def featurise_distances(diagram):
    """Create feature vector by distance-to-diagonal calculation.

    Creates a feature vector by calculating distances to the diagonal for
    every point in the diagram and returning a sorted vector. The
    representation is *stable* but might not be discriminative.

    Parameters
    ----------
    diagram : `PersistenceDiagram`
        Persistence diagram to featurise. Can also be a generic 2D container
        for iterating over tuples.

    Returns
    -------
    Sorted vector of distances to diagonal. The vector is sorted in
    descending order, such that high persistence points precede the ones
    of low persistence.
    """
    distances = [_persistence(x, y) for x, y in diagram]
    return sorted(distances, reverse=True)
9c4f20be1deb2ed5073015939d48615f3b04c21b
10,815
def resize(source, width=None, height=None, filter=None, radius=1,
           wrapx=False, wrapy=False):
    """Create a new numpy image with the desired size.

    Either width or height can be null, in which case its value
    is inferred from the aspect ratio of the source image.

    Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or MITCHELL.
    """
    assert len(source.shape) == 3, 'Shape is not rows x cols x channels'
    assert width is not None or height is not None, 'Missing target size'
    aspect = source.shape[1] / source.shape[0]
    if width is None:
        width = height * aspect
    if height is None:
        height = width / aspect
    magnifying = width > source.shape[1]
    if filter is None:
        filter = MITCHELL if magnifying else LANCZOS
    return resample(source, width, height, filter, radius, wrapx, wrapy)
08fdc077dcea013fd8b0be4a195a860e6d5291ec
10,816
def load_and_classify_payload(config, service, entity, raw_record):
    """Return a loaded and classified payload."""
    # prepare the payloads
    payload = load_stream_payload(service, entity, raw_record)
    payload = list(payload.pre_parse())[0]

    classifier = StreamClassifier(config=config)
    classifier.load_sources(service, entity)
    classifier.classify_record(payload)

    return payload
1931804b1535ba00b495879061492e25a43f91e8
10,817
def render_text(self, block: str, block_type: str, y: int) -> int:
    """
    :param self: MarkdownRenderer
    :param block: string of text
    :param block_type: type of the text (e.g. headers, ordered/unordered lists, blockquotes, code etc)
    :param y: y-coordinate to start rendering on
    :return: y-coordinate after rendering is finished
    """
    start_of_line_x = self.x
    if block_type == 'blockquote':
        start_of_line_x += self.indentation_quote
        quote_y_start = y
    x = start_of_line_x

    # Cleanup and stripping
    block = block \
        .replace('\n', ' ') \
        .strip(' ')
    if block[:3] == '<p>':
        block = block[3:]
    if block[-4:] == '</p>':
        block = block[:-4]

    code_flag = False
    bold_flag = False
    italic_flag = False
    position = None

    if block_type in ('h1', 'h2', 'h3'):
        # insert additional gap in front of h1 or h2 headers
        y += self.gap_line

    for word in block.split(" "):
        # _________ PREPARATION _________ #
        # inline code, bold and italic formatting
        word, position, code_flag, bold_flag, italic_flag = \
            self.inline_formatting_preparation(word, position, code_flag, bold_flag, italic_flag)

        # _________ TEXT BLITTING _________ #
        # create surface to get width of the word to identify necessary linebreaks
        word = word + " "
        word = word.replace("&gt;", ">").replace("&lt;", "<")
        if code_flag:
            if position == 'first' or position == 'single':
                x += self.code_padding
            surface = self.get_surface(word, 'code', bold_flag, italic_flag)
        else:
            surface = self.get_surface(word, block_type, bold_flag, italic_flag)
        text_height = surface.get_height()  # update for next line

        if not (x + surface.get_width() < self.x + self.w):  # new line necessary
            y = y + text_height + self.gap_line
            x = start_of_line_x

        if self.is_visible(y) and self.is_visible(y + text_height):
            if block_type == 'blockquote':  # draw quote-rectangle in front of text
                self.draw_quote_rect(y, y + self.get_surface(word, 'blockquote').get_height())
            self.draw_code_background(code_flag, word, x, y, position)
            self.screen.blit(surface, (x, y))

        # Update x for the next word
        x = x + surface.get_width()
        if code_flag and position in ('single', 'last'):
            x -= self.code_padding  # reduce empty space by padding.

        # _________ FORMATTING RESET FOR NEXT WORD _________ #
        bold_flag = False if bold_flag and position == 'last' else bold_flag
        code_flag = False if code_flag and (position == 'last' or position == 'single') else code_flag
        italic_flag = False if italic_flag and position == 'last' else italic_flag
        position = 'Middle' if position == 'first' else position

    if block_type in ('h1', 'h2'):
        y = y + text_height * 0.5  # add an additional margin below h1 and h2 headers
        if block_type == 'h1':  # insert subline below h1 headers
            y = y + text_height * 0.5  # add an additional margin below h1 headers for the subheader line
            y = self.draw_subheader_line(y)

    return y
ed3e18d9988d612f911d9f6c647cbdf7dfbf7b07
10,818
def tvadam_reconstructor(dataset='ellipses', name=None):
    """
    :param dataset: Can be 'ellipses' or 'lodopab'
    :return: TV reconstructor for the specified dataset
    """
    try:
        params = Params.load('{}_tvadam'.format(dataset))
        standard_dataset = load_standard_dataset(dataset)
        if name is None:
            name = 'TV-Adam'

        reconstructor = TVAdamReconstructor(standard_dataset.ray_trafo,
                                            hyper_params=params.dict,
                                            name=name)
        return reconstructor
    except Exception as e:
        # chain the original exception so the root cause is not swallowed
        raise Exception('The reconstructor doesn\'t exist') from e
0b69d0ce60f05dc522449af66f70ee655389e13c
10,819
import re

def process_spf_data(res, data):
    """
    This function will take the text info of a TXT or SPF record, extract the
    IPv4, IPv6 addresses and ranges, request process include records and
    return a list of IP Addresses for the records specified in the SPF Record.
    """
    # Declare lists that will be used in the function.
    ipv4 = []
    ipv6 = []
    includes = []
    ip_list = []

    # check first if it is a spf record
    if not re.search(r'v\=spf', data):
        return

    # Parse the record for IPv4 Ranges, individual IPs and include TXT Records.
    ipv4.extend(re.findall(r'ip4:(\S*) ', "".join(data)))
    ipv6.extend(re.findall(r'ip6:(\S*)', "".join(data)))

    # Create a list of IPNetwork objects.
    for ip in ipv4:
        for i in IPNetwork(ip):
            ip_list.append(i)

    for ip in ipv6:
        for i in IPNetwork(ip):
            ip_list.append(i)

    # Extract and process include values.
    includes.extend(re.findall(r'include:(\S*)', "".join(data)))
    for inc_ranges in includes:
        for spr_rec in res.get_txt(inc_ranges):
            spf_data = process_spf_data(res, spr_rec[2])
            if spf_data is not None:
                ip_list.extend(spf_data)

    # Return a list of IP Addresses
    return [str(ip) for ip in ip_list]
537a59dd9091df35ac2502e8b03f87e625b74b76
10,820
def create_knight():
    """ Creates a new knight according to player input.

    Checks the knights module for how many points are to spend,
    and which attributes are available. It then asks the player
    for a name for the knight and to spend their points on the
    available attributes.

    Returns:
        A knight instance with the player's values
    """
    knight_class = get_class()

    # get the constants from the knights module
    max_attr_points = knights.MAX_ATTRIBUTE_POINTS
    attributes = knights.ATTRIBUTES

    knight = None  # this will be the instance to be returned

    name = input("What is your name?\n")

    # repeat until the input was correct and a knight was created
    while not knight:
        # display the attributes and how many points are to be spent
        spent_points = input(
            f"You have {max_attr_points} points to spend on "
            f"the attributes: { ', '.join(attributes) }.\n"
            "Submit your points separated either by commas or by spaces, "
            "like the list above with numbers instead of attribute names. "
            "Points must be integers.\n"
        )

        try:
            # we allow to use commas or spaces, so we check what was used
            # we cast all input attribute points to integer since
            # attribute points are integer numbers
            if "," in spent_points:
                points = [int(val) for val in spent_points.split(",")]
            else:
                points = [int(val) for val in spent_points.split(" ")]

            # if not enough attributes were inputted, repeat the loop
            if len(points) != len(attributes):
                continue

            # knight the knight! Since knights take attributes as
            # one parameter each, we unzip the input list into the call
            knight = knight_class(name, *points)
        except ValueError:
            # When the casting to integer fails
            print("Could not parse. Were the points all integer?")
            continue
        except knights.KnightError as e:
            # a special error from the knights module that occurs when
            # there are errors in knighting a new knight
            print(f"Could not knight the knight: {str(e)}")
            continue

    return knight
8feed9cd71b68868d14cd1bcfe14ff9291cf2abd
10,821
async def get_bank_name(guild: discord.Guild = None) -> str:
    """Get the current bank name.

    Parameters
    ----------
    guild : `discord.Guild`, optional
        The guild to get the bank name for (required if bank is
        guild-specific).

    Returns
    -------
    str
        The bank's name.

    Raises
    ------
    RuntimeError
        If the bank is guild-specific and guild was not provided.
    """
    return await bank.get_bank_name(guild)
1e0e3f1a1de7925daf5810ac3bcc75508993a642
10,822
from typing import Optional, Dict, Any
import tempfile

def build_cli_lib(to_save_location: Optional[str] = None,
                  render_kwargs: Optional[Dict[str, Any]] = None) -> str:
    """Create project-specific cli.fif lib"""
    if not to_save_location:
        to_save_location: str = tempfile.mkstemp(suffix='.fif')[1]

    logger.info(f"👽 Save ton-cli to {to_save_location}")

    loader = FileSystemLoader(f"{project_root}/modules/fift")
    env = Environment(
        loader=loader,
        autoescape=select_autoescape()
    )
    template = env.get_template("cli.fif.template")

    render_kwargs = {} if render_kwargs is None else render_kwargs
    if 'is_project' not in render_kwargs:
        render_kwargs['is_project'] = 0

    rendered = template.render(**render_kwargs)

    with open(to_save_location, 'w', encoding='utf-8') as f:
        f.write(rendered)

    return to_save_location
0231433f94b129213de95ac50b406ede88860f23
10,823
def match(A, S, trueS):
    """Rearranges columns of S to best fit the components they likely
    represent (maximizes sum of correlations)"""
    cov = np.cov(trueS, S)
    k = S.shape[0]
    corr = np.zeros([k, k])
    for i in range(k):
        for j in range(k):
            corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])
    arrangement = linear_sum_assignment(-corr)
    resS = np.zeros_like(S)
    resAT = np.zeros_like(A.T)
    for t in range(k):
        resS[arrangement[1][t]] = S[arrangement[0][t]]
        resAT[arrangement[1][t]] = A.T[arrangement[0][t]]
    return resAT.T, resS
a0ec70ec768a1dfc610e8a5050d190a94266b307
10,824
def image_field_data(request, include_empty_option=False):
    """Returns a list of tuples of all images.

    Generates a sorted list of images available. And returns a list of
    (id, name) tuples.

    :param request: django http request object
    :param include_empty_option: flag to include an empty tuple in the front
        of the list
    :return: list of (id, name) tuples
    """
    try:
        images = get_available_images(request, request.user.project_id)
    except Exception:
        exceptions.handle(request, _('Unable to retrieve images'))

    images.sort(key=lambda c: c.name)
    images_list = [('', _('Select Image'))]
    for image in images:
        image_label = u"{} ({})".format(image.name,
                                        sizeformat.diskgbformat(image.size))
        images_list.append((image.id, image_label))

    if not images:
        return [("", _("No images available")), ]

    return images_list
f209cbc9ae9aa18fd22e320fdc96ba97690f8a7d
10,825
def posts_completed(scraped_posts, limit):
    """Returns true if the amount of posts scraped from
    profile has reached its limit.
    """
    return len(scraped_posts) == limit
ff72474349a32f326b63b95070927c4b379be800
10,826
def mag(x):
    """Returns the absolute value squared of the input"""
    return np.abs(x)**2
bd081775a0b99e050287160cf3369faa819e20cf
10,827
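Quick checks, using |3 + 4j| = 5:

```python
import numpy as np

print(mag(3 + 4j))             # 25.0
print(mag(np.array([1j, 2])))  # [1. 4.]
```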
def get_zero_columns(matrix):
    """ Returns a list of the columns which are all 0 """
    rows = matrix.shape[0]
    columns = matrix.shape[1]
    result = []
    for j in range(columns):
        is_zero_column = True
        for i in range(rows):
            is_zero_column = is_zero_column and matrix[i, j] == 0.0
        result.append(is_zero_column)
    return result
35694592f4155f710e5ed3c2148a138591cd683f
10,828
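A usage sketch; note the loop above is equivalent to the vectorized `~matrix.any(axis=0)`:

```python
import numpy as np

m = np.array([[0.0, 1.0, 0.0],
              [0.0, 2.0, 0.0]])
print(get_zero_columns(m))   # [True, False, True]
print(list(~m.any(axis=0)))  # same result, vectorized
```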
def traditional_constants_icr_equation_empty_fixed(fixed_params, X_col):
    """ Traditional ICR equation with constants from ACE consensus """
    a = 450
    tdd = X_col[0]
    return a / tdd
2931e4b3592a94690d98b0cb4cb90f712ff4a449
10,829
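A worked instance of the 450 rule above: with a total daily dose (TDD) of 50 units, the ratio is 450 / 50 = 9 grams of carbohydrate per unit. The `fixed_params` argument is unused by this variant, so `None` is passed purely for illustration:

```python
print(traditional_constants_icr_equation_empty_fixed(None, [50]))  # 9.0
```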
def sort_completions_key(completion):
    """
    sort completions according to their type

    Args:
        completion (jedi.api.classes.Completion): completion

    Returns:
        int: sorting order
    """
    if completion.type == "function":
        return 2
    elif completion.type == "instance":
        return 1
    else:
        return 3
7bf767d908c83c11dafa5e0fd694bbb31a98c404
10,830
def _is_git_url_mismatch(mismatch_item):
    """Returns whether the given mismatch item is for a git URL."""
    _, (required, _) = mismatch_item
    return required.startswith('git')
b1c3cec3d8cf3c7d3ffa5c405522b1a08754223b
10,831
def from_url(url, output_path=None, options=None):
    """
    Convert file or files from URLs to PDF document

    :param url: URL or list of URLs to be saved
    :param output_path: (optional) path to output PDF file. If not provided,
        PDF will be returned as string
    :param options: (optional) dict to configure pyppeteer page.pdf action

    Returns: output_path if provided else PDF Binary
    """
    return async_to_sync(api_async.from_url)(url, output_path, options)
8543410dcfba9d44adc8939f3dc8be702f5e922b
10,832
from typing import Dict

def parse_wmic_output(wmic_output: str) -> Dict[str, str]:
    """Parse output of wmic query

    See test cases.

    @param wmic_output: Output from wmic tool
    @return Dictionary with key/value from wmic"""
    try:
        non_blank_lines = [s for s in wmic_output.splitlines() if s]
        parsed = {non_blank_lines[0].rstrip(' '): non_blank_lines[1].rstrip(' ')}
        logger.debug("Parsed wmic output: {}".format(str(parsed)))
    except IndexError:
        logger.error(f"Failed to parse {wmic_output}")
        return {"": ""}
    return parsed
bce5195c484cafc80ef1d1e26b7fb598c20718aa
10,833
def parse_identifier(stream: TokenStream) -> expression.Identifier:
    """Read an identifier from the token stream.

    <ident>.<ident>
    <ident>["<ident>"]
    <ident>["<ident>"].<ident>
    <ident>[<ident --> int/str>]
    <ident>[<ident>.<ident --> int/str>]
    <ident>[<int>]
    <ident>[<int>].<ident>
    """
    path: expression.IdentifierPath = []

    while stream.current.type in IDENTIFIER_TOKENS:
        if stream.current.type == TOKEN_IDENTIFIER:
            path.append(IdentifierPathElement(stream.current.value))

        elif stream.current.type == TOKEN_INTEGER:
            path.append(IdentifierPathElement(int(stream.current.value)))

        elif stream.current.type == TOKEN_LBRACKET:
            stream.next_token()  # Eat open bracket

            if stream.current.type == TOKEN_STRING:
                path.append(IdentifierPathElement(stream.current.value))
            elif stream.current.type == TOKEN_NEGATIVE:
                expect_peek(stream, TOKEN_INTEGER)
                stream.next_token()
                path.append(IdentifierPathElement(-int(stream.current.value)))
            elif stream.current.type == TOKEN_INTEGER:
                path.append(IdentifierPathElement(int(stream.current.value)))
            elif stream.current.type == TOKEN_IDENTIFIER:
                # Recursive call to parse_identifier. If it's not a string or
                # integer, anything inside a pair of square brackets could be
                # another identifier that resolves to a string or integer.
                path.append(parse_identifier(stream))
            else:
                raise LiquidSyntaxError(
                    f"invalid identifier, found {stream.current.type}"
                )

            expect_peek(stream, TOKEN_RBRACKET)
            stream.next_token()  # Eat close bracket

        elif stream.current.type == TOKEN_DOT:
            pass
        else:
            raise LiquidSyntaxError(f"invalid identifier, found {stream.current.type}")

        stream.next_token()

    stream.push(stream.current)
    return expression.Identifier(path)
0679a112a841d90d51806d83cd381aad7632c77b
10,834
from typing import List

def cubemap_projection_matrices(from_point: Vector3D, far_plane: float) -> List[np.ndarray]:
    """Create the required Cubemap projection matrices.

    This method is suitable for generating a Shadow Map.
    Simply speaking, this method generates 6 different camera matrices from
    the center of an imaginary cube and covers all surfaces without
    conflicting.

    Keyword arguments;
    from_point -- Imaginary camera location
    far_plane -- How far the camera is capable of seeing. (Affects performance!)
    """
    def a2np(a: List[float]) -> np.ndarray:
        return np.array(a, dtype=np.float32)

    shadow_proj = pyrr.matrix44.create_perspective_projection(
        90.0, 1.0, 0.01, far_plane, np.float32)
    lightpos = np.array(list(from_point), dtype=np.float32)[:3]

    nx = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([-1.0, 0, 0]), dtype=np.float32),
        a2np([0, -1.0, 0]),
        dtype=np.float32,
    )
    px = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([1, 0, 0]), dtype=np.float32),
        a2np([0, -1.0, 0]),
        dtype=np.float32,
    )
    ny = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([0, -1, 0]), dtype=np.float32),
        a2np([0, 0, -1.0]),
        dtype=np.float32,
    )
    py = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([0, 1, 0]), dtype=np.float32),
        a2np([0, 0, 1.0]),
        dtype=np.float32,
    )
    pz = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([0, 0, 1]), dtype=np.float32),
        a2np([0, -1.0, 0]),
        dtype=np.float32,
    )
    nz = pyrr.matrix44.create_look_at(
        lightpos,
        np.array(lightpos + a2np([0, 0, -1]), dtype=np.float32),
        a2np([0, -1.0, 0]),
        dtype=np.float32,
    )

    return [
        px.dot(shadow_proj),
        nx.dot(shadow_proj),
        py.dot(shadow_proj),
        ny.dot(shadow_proj),
        pz.dot(shadow_proj),
        nz.dot(shadow_proj),
    ]
e576aceec831df8267bff1c4de3cb7f0a58c3be7
10,835
from win32com.shell import shellcon, shell

def loadOptionsFile():
    """Find the .buildbot/FILENAME file. Crawl from the current directory up
    towards the root, and also look in ~/.buildbot . The first directory
    that's owned by the user and has the file we're looking for wins. Windows
    skips the owned-by-user test.

    @rtype:  dict
    @return: a dictionary of names defined in the options file. If no options
             file was found, return an empty dict.
    """
    here = os.path.abspath(os.getcwd())

    if runtime.platformType == 'win32':
        # never trust env-vars, use the proper API
        appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
        home = os.path.join(appdata, "buildbot")
    else:
        home = os.path.expanduser("~/.buildbot")

    searchpath = []
    toomany = 20
    while True:
        searchpath.append(os.path.join(here, ".buildbot"))
        next = os.path.dirname(here)
        if next == here:
            break  # we've hit the root
        here = next
        toomany -= 1  # just in case
        if toomany == 0:
            raise ValueError("Hey, I seem to have wandered up into the "
                             "infinite glories of the heavens. Oops.")
    searchpath.append(home)

    localDict = {}

    for d in searchpath:
        if os.path.isdir(d):
            if runtime.platformType != 'win32':
                if os.stat(d)[stat.ST_UID] != os.getuid():
                    print "skipping %s because you don't own it" % d
                    continue  # security, skip other people's directories
            optfile = os.path.join(d, "options")
            if os.path.exists(optfile):
                try:
                    f = open(optfile, "r")
                    options = f.read()
                    exec options in localDict
                except:
                    print "error while reading %s" % optfile
                    raise
                break

    for k in localDict.keys():
        if k.startswith("__"):
            del localDict[k]
    return localDict
2674c6e37de32f673e4fb9aeb6bb11981bee23d0
10,836
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
    """
    Analyze the void space in the input structure using high accuracy
    voronoi decomposition. Calls Zeo++ for Voronoi decomposition.

    Args:
        structure: pymatgen.core.structure.Structure
        rad_dict (optional): Dictionary of radii of elements in structure.
            If not given, Zeo++ default values are used.
            Note: Zeo++ uses atomic radii of elements.
            For ionic structures, pass rad_dict with ionic radii
        probe_rad (optional): Sampling probe radius in Angstroms.
            Default is 0.1 A

    Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi face centers as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
    """
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)

        rad_flag = True
        rad_file = name + ".rad"
        with open(rad_file, 'w+') as fp:
            for el in rad_dict.keys():
                print("{} {}".format(el, rad_dict[el].real), file=fp)

        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        # vornet, vor_edge_centers, vor_face_centers = \
        #     atmnet.perform_voronoi_decomposition()
        red_ha_vornet = \
            prune_voronoi_network_close_node(atmnet)
        # generate_simplified_highaccuracy_voronoi_network(atmnet)
        # get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
        red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
        voro_out_filename = name + '_voro.xyz'
        voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule

        species = ["X"] * len(voro_node_mol.sites)
        coords = []
        prop = []
        for site in voro_node_mol.sites:
            coords.append(list(site.coords))
            prop.append(site.properties['voronoi_radius'])

        lattice = Lattice.from_parameters(structure.lattice.parameters)
        vor_node_struct = Structure(
            lattice, species, coords, coords_are_cartesian=True,
            to_unit_cell=True, site_properties={"voronoi_radius": prop})

    return vor_node_struct
2f671f9c8a357bd82f364f767cd387fae2661979
10,837
def setUpBlobDetector():
    """
    Configure parameters for a cv2 blob detector, and returns the detector.
    """
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = 0
    params.maxThreshold = 255
    params.filterByArea = True
    params.minArea = 1500
    params.maxArea = 25000
    params.filterByCircularity = False
    params.filterByColor = False
    params.filterByConvexity = False
    params.filterByInertia = False

    detector = cv2.SimpleBlobDetector_create(params)
    return detector
d311f46d9b87d759edae0f15583c66dc31f80602
10,838
def raise_keymap():
    """
    !     @     #     $     %     ||  ^     &     *     (     )
    DEL   ESC                     ||  PGDN  PGUP  PSCR
    CAPS  volup ENT   reset       ||        UP
    voldn super shift space bspc  ||  alt   ent   LEFT  DOWN  RGHT
    """
    left = [
        [KC.N1, KC.N2, KC.N3, KC.N4, KC.N5],
        [KC.F1, KC.F2, KC.F3, KC.F4, KC.F5],
        [KC.F11, KC.F12, KC.LPRN, KC.RPRN, KC.AMPR],
        [KC.NO, KC.INS, KC.LGUI, KC.LSFT, KC.SPC, KC.BSPC],
    ]
    right = [
        [KC.N6, KC.N7, KC.N8, KC.N9, KC.N0],
        [KC.F6, KC.F7, KC.F8, KC.F9, KC.F10],
        [KC.GRV, KC.LBRC, KC.RBRC, KC.PSLS, KC.BSLS],
        [KC.LALT, KC.ENT, KC.TRNS, KC.DOT, KC.PMNS, KC.EQL],
    ]
    return [left, right]
94beda8275f65f16353b12b22809138d0342f512
10,839
import click
from typing import cast

def sample_cmd() -> Command:
    """Useful for testing constraints against a variety of parameter kinds.
    Parameters have names that should make it easy to remember their "kind"
    without the need for looking up this code."""

    @cloup.command()
    # Optional arguments
    @click.argument('arg1', required=False)
    @click.argument('arg2', required=False)
    # Plain options without default
    @cloup.option('--str-opt')
    @cloup.option('--int-opt', type=int)
    @cloup.option('--bool-opt', type=bool)
    # Flags
    @cloup.option('--flag / --no-flag')
    @cloup.option('--flag2', is_flag=True)
    # Options with default
    @cloup.option('--def1', default=1)
    @cloup.option('--def2', default=2)
    # Options that take a tuple
    @cloup.option('--tuple', nargs=2, type=int)
    # Options that can be specified multiple times
    @cloup.option('--mul1', type=int, multiple=True)
    @cloup.option('--mul2', type=int, multiple=True)
    def f(**kwargs):
        print('It works')

    return cast(Command, f)
c5a8ed369d910872e52ef080707c8f0ae7436487
10,840
import inspect

def get_linenos(obj):
    """Get an object’s line numbers in its source code file"""
    try:
        lines, start = inspect.getsourcelines(obj)
    except TypeError:  # obj is an attribute or None
        return None, None
    except OSError:  # obj listing cannot be found
        # This happens for methods that are not explicitly defined
        # such as the __init__ method for a dataclass
        return None, None
    else:
        return start, start + len(lines) - 1
248ad7e377995e03969d3f7e1ded88670d8b08ea
10,841
import random

def create_solution_board(width=6, height=6):
    """Randomly generates a new board
    with width by height size
    """
    if type(width) != int or type(height) != int:
        raise TypeError('Arguments must be int type')

    boxes = width * height
    if boxes % 2 != 0:
        raise ValueError('Number of boxes is not multiple of two')

    numbers = list(range(1, boxes // 2 + 1))
    numbers = numbers + numbers
    random.shuffle(numbers)

    board = []
    for index in range(height):
        board.append([])
        for _ in range(width):
            random_number = numbers.pop()
            board[index].append(random_number)
    return board
0b6e30d726cec61581d93c909761f80d739eb917
10,842
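A property-style check: a 4x3 board holds the values 1..6, each exactly twice:

```python
from collections import Counter

board = create_solution_board(width=4, height=3)    # 3 rows x 4 columns
counts = Counter(value for row in board for value in row)
assert sorted(counts) == list(range(1, 7))          # values 1..6
assert all(c == 2 for c in counts.values())         # each exactly twice
```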
import numpy as np

from pydft.poisson import _O_operator, _L_operator, _B_operator

def _getE(s, R, W, V=None):
    """The sum of the energies for the states present in the solution.

    Args:
        s (list of int): The number of samples points along each
            basis vector.
        R (numpy.ndarray): The basis vectors for the unit cell.
        W (numpy.ndarray): A matrix containing the expansion coefficients
            for the wavefunctions

    Returns:
        E (numpy.ndarray): A vector of the energies at the sample points.
    """
    if V is None:  # pragma: no cover
        V = _sho_V

    O_t = _O_operator(s, R, W)
    U = np.dot(np.conj(W.T), _O_operator(s, R, W))
    Vt = np.transpose(np.conj(_Vdual(s, R, V=V)))
    IW = _B_operator(s, R, W)
    Uinv = np.linalg.inv(U)
    IWU = _B_operator(s, R, np.dot(W, Uinv))
    n = _diagouter(IW, IWU)
    Ew = np.trace(np.dot(np.conj(np.transpose(W)), _L_operator(s, R, np.dot(W, Uinv))))
    E = (-1.) * Ew / 2. + np.dot(Vt, n)
    return E
7759c68e5774f809cfac1038014144cabe5c9410
10,843
def get_gt_list(request):
    """This view returns the list of groundtruths associated to a user and a
    specific configuration of institute, usecase and language.

    .js files: InfoAboutConfiguration.js DownloadGT.js"""

    groundTruths = 0
    json_resp = {}
    ins = request.GET.get('inst', None)
    lang = request.GET.get('lang', None)
    use = request.GET.get('use', None)
    action = request.GET.get('action', None)
    token = request.GET.get('token', None)
    reptype = request.GET.get('reptype', None)
    annotation_mode = request.GET.get('annotation_mode', 'Human')

    if ins == '':
        ins = None
    if use == '':
        use = None
    if lang == '':
        lang = None

    if token == 'all':
        ns_robot = NameSpace.objects.get(ns_id='Robot')
        ns_human = NameSpace.objects.get(ns_id='Human')
        # rob_user = User.objects.get(username='Robot_user', ns_id=ns_robot)
        list_gt = GroundTruthLogFile.objects.filter(ns_id=ns_human).count()
        groundTruths = list_gt
        # gt_rob = GroundTruthLogFile.objects.filter(ns_id=ns_robot, username=rob_user)
        i = 0
        # print(groundTruths)
        # for el in gt_rob:
        #     gts = GroundTruthLogFile.objects.filter(ns_id=ns_robot, gt_type=el.gt_type,
        #                                             id_report=el.id_report_id,
        #                                             language=el.language).exclude(insertion_time=el.insertion_time)
        #     gts_count = gts.count()
        #     # print('count: ' + str(i) + ' ' + str(gts.count()))
        #     i = i + 1
        #     groundTruths = groundTruths + gts_count
    else:
        with connection.cursor() as cursor:
            if reptype == 'reports':
                if annotation_mode == 'Human':
                    cursor.execute(
                        "SELECT COUNT(*) FROM report AS r "
                        "INNER JOIN ground_truth_log_file AS g "
                        "ON g.id_report = r.id_report AND g.language = r.language "
                        "INNER JOIN topic_has_document as t "
                        "ON t.id_report = r.id_report and r.language = t.language "
                        "WHERE r.institute = COALESCE(%s, r.institute) AND t.name = %s "
                        "AND r.language = COALESCE(%s, r.language) AND g.gt_type = %s "
                        "AND g.ns_id = %s and r.institute != %s",
                        [ins, use, lang, action, 'Human', 'PUBMED'])
                    groundTruths = cursor.fetchone()[0]
            else:
                if annotation_mode == 'Human':
                    cursor.execute(
                        "SELECT COUNT(*) FROM report AS r "
                        "INNER JOIN ground_truth_log_file AS g "
                        "ON g.id_report = r.id_report AND g.language = r.language "
                        "INNER JOIN topic_has_document as t "
                        "ON t.id_report = r.id_report and r.language = t.language "
                        "WHERE t.name = %s AND r.language = %s AND g.gt_type = %s "
                        "AND g.ns_id = %s and r.institute = %s",
                        [use, 'english', action, 'Human', 'PUBMED'])
                    groundTruths = cursor.fetchone()[0]

    json_resp['ground_truths'] = groundTruths
    # print(json_resp)
    return JsonResponse(json_resp)
46cb039c9811eac5a43c08776b59b8cef12c7133
10,844
from typing import Any, Hashable, MutableMapping

def to_dict(item: Any) -> MutableMapping[Hashable, Any]:
    """Converts 'item' to a MutableMapping.

    Args:
        item (Any): item to convert to a MutableMapping.

    Raises:
        TypeError: if 'item' is a type that is not registered.

    Returns:
        MutableMapping: derived from 'item'.
    """
    if isinstance(item, MutableMapping):
        return item
    else:
        raise TypeError(
            f'item cannot be converted because it is an unsupported type: '
            f'{type(item).__name__}')
c3ba483bde73a35ed036debcc4b87575b1c8b962
10,845
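Quick checks of both branches:

```python
print(to_dict({"a": 1}))   # {'a': 1} (returned unchanged)

try:
    to_dict([1, 2])
except TypeError as exc:
    print(exc)  # item cannot be converted because it is an unsupported type: list
```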
import copy

def node(*args, **kwargs):
    """
    args[0] -- a XML tag
    args[1:] -- an array of children to append to the newly created node
            or if a unicode arg is supplied it will be used to make a text node
    kwargs -- attributes

    returns a xml.dom.minidom.Element
    """
    blocked_attributes = ['tag']
    tag = args[0] if len(args) > 0 else kwargs['tag']
    args = args[1:]
    result = DetachableElement(tag)
    unicode_args = [u for u in args if type(u) == unicode]
    assert len(unicode_args) <= 1
    parsed_string = False

    # kwargs is an xml attribute dictionary,
    # here we convert it to a xml.dom.minidom.Element
    for k, v in iter(kwargs.items()):
        if k in blocked_attributes:
            continue
        if k == 'toParseString':
            if v is True and len(unicode_args) == 1:
                parsed_string = True
                # Add this header string so parseString can be used?
                s = u'<?xml version="1.0" ?><' + tag + '>' + unicode_args[0] \
                    + u'</' + tag + '>'
                parsed_node = parseString(s.encode("utf-8")).documentElement
                # Move node's children to the result Element
                # discarding node's root
                for child in parsed_node.childNodes:
                    result.appendChild(copy.deepcopy(child))
        else:
            result.setAttribute(k, v)

    if len(unicode_args) == 1 and not parsed_string:
        text_node = PatchedText()
        text_node.data = unicode_args[0]
        result.appendChild(text_node)

    for n in args:
        if type(n) == int or type(n) == float or type(n) == bytes:
            text_node = PatchedText()
            text_node.data = unicode(n)
            result.appendChild(text_node)
        elif type(n) is not unicode:
            try:
                result.appendChild(n)
            except:
                raise Exception(type(n), n)
    return result
2a0f9a953d07a114e0a426f4225fb3c5076513ee
10,846
import subprocess
import plistlib
from typing import Optional

def get_volume_uuid(path: str) -> Optional[str]:
    """Returns the volume UUID for the given path or None if not found"""
    try:
        output = subprocess.check_output(["diskutil", "info", "-plist", path])
        plist = plistlib.loads(output)
        return plist.get("VolumeUUID", None)
    except subprocess.CalledProcessError:
        return None
d01d6a9232393013009d337a2b669e246e928b65
10,847
import math

def mylog10(x):
    """Return the base-10 logarithm of x."""
    return math.log10(x)
d32113c16047175125e1b79c9ce0ea8822e4853c
10,848
import numpy

def get_RGB_to_RGB_matrix(in_colorspace, out_colorspace, primaries_only=False):
    """Return RGB to RGB conversion matrix.

    Args:
        in_colorspace (str): input colorspace.
        out_colorspace (str): output colorspace.

    Kwargs:
        primaries_only (bool): primaries matrix only, doesn't include
            white point.

    Returns:
        .numpy.matrix (3x3)
    """
    # Get colorspace in to XYZ matrix
    in_matrix = get_colorspace_matrix(in_colorspace, primaries_only)
    # Get XYZ to colorspace out matrix
    out_matrix = get_colorspace_matrix(out_colorspace, primaries_only, inv=True)
    # Return the matrix product of the 2 matrices
    return numpy.dot(out_matrix, in_matrix)
6c864fc45d254c38bc00a381f55dc3d2ad80aa9a
10,849
import re
import string

def normalize_string(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
        return re.sub(regex, ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
85a77dca1110460a1c445cc32f78cadb8c70ebd5
10,850
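A short demonstration of the pipeline (lowercasing, punctuation and article removal, whitespace collapse):

```python
print(normalize_string("The  quick, brown fox!"))  # 'quick brown fox'
print(normalize_string("An answer"))               # 'answer'
```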
def sub_band_as_numpy(band, y_limits, data_type=None):
    """Read subsets of the dataset so that we don't hold the whole thing in
    memory. It seems wasteful to reread parts, but GDAL keeps its own cache.
    """
    data_type = data_type if data_type else INT32
    y_size = y_limits[1] - y_limits[0]
    LOGGER.debug(f"sub_band y_size={y_size} y_limits {y_limits[0]}")
    scanline_buffer = band.ReadRaster(
        xoff=0,
        yoff=y_limits[0],
        xsize=band.XSize,
        ysize=y_size,
        buf_xsize=band.XSize,
        buf_ysize=y_size,
        buf_type=data_type.gdal,
    )
    scanline = np.frombuffer(scanline_buffer, dtype=data_type.numpy)
    return np.reshape(scanline, (band.XSize, y_size))
d27ac1f5f54179f5240d8160774ca0f722948982
10,851
def full_reverse(viewname, urlconf=None, args=None, kwargs=None,
                 current_app=None, scheme=None, domain=None, subdomain=None):
    """
    First, obtains the absolute path of the URL matching given ``viewname``
    with its parameters.

    Then, prepends the path with the scheme name and the authority part
    (domain and subdomain) and returns it.

    Args::

        viewname (str): Name of the URL pattern.
        urlconf (str): Path of the module containing URLconfs.
        args (list): Positional arguments of the URL pattern.
        kwargs (dict): Keyword arguments of the URL pattern.
        current_app (str): App identifier.
        scheme (str): Scheme name (commonly called protocol).
        domain (str): Domain name.
        subdomain (str): Subdomain name.

    Returns::

        The full URL matching given view with its parameters.

    Examples::

        >>> full_reverse('client-detail-view', args=[client.id])
        'http://example.com/clients/client/123/'

        >>> full_reverse('client-list-view', scheme='https', subdomain='admin')
        'https://admin.example.com/clients/'

    Raises::

        NoReverseMatch: If no URL pattern matches the given ``viewname``.

        ValueError: If both ``args`` and ``kwargs`` are given.
    """
    location = reverse(viewname, urlconf, args, kwargs, current_app)
    return build_full_url(location, scheme, domain, subdomain)
b061cdc1369af0c60b95da58f262563e5ea93aa3
10,852
def get_object_or_none(model_class, **kwargs):
    """Identical to get_object_or_404, except instead of returning Http404,
    this returns None.
    """
    try:
        return model_class.objects.get(**kwargs)
    except model_class.DoesNotExist:
        return None
d74b84e9186d9fb4faabb7eaa70f53672665d304
10,853
def GenerateTests():
    """Generate all tests."""
    filelist = []
    for ii in range(len(_GROUPS)):
        filename = GenerateFilename(_GROUPS[ii])
        filelist.append(filename)
        WriteTest(filename, ii, ii + 1)
    return filelist
1160454ae0fab7008051bf9d4f5d2b94a74888b9
10,854
def shift_df_generator(empty_df, day_lower_hr_lim, day_upper_hr_lim):
    """Generate day and night dataframe.

    Parameters
    ----------
    empty_df : DataFrame
        A DataFrame with timestamp and 'Temperature (Celsius)' with all zeros.
    day_lower_hr_lim : int
        The lower hour limit that constitutes the start of the day shift.
    day_upper_hr_lim : int
        The upper hour limit that constitutes the end of the day shift.

    Returns
    -------
    day_df : DataFrame
        A DataFrame containing only dayshift values.
    night_df : DataFrame
        A DataFrame containing only nightshift values.
    """
    # Create 2 temporary dataframes (1 for dayshift, 1 for nightshift)
    day_df = empty_df.loc[(empty_df['Timestamp'].dt.hour >= day_lower_hr_lim) &
                          (empty_df['Timestamp'].dt.hour < day_upper_hr_lim)]
    # Night dataframe will consist of rows with indices not taken by day_df
    night_df = empty_df[~empty_df.index.isin(day_df.index)]

    return day_df, night_df
cc8f3675d88dc920fd1762894c859cd93a523aab
10,855
import json
import regex

def json_loads(data, handle=False):
    """
    Wrapped json.loads

    :param data: raw JSON string
    :param handle: patch switch. False (default): apply the backslash fix-up;
        True: parse as-is without the regex step
    :return: parsed object
    """
    if handle:
        return json.loads(data.strip())
    # The original call regex.sub(r"\\\\", data.strip()) was missing the
    # replacement argument; collapsing doubled backslashes appears to be the
    # intent, so that replacement is assumed here.
    return json.loads(regex.sub(r"\\\\", r"\\", data.strip()))
34156a594b203af041fba8da65601bb17da95a3e
10,856
def elina_linexpr0_size(linexpr):
    """
    Return the size of an ElinaLinexpr0.

    Parameters
    ----------
    linexpr : ElinaLinexpr0Ptr
        Pointer to the ElinaLinexpr0 that needs to be checked for its size.

    Returns
    -------
    size_linexpr : c_size_t
        Size of the ElinaLinexpr0.
    """
    size_linexpr = None
    try:
        elina_linexpr0_size_c = elina_auxiliary_api.elina_linexpr0_size
        elina_linexpr0_size_c.restype = c_size_t
        elina_linexpr0_size_c.argtypes = [ElinaLinexpr0Ptr]
        size_linexpr = elina_linexpr0_size_c(linexpr)
    except:
        print('Problem with loading/calling "elina_linexpr0_size" from "libelinaux.so"')
        print('Make sure you are passing ElinaLinexpr0Ptr to the function')
    return size_linexpr
b68a9874dd795876dae1ff2ffe3de98728e521a7
10,857
def _make_index(df, cols=META_IDX, unique=True):
    """Create an index from the columns/index of a dataframe or series"""

    def _get_col(c):
        try:
            return df.index.get_level_values(c)
        except KeyError:
            return df[c]

    index = list(zip(*[_get_col(col) for col in cols]))
    if unique:
        index = pd.unique(index)
    return pd.MultiIndex.from_tuples(index, names=tuple(cols))
4356de2531f150c80bc364315ebf547fd345967f
10,858
import json

def handler(event, context):
    """
    Lambda Handler.

    Returns Hello World and the event and context objects
    """
    print(event)
    print(context)

    return {
        "body": json.dumps('Hello World!')
    }
561326fec784aa72a133b217f1e2cecaf12ec1ad
10,859
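Invoking the handler locally with a dummy event; `None` stands in for the AWS context object:

```python
response = handler({"key": "value"}, None)
print(response)   # {'body': '"Hello World!"'}
```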
from typing import List

import tensorflow as tf

def flatten_concat(tensors: List[tf.Tensor], batch_dims: int = 1) -> tf.Tensor:
    """Flatten given inputs and concatenate them."""
    # tensors [(B, ...), (B, ...)]
    flattened: List[tf.Tensor] = list()  # [(B, X), (B, Y) ...]
    for tensor in tensors:
        final_dim = -1
        if all(i is not None for i in tensor.shape[batch_dims:]):
            # We know all the dimensions
            final_dim = tf.reduce_prod(tensor.shape[batch_dims:])
        flat_tensor = tf.reshape(
            tensor, tf.concat([tf.shape(tensor)[:batch_dims], [final_dim]], 0)
        )
        flattened.append(flat_tensor)
    return tf.concat(flattened, -1)
1a4b9bbf12f75aff43273a7f44c659b7afecdddc
10,860
def analyze_friends(names, phones, all_areacodes, all_places):
    """
    names: tuple of names
    phones: tuple of phone numbers (cleaned)
    all_areacodes: tuple of area codes (3char ints)
    all_places: tuple of places

    Goal: Print out how many friends you have and every unique state
    """
    # For TESTING MAKE THE PHONE NUMBER FIRST 3 DIGITS THE SAME AS THE AREA CODE

    # def get_unique_area_codes():
    #     """
    #     Returns a tuple of all unique area codes
    #     """
    #     area_codes = ()
    #     for ph in phones:
    #         if ph[0:3] not in area_codes:
    #             area_codes += (ph[0:3],)
    #     return area_codes

    def get_States(some_areacodes):
        """
        some_areacodes: tuple of area codes
        Return a tuple of states ASSOCIATED with area codes
        """
        states = ()
        for ac in some_areacodes:
            if ac not in all_areacodes:
                states += ("BAD AREA CODE",)
            else:
                index = all_areacodes.index(ac)
                states += (all_places[index],)
        return states

    num_friends = len(names)  # Gets number of friends
    # unique_areacodes = get_unique_area_codes()
    unique_states = get_States(all_areacodes)

    print("You have", num_friends, "friends!")
    print("They live in", unique_states)
    # Function ends with the print, no returns
b90f938c9c019dc331c38cafb36d1a7e0cb3f83f
10,861
def fast_autoregressive_predict_fn(context, seq_len):
    """Given a context, autoregressively generate the rest of a sine wave."""
    core = hk.LSTM(32)
    dense = hk.Linear(1)
    state = core.initial_state(context.shape[0])

    # Unroll over the context using `hk.dynamic_unroll`.
    # As before, we `hk.BatchApply` the Linear for efficiency.
    context_outs, state = hk.dynamic_unroll(
        core,
        context,
        state,
        time_major=False,
    )
    context_outs = hk.BatchApply(dense)(context_outs)

    # Now, unroll one step at a time using the running recurrent state.
    ar_outs = []
    x = context_outs[:, -1, :]
    times = range(seq_len - context.shape[1])
    for _ in times:
        x, state = core(x, state)
        x = dense(x)
        ar_outs.append(x)
    ar_outs = jnp.stack(ar_outs)
    ar_outs = ar_outs.transpose(1, 0, 2)

    return jnp.concatenate([context_outs, ar_outs], axis=1)
bf61799a8f34045cb214fd68095e1b9346fc797f
10,862
def get_entities(corpus_name):
    """
    Load the dataset from the filesystem corresponding to corpus_name
    (to see the list of allowed names, use utils.list_corpora()),
    and extract all annotated entities.

    Returns a dict, in which each key is an entity type, which contains
    a list of entity mentions in the corpus.
    """
    r = read_conll(corpus_name)
    data = list(r)
    data2 = [[(w, iob) for ((w, p), iob) in d] for d in data]
    data3 = [i for u in data2 for i in u]

    tags = sentence_utils.get_tagset(data, with_prefix=True)
    taglist = set([t[2:] for t in list(tags) if t != 'O'])

    entities = {}
    for key in taglist:
        entities[key] = []

    data3.append((u'O', u'O'))

    ent = []
    entitytype = 'None'
    for i, item in enumerate(data3[0:-1]):
        if item[1] != 'O':
            if item[1][0] == 'B':
                ent = []
                ent.append(item[0])
            else:  # == I
                if item[1][0] != 'I':
                    raise ValueError("Should be I")
                ent.append(item[0])
            if data3[i + 1][1][2:] != item[1][2:] or data3[i + 1][1][0] == 'B':
                # print i, item
                entitytype = item[1][2:]
                entities[entitytype].append(' '.join(ent))

    return entities
274d82c4d5ae978452aaa7cf3aae14a7b86b3030
10,863
def _reporthook(t):
    """``reporthook`` to use with ``urllib.request`` that prints the
    process of the download.

    Uses ``tqdm`` for progress bar.

    **Reference:**
    https://github.com/tqdm/tqdm
    """
    last_b = [0]

    def inner(b: int = 1, bsize: int = 1, tsize: int = None):
        """
        :param b: Number of blocks just transferred [default: 1].
        :param bsize: Size of each block (in tqdm units) [default: 1].
        :param tsize: Total size (in tqdm units). If [default: None] remains
            unchanged.
        """
        if tsize is not None:
            t.total = tsize
        t.update((b - last_b[0]) * bsize)
        last_b[0] = b

    return inner
9a4d527ff0b964e4220db7a22a522657947e91cb
10,864
def serial_rx(sysclk, reset_n, n_stop_bits_i, half_baud_rate_tick_i,
              baud_rate_tick_i, recieve_i, data_o, ready_o):
    """ Serial

    This module implements a receiver serial interface

    Ports:
    -----
    sysclk: sysclk input
    reset_n: reset input
    half_baud_rate_tick_i: half baud rate tick
    baud_rate_tick_i: the baud rate
    n_stop_bits_i: number of stop bits
    recieve_i: rx
    data_o: the data output in 1 byte
    ready_o: indicates data_o is valid
    -----
    """
    END_OF_BYTE = 7

    state_reg = Signal(t_State.ST_WAIT_START_BIT)
    state = Signal(t_State.ST_WAIT_START_BIT)
    data_reg = Signal(intbv(0, min=0, max=256))
    data = Signal(intbv(0, min=0, max=256))
    ready_reg = Signal(bool(0))
    ready = Signal(bool(0))
    count_8_bits_reg = Signal(intbv(0, min=0, max=8))
    count_8_bits = Signal(intbv(0, min=0, max=8))
    count_stop_bits_reg = Signal(intbv(0, min=0, max=8))
    count_stop_bits = Signal(intbv(0, min=0, max=8))

    @always_comb
    def outputs():
        data_o.next = data_reg
        ready_o.next = ready_reg

    @always_seq(sysclk.posedge, reset=reset_n)
    def sequential_process():
        state_reg.next = state
        data_reg.next = data
        ready_reg.next = ready
        count_8_bits_reg.next = count_8_bits
        count_stop_bits_reg.next = count_stop_bits

    @always_comb
    def combinational_process():
        state.next = state_reg
        data.next = data_reg
        ready.next = ready_reg
        count_8_bits.next = count_8_bits_reg
        count_stop_bits.next = count_stop_bits_reg

        if state_reg == t_State.ST_WAIT_START_BIT:
            ready.next = False
            if baud_rate_tick_i == True:
                if recieve_i == False:
                    state.next = t_State.ST_GET_DATA_BITS
        elif state_reg == t_State.ST_GET_DATA_BITS:
            if baud_rate_tick_i == True:
                data.next[count_8_bits_reg] = recieve_i
                if count_8_bits_reg == END_OF_BYTE:
                    count_8_bits.next = 0
                    state.next = t_State.ST_GET_STOP_BITS
                else:
                    count_8_bits.next = count_8_bits_reg + 1
                    state.next = t_State.ST_GET_DATA_BITS
        elif state_reg == t_State.ST_GET_STOP_BITS:
            if baud_rate_tick_i == True:
                if count_stop_bits_reg == (n_stop_bits_i - 1):
                    count_stop_bits.next = 0
                    ready.next = True
                    state.next = t_State.ST_WAIT_START_BIT
                else:
                    count_stop_bits.next = count_stop_bits_reg + 1
        else:
            raise ValueError("Undefined State")

    return outputs, sequential_process, combinational_process
62f215644004b61738db9fd249f28a4abc1391ea
10,865
def zpad(x, l):
    """ Left zero pad value `x` at least to length `l`.

    >>> zpad('', 1)
    '\x00'
    >>> zpad('\xca\xfe', 4)
    '\x00\x00\xca\xfe'
    >>> zpad('\xff', 1)
    '\xff'
    >>> zpad('\xca\xfe', 2)
    '\xca\xfe'
    """
    return b'\x00' * max(0, l - len(x)) + x
605aab22fa54f9df85397793c65d46dcf2ec3588
10,866
def clf2D_slope_intercept(coef=None, intercept=None, clf=None):
    """
    Gets the slope and intercept for the separating hyperplane of a linear
    classifier fit on a two dimensional dataset.

    Parameters
    ----------
    coef:
        The classification normal vector.

    intercept:
        The classifier intercept.

    clf: subclass of sklearn.linear_model.base.LinearClassifierMixin
        A sklearn classifier with attributes coef_ and intercept_

    Output
    ------
    slope, intercept
    """
    if clf is not None:
        coef = clf.coef_.reshape(-1)
        intercept = float(clf.intercept_)
    else:
        assert coef is not None and intercept is not None

    slope = -coef[0] / coef[1]
    intercept = -intercept / coef[1]

    return slope, intercept
9376c34a3836ee028c4b0497e1088ddd50bb1fc6
10,867
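A spot check with an explicit normal vector: coef (1, 2) and intercept 0.5 describe the boundary x2 = -0.5 * x1 - 0.25, so the expected slope is -0.5 and the expected intercept is -0.25:

```python
import numpy as np

slope, intercept = clf2D_slope_intercept(coef=np.array([1.0, 2.0]), intercept=0.5)
print(slope, intercept)   # -0.5 -0.25
```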
from pathlib import Path
import os

def _get_imgpaths(datasets: list, verbose=True):
    """ get image paths

    Args:
        datasets (list): dataset names
        verbose (bool, optional): print progress per dataset. Defaults to True.
    """
    img_paths = []
    for dname in datasets:
        img_dir = Path(dname.format(DATASETS_DIR=DATASETS_DIR))
        assert img_dir.is_dir(), ANSI.errorstr(f'Cannot find {img_dir}!')
        img_names = os.listdir(img_dir)
        for imname in img_names:
            impath = str(img_dir / imname)
            img_paths.append(impath)
        if verbose:
            msg = f'Loaded {dname} from {ANSI.udlstr(img_dir)}: {len(img_names)} images.'
            print(msg)
    assert len(img_paths) > 0, 'No image path loaded'
    return img_paths
67de2541318f5348223bd79ceb0ab11e60d55f56
10,868
def build_driver_for_task(task):
    """Builds a composable driver for a given task.

    Starts with a `BareDriver` object, and attaches implementations of the
    various driver interfaces to it. They come from separate
    driver factories and are configurable via the database.

    :param task: The task containing the node to build a driver for.
    :returns: A driver object for the task.
    :raises: DriverNotFound if node.driver could not be found in the
             "ironic.hardware.types" namespaces.
    :raises: InterfaceNotFoundInEntrypoint if some node interfaces are set
             to invalid or unsupported values.
    :raises: IncompatibleInterface if the requested implementation is not
             compatible with the hardware type.
    """
    node = task.node
    hw_type = get_hardware_type(node.driver)

    check_and_update_node_interfaces(node, hw_type=hw_type)

    bare_driver = driver_base.BareDriver()
    _attach_interfaces_to_driver(bare_driver, node, hw_type)

    return bare_driver
5283b91e5a42fe7ebec20b91e0f1463abbc8b724
10,869
import torch

def evaluate(eval_model, criterion, ntokens, data_source, cnf):
    """Evaluates the training loss of the given model"""
    eval_model.eval()  # Turn on the evaluation mode
    total_loss = 0.0
    src_mask = generate_square_subsequent_mask(cnf.input_length).to(cnf.device)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, cnf.input_length):
            data, targets = get_batch(data_source, i, cnf)
            if data.size(0) != cnf.input_length:
                src_mask = generate_square_subsequent_mask(data.size(0)).to(cnf.device)
            output = eval_model(data, src_mask)
            output_flat = output.view(-1, ntokens)
            total_loss += len(data) * criterion(output_flat, targets).item()
    return total_loss / (len(data_source) - 1)
4570f5e7751683157ca8f3155052e484a1b3962e
10,870
def km_to_meters(kilometers):
    """
    (int or float) -> float

    Takes a distance in kilometers and returns the distance in meters.
    """
    return kilometers * 1000.0
33e40914c9d2b10009889ebfcbc543863a9ca363
10,871
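Sanity check:

assert km_to_meters(1) == 1000.0
assert km_to_meters(2.5) == 2500.0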
from typing import List
from typing import Optional
from typing import Dict
from typing import Callable
from typing import Any


def build(plan: List[Step], instances_stock: Optional[Dict[Callable, Any]] = None):
    """ Build instances dictionary from a plan """
    instances_stock = instances_stock or {}
    instances = {}
    for cls, kwargs_spec in plan:
        if cls in instances_stock:
            instances[cls] = instances_stock[cls]
        else:
            instances[cls] = cls(**kwargs_spec.kwargs(instances))
    return instances
a1b3ecc98097d9a5d998cca1484b22a4b83124ca
10,872
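A self-contained sketch of what a plan might look like. The Ref kwargs-spec below is hypothetical (the real Step/spec types are not shown in this entry), but it satisfies the only contract build relies on: kwargs_spec.kwargs(instances) returns the constructor arguments, possibly looking up already-built instances:

class Ref:
    # Hypothetical kwargs spec: resolves named references to built instances.
    def __init__(self, **refs):
        self._refs = refs

    def kwargs(self, instances):
        return {name: instances[cls] for name, cls in self._refs.items()}


class Engine:
    pass


class Car:
    def __init__(self, engine):
        self.engine = engine


instances = build([(Engine, Ref()), (Car, Ref(engine=Engine))])
assert instances[Car].engine is instances[Engine]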
def make_module_spec(options, weight_file):
    """Makes a module spec.

    Args:
        options: LM hyperparameters.
        weight_file: location of the hdf5 file with LM weights.

    Returns:
        A module spec object used for constructing a TF-Hub module.
    """

    def module_fn():
        """Spec function for a token embedding module."""
        # init
        _bos_id = 256
        _eos_id = 257
        _bow_id = 258
        _eow_id = 259
        _pad_id = 260
        _max_word_length = 50
        _parallel_iterations = 10
        _max_batch_size = 1024

        id_dtype = tf.int32
        id_nptype = np.int32
        max_word_length = tf.constant(_max_word_length, dtype=id_dtype,
                                      name='max_word_length')

        version = tf.constant('from_dp_1', dtype=tf.string, name='version')

        # the character representation of the begin/end of sentence characters
        def _make_bos_eos(c):
            r = np.zeros([_max_word_length], dtype=id_nptype)
            r[:] = _pad_id
            r[0] = _bow_id
            r[1] = c
            r[2] = _eow_id
            return tf.constant(r, dtype=id_dtype)

        bos_ids = _make_bos_eos(_bos_id)
        eos_ids = _make_bos_eos(_eos_id)

        def token2ids(token):
            with tf.name_scope("token2ids_preprocessor"):
                char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
                char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
                char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
                                            [1], name='slice2resized_token')
                ids_num = tf.shape(char_ids)[0]
                fill_ids_num = (_max_word_length - 2) - ids_num
                pads = tf.fill([fill_ids_num], _pad_id)
                bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
                                               0, name='concat2bow_token_eow_pads')
                return bow_token_eow_pads

        def sentence_tagging_and_padding(sen_dim):
            with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
                sen = sen_dim[0]
                dim = sen_dim[1]
                extra_dim = tf.shape(sen)[0] - dim
                sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')

                bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0,
                                        name='concat2bos_sen_eos')
                bos_sen_eos_plus_one = bos_sen_eos + 1
                bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
                                          "CONSTANT", name='pad2bos_sen_eos_pads')
                return bos_sen_eos_pads

        # Input placeholders to the biLM.
        tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
        sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32,
                                      name='ph2sequence_len')

        tok_shape = tf.shape(tokens)
        line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            tok_ids = tf.map_fn(
                token2ids,
                line_tokens,
                dtype=tf.int32,
                back_prop=False,
                parallel_iterations=_parallel_iterations,
                name='map_fn2get_tok_ids')

        tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1],
                             name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            sen_ids = tf.map_fn(
                sentence_tagging_and_padding,
                (tok_ids, sequence_len),
                dtype=tf.int32,
                back_prop=False,
                parallel_iterations=_parallel_iterations,
                name='map_fn2get_sen_ids')

        # Build the biLM graph.
        bilm = BidirectionalLanguageModel(options, str(weight_file),
                                          max_batch_size=_max_batch_size)

        embeddings_op = bilm(sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)

        weighted_op = elmo_output['weighted_op']
        mean_op = elmo_output['mean_op']
        word_emb = elmo_output['word_emb']
        lstm_outputs1 = elmo_output['lstm_outputs1']
        lstm_outputs2 = elmo_output['lstm_outputs2']

        hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
                          {"elmo": weighted_op,
                           "default": mean_op,
                           "word_emb": word_emb,
                           "lstm_outputs1": lstm_outputs1,
                           "lstm_outputs2": lstm_outputs2,
                           "version": version})

        # #########################Next signature############################# #

        # Input placeholders to the biLM.
        def_strings = tf.placeholder(shape=(None), dtype=tf.string)
        def_tokens_sparse = tf.string_split(def_strings)
        def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
                                              output_shape=def_tokens_sparse.dense_shape,
                                              sparse_values=def_tokens_sparse.values,
                                              default_value='')
        def_mask = tf.not_equal(def_tokens_dense, '')
        def_int_mask = tf.cast(def_mask, dtype=tf.int32)
        def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)

        def_tok_shape = tf.shape(def_tokens_dense)
        def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1],
                                     name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            def_tok_ids = tf.map_fn(
                token2ids,
                def_line_tokens,
                dtype=tf.int32,
                back_prop=False,
                parallel_iterations=_parallel_iterations,
                name='map_fn2get_tok_ids')

        def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1],
                                 name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            def_sen_ids = tf.map_fn(
                sentence_tagging_and_padding,
                (def_tok_ids, def_sequence_len),
                dtype=tf.int32,
                back_prop=False,
                parallel_iterations=_parallel_iterations,
                name='map_fn2get_sen_ids')

        # Get ops to compute the LM embeddings.
        def_embeddings_op = bilm(def_sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        def_elmo_output = weight_layers('elmo_output', def_embeddings_op,
                                        l2_coef=0.0, reuse=True)

        def_weighted_op = def_elmo_output['weighted_op']
        def_mean_op = def_elmo_output['mean_op']
        def_word_emb = def_elmo_output['word_emb']
        def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
        def_lstm_outputs2 = def_elmo_output['lstm_outputs2']

        hub.add_signature("default", {"strings": def_strings},
                          {"elmo": def_weighted_op,
                           "default": def_mean_op,
                           "word_emb": def_word_emb,
                           "lstm_outputs1": def_lstm_outputs1,
                           "lstm_outputs2": def_lstm_outputs2,
                           "version": version})

    return hub.create_module_spec(module_fn)
2293f00186438a6cc3318be6a25ab5223b8e9a91
10,873
import math


def get_initial_scoreboard():
    """
    Retrieve the initial scoreboard (first pages of global and student views).

    If a user is logged in, the initial pages will instead be those on which
    that user appears, and their group scoreboards will also be returned.

    Returns:
        dict of scoreboard information
    """
    def get_user_pos(scoreboard, tid):
        for pos, team in enumerate(scoreboard):
            if team["tid"] == tid:
                return pos
        return 1

    user = None
    if api.user.is_logged_in():
        user = api.user.get_user()

    result = {'tid': 0, 'groups': []}
    global_board = api.stats.get_all_team_scores(include_ineligible=True)
    result['global'] = {
        'name': 'global',
        'pages': math.ceil(len(global_board) / scoreboard_page_len),
        'start_page': 1
    }
    if user is None:
        result['global']['scoreboard'] = global_board[:scoreboard_page_len]
    else:
        result['tid'] = user['tid']
        global_pos = get_user_pos(global_board, user["tid"])
        start_slice = math.floor(global_pos / 50) * 50
        result['global']['scoreboard'] = global_board[start_slice:start_slice + 50]
        result['global']['start_page'] = math.ceil((global_pos + 1) / 50)

        result['country'] = user["country"]
        student_board = api.stats.get_all_team_scores()
        student_pos = get_user_pos(student_board, user["tid"])
        start_slice = math.floor(student_pos / 50) * 50
        result['student'] = {
            'name': 'student',
            'pages': math.ceil(len(student_board) / scoreboard_page_len),
            'scoreboard': student_board[start_slice:start_slice + 50],
            'start_page': math.ceil((student_pos + 1) / 50),
        }
        for group in api.team.get_groups(user['tid']):
            # this is called on every scoreboard pageload and should be
            # cached to support large groups
            group_board = api.stats.get_group_scores(gid=group['gid'])
            group_pos = get_user_pos(group_board, user["tid"])
            start_slice = math.floor(group_pos / 50) * 50
            result['groups'].append({
                'gid': group['gid'],
                'name': group['name'],
                'scoreboard': group_board[start_slice:start_slice + 50],
                'pages': math.ceil(len(group_board) / scoreboard_page_len),
                'start_page': math.ceil((group_pos + 1) / 50),
            })
    return result
3e5998a0cc94a6c99ca58336ef0a350a4170240e
10,874
async def resolve(qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
                  tcp=False, source=None, raise_on_no_answer=True,
                  source_port=0, lifetime=None, search=None, backend=None):
    """Query nameservers asynchronously to find the answer to the question.

    This is a convenience function that uses the default resolver
    object to make the query.

    See ``dns.asyncresolver.Resolver.resolve`` for more information on the
    parameters.
    """
    return await get_default_resolver().resolve(qname, rdtype, rdclass, tcp,
                                                source, raise_on_no_answer,
                                                source_port, lifetime, search,
                                                backend)
90a79f18d5c8887cbede733e7e05778ea78b36eb
10,875
import re


def compare_xml(want, got):
    """Tries to do a 'xml-comparison' of want and got. Plain string
    comparison doesn't always work because, for example, attribute
    ordering should not be important. Comment nodes are not considered
    in the comparison.

    Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
    """
    _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')

    def norm_whitespace(v):
        return _norm_whitespace_re.sub(' ', v)

    def child_text(element):
        return ''.join([c.data for c in element.childNodes
                        if c.nodeType == Node.TEXT_NODE])

    def children(element):
        return [c for c in element.childNodes
                if c.nodeType == Node.ELEMENT_NODE]

    def norm_child_text(element):
        return norm_whitespace(child_text(element))

    def attrs_dict(element):
        return dict(element.attributes.items())

    def check_element(want_element, got_element):
        if want_element.tagName != got_element.tagName:
            return False
        if norm_child_text(want_element) != norm_child_text(got_element):
            return False
        if attrs_dict(want_element) != attrs_dict(got_element):
            return False
        want_children = children(want_element)
        got_children = children(got_element)
        if len(want_children) != len(got_children):
            return False
        for want, got in zip(want_children, got_children):
            if not check_element(want, got):
                return False
        return True

    def first_node(document):
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.replace('\\n', '\n')
    got = got.replace('\\n', '\n')

    # If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
    if not want.startswith('<?xml'):
        wrapper = '<root>%s</root>'
        want = wrapper % want
        got = wrapper % got

    # Parse the want and got strings, and compare the parsings.
    want_root = first_node(parseString(want))
    got_root = first_node(parseString(got))
    return check_element(want_root, got_root)
6632c723c2461dcb34b7e4f0bca0b4d096b5def8
10,876
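Usage sketch, assuming the surrounding module provides Node/parseString (from xml.dom.minidom) and the strip_quotes helper the function calls:

# Attribute order is irrelevant, text differences are not.
assert compare_xml('<a y="2" x="1"/>', '<a x="1" y="2"/>')
assert not compare_xml('<a>hello</a>', '<a>world</a>')
# Fragments also work, thanks to the <root> wrapping.
assert compare_xml('<foo/><bar/>', '<foo/><bar/>')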
def _tf_equal(a, b):
    """Overload of "equal" for Tensors."""
    return gen_math_ops.equal(a, b)
899cff2abe9613d798fb59190c1860ef6a6599d7
10,877
def faq():
    """FAQ page for SciNet"""
    return render_template("faq.html")
67bbdcc713789f71b0506206ef8a4f2a56b3f1a1
10,878
def render_table(sheet, header, width, data, header_style, data_style, tt_id_style):
    """Render the report page"""
    # Render table header
    for i in range(len(header)):
        sheet.write(0, i, header[i], header_style)
        sheet.col(i).width = width[i]
    sheet.row(1).height = 2500

    # Render table data
    i = 1
    for d in data:
        sheet.row(i + 1).height = 2500
        cols = [i, 'name', 'location', 'link', 'theme']
        for col in range(len(cols)):
            if col == 0:
                sheet.write(i, col, i, tt_id_style)
            elif col == 1:
                sheet.write(i, col, d[cols[col]], tt_id_style)
            else:
                try:
                    if col == 9:
                        sheet.write(i, col, (round((d[cols[col]] / 30), 2)), data_style)
                    else:
                        sheet.write(i, col, d[cols[col]], data_style)
                except KeyError:
                    sheet.write(i, col, 0, data_style)
        i = i + 1
    return sheet
bc181ff96319daef3cad10e5072124a6c43172a6
10,879
def texsafe(value):
    """ Returns a string with LaTeX special characters stripped/escaped out """
    special = [
        ["\\xc5", 'A'],   # '\\AA'
        ["\\xf6", 'o'],   # '\\"{o}'
        ["&", 'and'],
    ]

    for char in ['\\', '^', '~', '%', "'", '"']:  # these mess up things
        value = value.replace(char, '')

    for char in ['#', '$', '_', '{', '}', '<', '>']:  # these can be escaped properly
        value = value.replace(char, '\\' + char)

    for char, new_char in special:
        value = eval(repr(value).replace(char, new_char))

    return value
b40b60a34629f75dfdac298bd2937af52ef797b1
10,880
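For example (note that characters from the first loop are dropped outright, while the second loop backslash-escapes):

print(texsafe("50% of $x_{i}"))  # -> 50 of \$x\_\{i\}  ('%' is stripped)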
def match_against_host_software_profile(db_session, hostname, software_packages):
    """
    Given a software package list, return an array of dictionaries indicating
    if the software package matches any software package defined in the host
    software profile package list.
    """
    results = []
    system_option = SystemOption.get(db_session)
    if system_option.check_host_software_profile:
        host = get_host(db_session, hostname)
        if host is not None and len(software_packages) > 0:
            software_profile = get_software_profile_by_id(db_session,
                                                          host.software_profile_id)
            if software_profile is not None:
                software_profile_package_dict = get_matchable_package_dict(
                    software_profile.packages.split(','))
                software_package_dict = get_matchable_package_dict(software_packages)

                for software_package, pattern in software_package_dict.items():
                    matched = True if pattern in software_profile_package_dict.values() else False
                    results.append({'software_package': software_package,
                                    'matched': matched})
    return results
30a1bbf8a548a9578324a60aa3bc18998457671a
10,881
from copy import deepcopy
from typing import Mapping
from typing import Any

import numpy as np


def get_inputs_by_op(op: Op, store: Mapping[str, Any], copy_on_write: bool = False) -> Any:
    """Retrieve the necessary input data from the data dictionary in order to run an `op`.

    Args:
        op: The op to run.
        store: The system's data dictionary to draw inputs out of.
        copy_on_write: Whether to copy read-only data to make it writeable before returning it.

    Returns:
        Input data to be fed to the `op` forward function.
    """
    if op.in_list:
        data = []
    else:
        data = None
    if op.inputs:
        data = []
        for key in op.inputs:
            elem = store[key]
            if copy_on_write and isinstance(elem, np.ndarray) and not elem.flags.writeable:
                elem = deepcopy(elem)
                store[key] = elem
            data.append(elem)
        if not op.in_list:
            data = data[0]
    return data
1f3ee5bfe98793c4e8002f2a7f7ea834bf0d93c0
10,882
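A minimal sketch using a stand-in op object; SimpleNamespace is used here only to mimic the two attributes the function reads (inputs, in_list):

from types import SimpleNamespace

import numpy as np

op = SimpleNamespace(inputs=["x"], in_list=False)
arr = np.zeros(3)
arr.flags.writeable = False
store = {"x": arr}

x = get_inputs_by_op(op, store, copy_on_write=True)
x[0] = 1.0             # safe: the read-only array was copied before returning
assert store["x"] is x  # and the copy was swapped back into the store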
from functools import wraps
from typing import Type


def finalize_post(func, store: Type['ParameterStore']):
    """Finalizes the store prior to executing the function

    Parameters
    ----------
    func : callable
        The function to wrap.
    store : ParameterStore
        The parameter store to finalize.

    Returns
    -------
    callable
        The wrapped function.

    Raises
    ------
    MissingParameterException
        If there's a parameter missing from the required parameters in
        the given `store`.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        ret = func(*args, **kwargs)
        if not store.final:
            store.finalize()
        return ret
    return wrapper
92195a0005b94dad7606609f99da4c824e39d5b1
10,883
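Sketch with a dummy store (the real ParameterStore is not shown in this entry); it only mimics the final flag and finalize() method that the wrapper touches:

class DummyStore:
    # Hypothetical stand-in for ParameterStore.
    def __init__(self):
        self.final = False

    def finalize(self):
        self.final = True


store = DummyStore()


def load_params():
    return {"lr": 0.1}


load_params = finalize_post(load_params, store)
load_params()
assert store.final  # the store was finalized right after the call returned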
def searchaftertext(filename, startterm, searchterm):
    """Start search after a certain text in a file"""
    #print startterm
    #print searchterm
    startline = findLastString(filename, startterm)
    searchtermfound = findLastString(filename, searchterm)
    if searchtermfound > startline:
        return True
    return False
32adc5bebab42ac721c04c8f16bceea53f9e0d79
10,884
def vec_list_to_tensor(vec_list):
    """Convert list to vector tensor."""
    return jnp.stack(vec_list, axis=-1)
8e4dd60199c17dade87392f059412e00ae9defcc
10,885
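For example, stacking two 3-vectors yields a (3, 2) array:

import jax.numpy as jnp

v = vec_list_to_tensor([jnp.zeros(3), jnp.ones(3)])
assert v.shape == (3, 2)  # list entries become the trailing axis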
import datetime


def to_ecma_datetime_string(dt, default_timezone=local):
    """
    Convert a python datetime into the string format defined by ECMA-262.

    See ECMA international standard: ECMA-262 section 15.9.1.15

    Naive datetime objects are assumed to be in ``default_timezone``;
    the value is then converted to UTC for output.
    """
    assert isinstance(dt, datetime.datetime)
    dt = get_tz_aware_dt(dt, default_timezone).astimezone(utc)
    return "%4i-%02i-%02iT%02i:%02i:%02i.%03iZ" % (
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
        dt.microsecond / 1000)
bec3a52976552a0c0cc9ff5afde5bbf5578ff020
10,886
def _logfile_readme() -> str:
    """Returns a string containing a 'how to read this logfile' message.

    Returns
    -------
    str
        Returns a formatted paragraph-long message with tips on reading log
        file output.
    """
    line1 = "Messages are displayed below in the format"
    line2 = "   <DATE> <TIME> <LOGGER NAME> @ <FILE>:<LINE> - <LEVEL> - <FUNCTION>:<MESSAGE>"
    line3 = ("where <DATE> is the date in 'YYYY-MM-DD' format, <TIME> is the time in "
             "'HH:MM:SS,milliseconds' format, <LOGGER NAME> is the name of the logger "
             "that generated the message (which should be the __name__ of the file where "
             "the logger was initialized), <FILE> and <LINE> is the file name and line "
             "number where the message was generated, <LEVEL> is the priority level that "
             "the message was generated at, <FUNCTION> is the name of the function that "
             "the message was generated inside, and <MESSAGE> is the actual message that "
             "was generated.")
    message = f"{line1}\n\n{line2}\n\n{line3}\n\n"
    return message
5e418b20df1ebb486d0b1c3ecf38d6c72ae8a5a7
10,887
def taxon_lookup(es, body, index, taxonomy_index_template, opts, return_type):
    """Query elasticsearch for a taxon."""
    taxa = []
    with tolog.DisableLogger():
        res = es.search_template(body=body, index=index, rest_total_hits_as_int=True)
    if "hits" in res and res["hits"]["total"] > 0:
        if return_type == "taxon_id":
            taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
        else:
            taxa = [hit for hit in res["hits"]["hits"]]
    else:
        template = taxonomy_index_template(opts["taxonomy-source"].lower(), opts)
        index = template["index_name"]
        with tolog.DisableLogger():
            res = es.search_template(
                body=body, index=index, rest_total_hits_as_int=True
            )
        if "hits" in res and res["hits"]["total"] > 0:
            if return_type == "taxon_id":
                taxa = [hit["_source"]["taxon_id"] for hit in res["hits"]["hits"]]
            else:
                taxa = [hit for hit in res["hits"]["hits"]]
    return taxa
52604947804581f633603d0728a68bc16f198503
10,888
async def get_south_services(request):
    """
    Args:
        request:

    Returns:
        list of all south services with tracked assets and readings count

    :Example:
        curl -X GET http://localhost:8081/fledge/south
    """
    if 'cached' in request.query and request.query['cached'].lower() == 'false':
        _get_installed_plugins.cache_clear()

    storage_client = connect.get_storage_async()
    cf_mgr = ConfigurationManager(storage_client)
    try:
        south_cat = await cf_mgr.get_category_child("South")
        south_categories = [nc["key"] for nc in south_cat]
    except:
        return web.json_response({'services': []})

    response = await _services_with_assets(storage_client, cf_mgr, south_categories)
    return web.json_response({'services': response})
a134bcf3c899212afc4b805ddaa9a19db901578a
10,889
def filter_bam_file(bamfile, chromosome, outfile):
    """
    filter_bam_file uses samtools to read a <bamfile> and read only
    the reads that are mapped to <chromosome>. It saves the filtered
    reads into <outfile>.
    """
    inputs = [bamfile]
    outputs = [outfile]
    options = {
        'cores': 1,
        'memory': '4g',
        'account': 'NChain',
        'walltime': '01:00:00'
    }
    directory = "/".join(outfile.split("/")[:-1])

    spec = '''
    source /com/extra/samtools/1.6.0/load.sh
    mkdir -p {dirc}
    samtools view -b {infile} {chrom} > {out}
    '''.format(infile=bamfile, chrom=chromosome, out=outfile, dirc=directory)

    return inputs, outputs, options, spec
317e1283d4722483e4bc98080ef99abd9876d045
10,890
def import_teachers():
    """
    Import the teachers from Moodle.

    :return: Amount of imported users.
    :rtype: int
    """
    course_list = dict(Course.objects.values_list("courseId", "pk"))
    teachers_list = parse_get_teachers(get_teachers(list(course_list.keys())))
    teacher_group = create_auth_group()
    users = create_teachers(teachers_list)
    add_courses_and_group_to_users(course_list, teacher_group, teachers_list, users)
    return users.count()
25b03c5b79d348171d23bce54a67ebbab2911440
10,891
def baryvel(dje, deq):
    """
    Calculate helio- and barycentric velocity.

    .. note:: The "JPL" option present in IDL is not provided here.

    Parameters
    ----------
    dje : float
        Julian ephemeris date
    deq : float
        Epoch of mean equinox of helio- and barycentric velocity output.
        If `deq` is zero, `deq` is assumed to be equal to `dje`.

    Returns
    -------
    dvelh : array
        Heliocentric velocity vector [km/s].
    dvelb : array
        Barycentric velocity vector [km/s].

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library.

    :IDL - Documentation:

    pro baryvel, dje, deq, dvelh, dvelb, JPL = JPL

    NAME:
        BARYVEL
    PURPOSE:
        Calculates heliocentric and barycentric velocity components of Earth.
    EXPLANATION:
        BARYVEL takes into account the Earth-Moon motion, and is useful for
        radial velocity work to an accuracy of ~1 m/s.
    CALLING SEQUENCE:
        BARYVEL, dje, deq, dvelh, dvelb, [ JPL = ]
    INPUTS:
        DJE - (scalar) Julian ephemeris date.
        DEQ - (scalar) epoch of mean equinox of dvelh and dvelb. If deq = 0
              then deq is assumed to be equal to dje.
    OUTPUTS:
        DVELH: (vector(3)) heliocentric velocity component. in km/s
        DVELB: (vector(3)) barycentric velocity component. in km/s

        The 3-vectors DVELH and DVELB are given in a right-handed coordinate
        system with the +X axis toward the Vernal Equinox, and +Z axis
        toward the celestial pole.
    OPTIONAL KEYWORD SET:
        JPL - if /JPL set, then BARYVEL will call the procedure JPLEPHINTERP
              to compute the Earth velocity using the full JPL ephemeris.
              The JPL ephemeris FITS file JPLEPH.405 must exist in either the
              current directory, or in the directory specified by the
              environment variable ASTRO_DATA. Alternatively, the JPL keyword
              can be set to the full path and name of the ephemeris file.
              A copy of the JPL ephemeris FITS file is available in
              http://idlastro.gsfc.nasa.gov/ftp/data/
    PROCEDURES CALLED:
        Function PREMAT() -- computes precession matrix
        JPLEPHREAD, JPLEPHINTERP, TDB2TDT - if /JPL keyword is set
    NOTES:
        Algorithm taken from FORTRAN program of Stumpff (1980, A&A Suppl, 41,1)
        Stumpf claimed an accuracy of 42 cm/s for the velocity. A
        comparison with the JPL FORTRAN planetary ephemeris program PLEPH
        found agreement to within about 65 cm/s between 1986 and 1994

        If /JPL is set (using JPLEPH.405 ephemeris file) then velocities are
        given in the ICRS system; otherwise in the FK4 system.
    EXAMPLE:
        Compute the radial velocity of the Earth toward Altair on 15-Feb-1994
        using both the original Stumpf algorithm and the JPL ephemeris

        IDL> jdcnv, 1994, 2, 15, 0, jd         ;==> JD = 2449398.5
        IDL> baryvel, jd, 2000, vh, vb         ;Original algorithm
            ==> vh = [-17.07243, -22.81121, -9.889315]  ;Heliocentric km/s
            ==> vb = [-17.08083, -22.80471, -9.886582]  ;Barycentric km/s
        IDL> baryvel, jd, 2000, vh, vb, /jpl   ;JPL ephemeris
            ==> vh = [-17.07236, -22.81126, -9.889419]  ;Heliocentric km/s
            ==> vb = [-17.08083, -22.80484, -9.886409]  ;Barycentric km/s

        IDL> ra = ten(19,50,46.77)*15/!RADEG   ;RA  in radians
        IDL> dec = ten(08,52,3.5)/!RADEG       ;Dec in radians
        IDL> v = vb[0]*cos(dec)*cos(ra) + $    ;Project velocity toward star
                 vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec)
    REVISION HISTORY:
        Jeff Valenti, U.C. Berkeley    Translated BARVEL.FOR to IDL.
        W. Landsman, Cleaned up program sent by Chris McCarthy (SfSU) June 1994
        Converted to IDL V5.0   W. Landsman   September 1997
        Added /JPL keyword  W. Landsman   July 2001
        Documentation update W. Landsman Dec 2005
    """
    # Define constants
    dc2pi = 2 * np.pi
    cc2pi = 2 * np.pi
    dc1 = 1.0
    dcto = 2415020.0
    dcjul = 36525.0  # days in Julian year
    dcbes = 0.313
    dctrop = 365.24219572  # days in tropical year (...572 insig)
    dc1900 = 1900.0
    AU = 1.4959787e8

    # Constants dcfel(i,k) of fast changing elements.
    dcfel = [1.7400353e00, 6.2833195099091e02, 5.2796e-6,
             6.2565836e00, 6.2830194572674e02, -2.6180e-6,
             4.7199666e00, 8.3997091449254e03, -1.9780e-5,
             1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
             4.1547339e00, 5.2993466764997e01, 5.8845e-6,
             4.6524223e00, 2.1354275911213e01, 5.6797e-6,
             4.2620486e00, 7.5025342197656e00, 5.5317e-6,
             1.4740694e00, 3.8377331909193e00, 5.6093e-6]
    dcfel = np.resize(dcfel, (8, 3))

    # constants dceps and ccsel(i,k) of slowly changing elements.
    dceps = [4.093198e-1, -2.271110e-4, -2.860401e-8]
    ccsel = [1.675104e-2, -4.179579e-5, -1.260516e-7,
             2.220221e-1, 2.809917e-2, 1.852532e-5,
             1.589963e00, 3.418075e-2, 1.430200e-5,
             2.994089e00, 2.590824e-2, 4.155840e-6,
             8.155457e-1, 2.486352e-2, 6.836840e-6,
             1.735614e00, 1.763719e-2, 6.370440e-6,
             1.968564e00, 1.524020e-2, -2.517152e-6,
             1.282417e00, 8.703393e-3, 2.289292e-5,
             2.280820e00, 1.918010e-2, 4.484520e-6,
             4.833473e-2, 1.641773e-4, -4.654200e-7,
             5.589232e-2, -3.455092e-4, -7.388560e-7,
             4.634443e-2, -2.658234e-5, 7.757000e-8,
             8.997041e-3, 6.329728e-6, -1.939256e-9,
             2.284178e-2, -9.941590e-5, 6.787400e-8,
             4.350267e-2, -6.839749e-5, -2.714956e-7,
             1.348204e-2, 1.091504e-5, 6.903760e-7,
             3.106570e-2, -1.665665e-4, -1.590188e-7]
    ccsel = np.resize(ccsel, (17, 3))

    # Constants of the arguments of the short-period perturbations.
    dcargs = [5.0974222e0, -7.8604195454652e2,
              3.9584962e0, -5.7533848094674e2,
              1.6338070e0, -1.1506769618935e3,
              2.5487111e0, -3.9302097727326e2,
              4.9255514e0, -5.8849265665348e2,
              1.3363463e0, -5.5076098609303e2,
              1.6072053e0, -5.2237501616674e2,
              1.3629480e0, -1.1790629318198e3,
              5.5657014e0, -1.0977134971135e3,
              5.0708205e0, -1.5774000881978e2,
              3.9318944e0, 5.2963464780000e1,
              4.8989497e0, 3.9809289073258e1,
              1.3097446e0, 7.7540959633708e1,
              3.5147141e0, 7.9618578146517e1,
              3.5413158e0, -5.4868336758022e2]
    dcargs = np.resize(dcargs, (15, 2))

    # Amplitudes ccamps(n,k) of the short-period perturbations.
    ccamps = \
        [-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7,
         -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7,
         6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7,
         1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7,
         9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7,
         7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7,
         -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7,
         -3.228859e-6, 1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7,
         3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7,
         8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8,
         -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0,
         -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0,
         3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0,
         2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0,
         -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0]
    ccamps = np.resize(ccamps, (15, 5))

    # Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
    ccsec3 = -7.757020e-8
    ccsec = [1.289600e-6, 5.550147e-1, 2.076942e00,
             3.102810e-5, 4.035027e00, 3.525565e-1,
             9.124190e-6, 9.990265e-1, 2.622706e00,
             9.793240e-7, 5.508259e00, 1.559103e01]
    ccsec = np.resize(ccsec, (4, 3))

    # Sidereal rates.
    dcsld = 1.990987e-7  # sidereal rate in longitude
    ccsgd = 1.990969e-7  # sidereal rate in mean anomaly

    # Constants used in the calculation of the lunar contribution.
    cckm = 3.122140e-5
    ccmld = 2.661699e-6
    ccfdi = 2.399485e-7

    # Constants dcargm(i,k) of the arguments of the perturbations of the motion
    # of the moon.
    dcargm = [5.1679830e0, 8.3286911095275e3,
              5.4913150e0, -7.2140632838100e3,
              5.9598530e0, 1.5542754389685e4]
    dcargm = np.resize(dcargm, (3, 2))

    # Amplitudes ccampm(n,k) of the perturbations of the moon.
    ccampm = [1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7,
              -2.223581e-2, 5.083103e-8, 1.002548e-2, -2.291823e-8,
              1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]
    ccampm = np.resize(ccampm, (3, 4))

    # ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
    ccpamv = [8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]
    dc1mme = 0.99999696e0

    # Time arguments.
    dt = (dje - dcto) / dcjul
    tvec = np.array([1e0, dt, dt * dt])

    # Values of all elements for the instantaneous dje.
    temp = idlMod(np.dot(dcfel, tvec), dc2pi)

    dml = temp[0]
    forbel = temp[1:8]
    g = forbel[0]  # old fortran equivalence

    deps = idlMod(np.sum(tvec * dceps), dc2pi)
    sorbel = idlMod(np.dot(ccsel, tvec), dc2pi)
    e = sorbel[0]  # old fortran equivalence

    # Secular perturbations in longitude.
    dummy = np.cos(2.0)
    sn = np.sin(idlMod(np.dot(ccsec[::, 1:3], tvec[0:2]), cc2pi))

    # Periodic perturbations of the emb (earth-moon barycenter).
    pertl = np.sum(ccsec[::, 0] * sn) + (dt * ccsec3 * sn[2])
    pertld = 0.0
    pertr = 0.0
    pertrd = 0.0

    for k in smo.range(15):
        a = idlMod((dcargs[k, 0] + dt * dcargs[k, 1]), dc2pi)
        cosa = np.cos(a)
        sina = np.sin(a)
        pertl = pertl + ccamps[k, 0] * cosa + ccamps[k, 1] * sina
        pertr = pertr + ccamps[k, 2] * cosa + ccamps[k, 3] * sina
        if k < 11:
            pertld = pertld + (ccamps[k, 1] * cosa - ccamps[k, 0] * sina) * ccamps[k, 4]
            pertrd = pertrd + (ccamps[k, 3] * cosa - ccamps[k, 2] * sina) * ccamps[k, 4]

    # Elliptic part of the motion of the emb.
    phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 * np.sin(2 * g) +
                           (13 / 3e0) * e * np.sin(3 * g))
    f = g + phi
    sinf = np.sin(f)
    cosf = np.cos(f)
    dpsi = (dc1 - e * e) / (dc1 + e * cosf)
    phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf + e * (1.25 - 0.5 * sinf * sinf))
    psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)

    # Perturbed heliocentric motion of the emb.
    d1pdro = dc1 + pertr
    drd = d1pdro * (psid + dpsi * pertrd)
    drld = d1pdro * dpsi * (dcsld + phid + pertld)
    dtl = idlMod((dml + phi + pertl), dc2pi)
    dsinls = np.sin(dtl)
    dcosls = np.cos(dtl)
    dxhd = drd * dcosls - drld * dsinls
    dyhd = drd * dsinls + drld * dcosls

    # Influence of eccentricity, evection and variation on the geocentric
    # motion of the moon.
    pertl = 0.0
    pertld = 0.0
    pertp = 0.0
    pertpd = 0.0
    for k in smo.range(3):
        a = idlMod((dcargm[k, 0] + dt * dcargm[k, 1]), dc2pi)
        sina = np.sin(a)
        cosa = np.cos(a)
        pertl = pertl + ccampm[k, 0] * sina
        pertld = pertld + ccampm[k, 1] * cosa
        pertp = pertp + ccampm[k, 2] * cosa
        pertpd = pertpd - ccampm[k, 3] * sina

    # Heliocentric motion of the earth.
    tl = forbel[1] + pertl
    sinlm = np.sin(tl)
    coslm = np.cos(tl)
    sigma = cckm / (1.0 + pertp)
    a = sigma * (ccmld + pertld)
    b = sigma * pertpd
    dxhd = dxhd + a * sinlm + b * coslm
    dyhd = dyhd - a * coslm + b * sinlm
    dzhd = -sigma * ccfdi * np.cos(forbel[2])

    # Barycentric motion of the earth.
    dxbd = dxhd * dc1mme
    dybd = dyhd * dc1mme
    dzbd = dzhd * dc1mme
    for k in smo.range(4):
        plon = forbel[k + 3]
        pomg = sorbel[k + 1]
        pecc = sorbel[k + 9]
        tl = idlMod((plon + 2.0 * pecc * np.sin(plon - pomg)), cc2pi)
        dxbd = dxbd + ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
        dybd = dybd - ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
        dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])

    # Transition to mean equator of date.
    dcosep = np.cos(deps)
    dsinep = np.sin(deps)
    dyahd = dcosep * dyhd - dsinep * dzhd
    dzahd = dsinep * dyhd + dcosep * dzhd
    dyabd = dcosep * dybd - dsinep * dzbd
    dzabd = dsinep * dybd + dcosep * dzbd

    # Epoch of mean equinox (deq) of zero implies that we should use
    # Julian ephemeris date (dje) as epoch of mean equinox.
    if deq == 0:
        dvelh = AU * np.array([dxhd, dyahd, dzahd])
        dvelb = AU * np.array([dxbd, dyabd, dzabd])
        return dvelh, dvelb

    # General precession from epoch dje to deq.
    deqdat = (dje - dcto - dcbes) / dctrop + dc1900
    prema = np.transpose(premat(deqdat, deq, FK4=True))

    dvelh = AU * np.dot([dxhd, dyahd, dzahd], prema)
    dvelb = AU * np.dot([dxbd, dyabd, dzabd], prema)

    return dvelh, dvelb
76f6dccceb697996541748704b293de6cfe77cf6
10,892
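Mirroring the example in the quoted IDL documentation (JD 2449398.5 is 15-Feb-1994):

vh, vb = baryvel(2449398.5, 2000)
# vh ~ [-17.07, -22.81, -9.89] km/s (heliocentric)
# vb ~ [-17.08, -22.80, -9.89] km/s (barycentric)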
def uniform(low=0.0, high=1.0, size=None):
    """This function behaves the same as `nlcpy.random.RandomState.uniform`.

    See Also
    --------
    nlcpy.random.RandomState.uniform : Draws samples from a uniform distribution.
    """
    rs = generator._get_rand()
    return rs.uniform(low, high, size=size)
48de653a1721e5602eeefc2bf5182e0100759a31
10,893
def parse_time_interval(interval_str):
    """Convert a human-readable time interval to a tuple of start and end value.

    Args:
        interval_str: (`str`) A human-readable str representing an interval
            (e.g., "[10us, 20us]", "<100s", ">100ms"). Supported time suffixes
            are us, ms, s.

    Returns:
        `Interval` object where start and end are in microseconds.

    Raises:
        ValueError: if the input is not valid.
    """
    str_interval = _parse_interval(interval_str)
    interval_start = 0
    interval_end = float("inf")
    if str_interval.start:
        interval_start = parse_readable_time_str(str_interval.start)
    if str_interval.end:
        interval_end = parse_readable_time_str(str_interval.end)
    if interval_start > interval_end:
        raise ValueError(
            "Invalid interval %s. Start must be before end of interval." %
            interval_str)
    return Interval(interval_start, str_interval.start_included,
                    interval_end, str_interval.end_included)
4edbc180722ddb84f6f2fae1e9854db14571f2d3
10,894
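Usage sketch, assuming the module's _parse_interval / parse_readable_time_str helpers accept the formats shown in the docstring:

iv = parse_time_interval("[10us, 20us]")
# iv.start == 10 and iv.end == 20 (microseconds), both endpoints included
iv = parse_time_interval("<100ms")
# iv.start == 0, iv.end == 100000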
def submatrix(M, x):
    """If x is an array of integer row/col numbers and M a matrix,
    extract the submatrix which is the all x'th rows and cols.
    i.e. A = submatrix(M,x) => A_ij = M_{x_i}{x_j}
    """
    return M[np.ix_(x, x)]
ba3aab45b77d8f7462fd0f2a29c96fb573618d62
10,895
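For example:

import numpy as np

M = np.arange(16).reshape(4, 4)
A = submatrix(M, [0, 2])
# A == [[ 0,  2],
#       [ 8, 10]]   (rows 0 and 2 crossed with columns 0 and 2)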
def inventory_report(products: list) -> str:
    """Gives a detailed report on created products"""
    unique_names, average_price, average_weight, average_flam = _build_report_metrics(products)

    report = f'''ACME CORPORATION OFFICIAL INVENTORY REPORT
Unique product names: {unique_names}
Average price: {average_price}
Average weight: {average_weight}
Average flammability: {average_flam}'''

    print(report)
    return report
96080f5aff04ae8d8578be3940f756b471fdce48
10,896
def iou(a, b):
    """
    Calculates intersection over union (IOU) over two tuples
    """
    (a_x1, a_y1), (a_x2, a_y2) = a
    (b_x1, b_y1), (b_x2, b_y2) = b

    a_area = (a_x2 - a_x1) * (a_y2 - a_y1)
    b_area = (b_x2 - b_x1) * (b_y2 - b_y1)

    dx = min(a_x2, b_x2) - max(a_x1, b_x1)
    dy = min(a_y2, b_y2) - max(a_y1, b_y1)

    if (dx >= 0) and (dy >= 0):
        overlap = dx * dy
        iou = overlap / (a_area + b_area - overlap)
        return iou
    return 0
0e72d00a672c430cce69246cb7d7889ae41ae216
10,897
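For example, two unit-offset 2x2 boxes overlap in a 1x1 square:

a = ((0, 0), (2, 2))
b = ((1, 1), (3, 3))
print(iou(a, b))                  # 1/7 ~= 0.1429: overlap 1, union 4 + 4 - 1
print(iou(a, ((5, 5), (6, 6))))   # 0: disjoint boxes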
def svn_path_is_empty(*args):
    """svn_path_is_empty(char path) -> int"""
    return _core.svn_path_is_empty(*args)
bf6db11940db6767c50a002104a528cf8c7a5363
10,898
import ast
import _pytest.assertion.rewrite
import argparse
import os


def main(args=None):
    """
    CphdConsistency CLI tool. Prints results to stdout.

    Parameters
    ----------
    args: None|List[str]
        List of CLI argument strings. If None use sys.argv
    """
    parser = argparse.ArgumentParser(description="Analyze a CPHD and display inconsistencies")
    parser.add_argument('cphd_or_xml')
    parser.add_argument('-v', '--verbose', default=0, action='count',
                        help="Increase verbosity (can be specified more than once >4 doesn't help)")
    parser.add_argument('--schema', help="Use a supplied schema file", default=DEFAULT_SCHEMA)
    parser.add_argument('--noschema', action='store_const', const=None, dest='schema',
                        help="Disable schema checks")
    parser.add_argument('--signal-data', action='store_true',
                        help="Check the signal data for NaN and +/- Inf")
    config = parser.parse_args(args)

    # Some questionable abuse of the pytest internals
    base, ext = os.path.splitext(__file__)  # python2 can return the '*.pyc' file
    with open(base + '.py', 'r') as fd:
        source = fd.read()
    tree = ast.parse(source)
    try:
        _pytest.assertion.rewrite.rewrite_asserts(tree)
    except TypeError:
        _pytest.assertion.rewrite.rewrite_asserts(tree, source)
    co = compile(tree, __file__, 'exec', dont_inherit=True)
    ns = {}
    exec(co, ns)

    cphd_con = ns['CphdConsistency'].from_file(config.cphd_or_xml, config.schema,
                                               config.signal_data)
    cphd_con.check()
    failures = cphd_con.failures()
    cphd_con.print_result(fail_detail=config.verbose >= 1,
                          include_passed_asserts=config.verbose >= 2,
                          include_passed_checks=config.verbose >= 3,
                          skip_detail=config.verbose >= 4)
    return bool(failures)
17b716b1060581d970b115230dc470bab0e307af
10,899