Columns: text (string, lengths 78 to 104k) and score (float64, 0 to 0.18)
def is_LaTeX(flist, env, abspath):
    """Scan a file list to decide if it's TeX- or LaTeX-flavored."""
    # We need to scan files that are included in case the
    # \documentclass command is in them.

    # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']
    savedpath = modify_env_var(env, 'TEXINPUTS', abspath)
    paths = env['ENV']['TEXINPUTS']
    if SCons.Util.is_List(paths):
        pass
    else:
        # Split at os.pathsep to convert into absolute path
        paths = paths.split(os.pathsep)

    # now that we have the path list restore the env
    if savedpath is _null:
        try:
            del env['ENV']['TEXINPUTS']
        except KeyError:
            pass  # was never set
    else:
        env['ENV']['TEXINPUTS'] = savedpath
    if Verbose:
        print("is_LaTeX search path ", paths)
        print("files to search :", flist)

    # Now that we have the search path and file list, check each one
    for f in flist:
        if Verbose:
            print(" checking for Latex source ", str(f))

        content = f.get_text_contents()
        if LaTeX_re.search(content):
            if Verbose:
                print("file %s is a LaTeX file" % str(f))
            return 1
        if Verbose:
            print("file %s is not a LaTeX file" % str(f))

        # now find included files
        inc_files = []
        inc_files.extend(include_re.findall(content))
        if Verbose:
            print("files included by '%s': " % str(f), inc_files)
        # inc_files is list of file names as given. need to find them
        # using TEXINPUTS paths.

        # search the included files
        for src in inc_files:
            srcNode = FindFile(src, ['.tex', '.ltx', '.latex'], paths, env, requireExt=False)
            # make this a list since is_LaTeX takes a list.
            fileList = [srcNode, ]
            if Verbose:
                print("FindFile found ", srcNode)
            if srcNode is not None:
                file_test = is_LaTeX(fileList, env, abspath)

                # return on first file that finds latex is needed.
                if file_test:
                    return file_test

        if Verbose:
            print(" done scanning ", str(f))

    return 0
0.009897
def data_msg(msg, mtype=None):
    """
    Return a Jupyter display_data message, in both HTML & text formats, by
    formatting a given single message. The passed message may be:
      * An exception (including a KrnlException): will generate an error message
      * A list of messages (with \c mtype equal to \c multi)
      * A single message

      @param msg (str,list): a string, or a list of format string + args,
        or an iterable of (msg,mtype)
      @param mtype (str): the message type (used for the CSS class). If
        it's \c multi, then \c msg will be treated as a multi-message. If
        not passed, \c krn-error will be used for exceptions and \c msg for
        everything else
    """
    if isinstance(msg, KrnlException):
        return msg()  # a KrnlException knows how to format itself
    elif isinstance(msg, Exception):
        return KrnlException(msg)()
    elif mtype == 'multi':
        return data_msglist(msg)
    else:
        return data_msglist([(msg, mtype)])
0.018719
def add_node(self, node_descriptor): """Add a node to the sensor graph based on the description given. The node_descriptor must follow the sensor graph DSL and describe a node whose input nodes already exist. Args: node_descriptor (str): A description of the node to be added including its inputs, triggering conditions, processing function and output stream. """ if self._max_nodes is not None and len(self.nodes) >= self._max_nodes: raise ResourceUsageError("Maximum number of nodes exceeded", max_nodes=self._max_nodes) node, inputs, processor = parse_node_descriptor(node_descriptor, self.model) in_root = False for i, input_data in enumerate(inputs): selector, trigger = input_data walker = self.sensor_log.create_walker(selector) # Constant walkers begin life initialized to 0 so they always read correctly if walker.selector.inexhaustible: walker.reading = IOTileReading(0xFFFFFFFF, walker.selector.as_stream(), 0) node.connect_input(i, walker, trigger) if selector.input and not in_root: self.roots.append(node) in_root = True # Make sure we only add to root list once else: found = False for other in self.nodes: if selector.matches(other.stream): other.connect_output(node) found = True if not found and selector.buffered: raise NodeConnectionError("Node has input that refers to another node that has not been created yet", node_descriptor=node_descriptor, input_selector=str(selector), input_index=i) # Also make sure we add this node's output to any other existing node's inputs # this is important for constant nodes that may be written from multiple places # FIXME: Make sure when we emit nodes, they are topologically sorted for other_node in self.nodes: for selector, trigger in other_node.inputs: if selector.matches(node.stream): node.connect_output(other_node) # Find and load the processing function for this node func = self.find_processing_function(processor) if func is None: raise ProcessingFunctionError("Could not find processing function in installed packages", func_name=processor) node.set_func(processor, func) self.nodes.append(node)
0.004218
def repos(self, repo_type='public', organization='llnl'):
    """
    Retrieves info about the repos of the current organization.
    """
    print('Getting repos.')
    for repo in self.org_retrieved.iter_repos(type=repo_type):
        # JSON
        json = repo.to_json()
        self.repos_json[repo.name] = json
        # CSV
        temp_repo = my_repo.My_Repo()
        temp_repo.name = repo.full_name
        self.total_repos += 1
        temp_repo.contributors = my_github.get_total_contributors(repo)
        self.total_contributors += temp_repo.contributors
        temp_repo.forks = repo.forks_count
        self.total_forks += temp_repo.forks
        temp_repo.stargazers = repo.stargazers
        self.total_stars += temp_repo.stargazers
        temp_repo.pull_requests_open, temp_repo.pull_requests_closed = \
            my_github.get_pull_reqs(repo)
        temp_repo.pull_requests = (temp_repo.pull_requests_open
                                   + temp_repo.pull_requests_closed)
        self.total_pull_reqs += temp_repo.pull_requests_open
        self.total_pull_reqs += temp_repo.pull_requests_closed
        self.total_pull_reqs_open += temp_repo.pull_requests_open
        self.total_pull_reqs_closed += temp_repo.pull_requests_closed
        temp_repo.open_issues = repo.open_issues_count
        self.total_open_issues += temp_repo.open_issues
        temp_repo.closed_issues = my_github.get_issues(repo, organization=organization)
        temp_repo.issues = temp_repo.closed_issues + temp_repo.open_issues
        self.total_closed_issues += temp_repo.closed_issues
        self.total_issues += temp_repo.issues
        my_github.get_languages(repo, temp_repo)
        temp_repo.readme = my_github.get_readme(repo)
        #temp_repo.license = my_github.get_license(repo)
        temp_repo.commits = self.get_commits(repo=repo, organization=organization)
        self.total_commits += temp_repo.commits
        self.all_repos.append(temp_repo)
0.003817
def pause(self, cause):
    """
    Pause the current pipeline.

    :param cause: reason for pausing the pipeline.
    """
    self._pipeline.pause(name=self.data.name, cause=cause)
0.009901
def unmount(self, path):
    """
    Remove a mountpoint from the filesystem.
    """
    del self._mountpoints[self._join_chunks(self._normalize_path(path))]
0.011494
def CallFlow(self, flow_name=None, next_state=None, request_data=None, client_id=None, base_session_id=None, **kwargs): """Creates a new flow and send its responses to a state. This creates a new flow. The flow may send back many responses which will be queued by the framework until the flow terminates. The final status message will cause the entire transaction to be committed to the specified state. Args: flow_name: The name of the flow to invoke. next_state: The state in this flow, that responses to this message should go to. request_data: Any dict provided here will be available in the RequestState protobuf. The Responses object maintains a reference to this protobuf for use in the execution of the state method. (so you can access this data by responses.request). There is no format mandated on this data but it may be a serialized protobuf. client_id: If given, the flow is started for this client. base_session_id: A URN which will be used to build a URN. **kwargs: Arguments for the child flow. Returns: The flow_id of the child flow which was created. Raises: ValueError: The requested next state does not exist. """ if not getattr(self, next_state): raise ValueError("Next state %s is invalid." % next_state) flow_request = rdf_flow_objects.FlowRequest( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, request_id=self.GetNextOutboundId(), next_state=next_state) if request_data is not None: flow_request.request_data = rdf_protodict.Dict().FromDict(request_data) self.flow_requests.append(flow_request) flow_cls = registry.FlowRegistry.FlowClassByName(flow_name) flow.StartFlow( client_id=self.rdf_flow.client_id, flow_cls=flow_cls, parent_flow_obj=self, **kwargs)
0.005908
def _mutect2_filter(broad_runner, in_file, out_file, ref_file):
    """Filter of MuTect2 calls, a separate step in GATK4.
    """
    params = ["-T", "FilterMutectCalls", "--reference", ref_file,
              "--variant", in_file, "--output", out_file]
    return broad_runner.cl_gatk(params, os.path.dirname(out_file))
0.006536
def set_default_region(self, region):
    """
    This sets the default region for detecting license plates. For example,
    setting region to "md" for Maryland or "fr" for France.

    :param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
    :return: None
    """
    region = _convert_to_charp(region)
    self._set_default_region_func(self.alpr_pointer, region)
0.007126
def _maybe_class_to_py_ast(_: GeneratorContext, node: MaybeClass) -> GeneratedPyAST:
    """Generate a Python AST node for accessing a potential Python module
    variable name."""
    assert node.op == NodeOp.MAYBE_CLASS
    return GeneratedPyAST(
        node=ast.Name(
            id=Maybe(_MODULE_ALIASES.get(node.class_)).or_else_get(node.class_),
            ctx=ast.Load(),
        )
    )
0.007576
def distance_inches_ping(self):
    """
    Measurement of the distance detected by the sensor, in inches.

    The sensor will take a single measurement then stop broadcasting.

    If you use this property too frequently (e.g. every 100msec), the sensor
    will sometimes lock up and writing to the mode attribute will return an
    error. A delay of 250msec between each usage seems sufficient to keep the
    sensor from locking up.
    """
    # This mode is special; setting the mode causes the sensor to send out
    # a "ping", but the mode isn't actually changed.
    self.mode = self.MODE_US_SI_IN
    return self.value(0) * self._scale('US_DIST_IN')
0.002729
def env_proxy_settings(selected_settings=None): """Get proxy settings from process environment variables. Get charm proxy settings from environment variables that correspond to juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2, see lp:1782236) in a format suitable for passing to an application that reacts to proxy settings passed as environment variables. Some applications support lowercase or uppercase notation (e.g. curl), some support only lowercase (e.g. wget), there are also subjectively rare cases of only uppercase notation support. no_proxy CIDR and wildcard support also varies between runtimes and applications as there is no enforced standard. Some applications may connect to multiple destinations and expose config options that would affect only proxy settings for a specific destination these should be handled in charms in an application-specific manner. :param selected_settings: format only a subset of possible settings :type selected_settings: list :rtype: Option(None, dict[str, str]) """ SUPPORTED_SETTINGS = { 'http': 'HTTP_PROXY', 'https': 'HTTPS_PROXY', 'no_proxy': 'NO_PROXY', 'ftp': 'FTP_PROXY' } if selected_settings is None: selected_settings = SUPPORTED_SETTINGS selected_vars = [v for k, v in SUPPORTED_SETTINGS.items() if k in selected_settings] proxy_settings = {} for var in selected_vars: var_val = os.getenv(var) if var_val: proxy_settings[var] = var_val proxy_settings[var.lower()] = var_val # Now handle juju-prefixed environment variables. The legacy vs new # environment variable usage is mutually exclusive charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var)) if charm_var_val: proxy_settings[var] = charm_var_val proxy_settings[var.lower()] = charm_var_val if 'no_proxy' in proxy_settings: if _contains_range(proxy_settings['no_proxy']): log(RANGE_WARNING, level=WARNING) return proxy_settings if proxy_settings else None
0.000461
def parse_neighbors(neighbors, vars=[]):
    """Convert a string of the form 'X: Y Z; Y: Z' into a dict mapping
    regions to neighbors. The syntax is a region name followed by a ':'
    followed by zero or more region names, followed by ';', repeated for
    each region name. If you say 'X: Y' you don't need 'Y: X'.
    >>> parse_neighbors('X: Y Z; Y: Z')
    {'Y': ['X', 'Z'], 'X': ['Y', 'Z'], 'Z': ['X', 'Y']}
    """
    dict = DefaultDict([])
    for var in vars:
        dict[var] = []
    specs = [spec.split(':') for spec in neighbors.split(';')]
    for (A, Aneighbors) in specs:
        A = A.strip()
        dict.setdefault(A, [])
        for B in Aneighbors.split():
            dict[A].append(B)
            dict[B].append(A)
    return dict
0.001318
def _sync_repo(self, repo_url: str, revision: str or None = None) -> Path: '''Clone a Git repository to the cache dir. If it has been cloned before, update it. :param repo_url: Repository URL :param revision: Revision: branch, commit hash, or tag :returns: Path to the cloned repository ''' repo_name = repo_url.split('/')[-1].rsplit('.', maxsplit=1)[0] repo_path = (self._cache_path / repo_name).resolve() self.logger.debug(f'Synchronizing with repo; URL: {repo_url}, revision: {revision}') try: self.logger.debug(f'Cloning repo {repo_url} to {repo_path}') run( f'git clone {repo_url} {repo_path}', shell=True, check=True, stdout=PIPE, stderr=STDOUT ) except CalledProcessError as exception: if repo_path.exists(): self.logger.debug('Repo already cloned; pulling from remote') try: run( 'git pull', cwd=repo_path, shell=True, check=True, stdout=PIPE, stderr=STDOUT ) except CalledProcessError as exception: self.logger.warning(str(exception)) else: self.logger.error(str(exception)) if revision: run( f'git checkout {revision}', cwd=repo_path, shell=True, check=True, stdout=PIPE, stderr=STDOUT ) return repo_path
0.002286
def options(self, parser, env):
    """Register commandline options.
    """
    parser.add_option('--collect-only',
                      action='store_true',
                      dest=self.enableOpt,
                      default=env.get('NOSE_COLLECT_ONLY'),
                      help="Enable collect-only: %s [COLLECT_ONLY]" %
                      (self.help()))
0.004988
def _page(q, chunk=1000):
    """ Quick utility to page a query, 1000 items at a time.
    We need this so we don't OOM (out of memory) ourselves loading the world.
    """
    offset = 0
    while True:
        r = False
        for elem in q.limit(chunk).offset(offset):
            r = True
            yield elem
        offset += chunk
        if not r:
            break
0.002653
def _get_xml_dom(self): """ Collects all options set so far, and produce and return an ``xml.dom.minidom.Document`` representing the corresponding XML. """ if self.site_control == SITE_CONTROL_NONE and \ any((self.domains, self.header_domains, self.identities)): raise TypeError(BAD_POLICY) policy_type = minidom.createDocumentType( qualifiedName='cross-domain-policy', publicId=None, systemId='http://www.adobe.com/xml/dtds/cross-domain-policy.dtd' ) policy = minidom.createDocument( None, 'cross-domain-policy', policy_type ) if self.site_control is not None: control_element = policy.createElement('site-control') control_element.setAttribute( 'permitted-cross-domain-policies', self.site_control ) policy.documentElement.appendChild(control_element) for elem_type in ('domains', 'header_domains', 'identities'): getattr(self, '_add_{}_xml'.format(elem_type))(policy) return policy
0.001695
def csv_to_matrix(csv_file_path):
    """Load a CSV file into a Python matrix of strings.

    Args:
        csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
    """
    mtx = []
    with open(csv_file_path) as csv_data_file:
        for row in csv_data_file:
            mtx.append(row.split(','))
    return mtx
0.002976
def detunings_combinations(pairs):
    r"""Return all combinations of detunings.

    >>> Ne = 6
    >>> Nl = 2
    >>> omega_level = [0.0, 100.0, 100.0, 200.0, 200.0, 300.0]
    >>> xi = np.zeros((Nl, Ne, Ne))
    >>> coup = [[(1, 0), (2, 0)], [(3, 0), (4, 0), (5, 0)]]
    >>> for l in range(Nl):
    ...     for pair in coup[l]:
    ...         xi[l, pair[0], pair[1]] = 1.0
    ...         xi[l, pair[1], pair[0]] = 1.0

    >>> aux = define_simplification(omega_level, xi, Nl)
    >>> u, invu, omega_levelu, Neu, xiu = aux
    >>> pairs = detunings_indices(Neu, Nl, xiu)
    >>> detunings_combinations(pairs)
    [[(1, 0), (2, 0)], [(1, 0), (3, 0)]]
    """
    def iter(pairs, combs, l):
        combs_n = []
        for i in range(len(combs)):
            for j in range(len(pairs[l])):
                combs_n += [combs[i] + [pairs[l][j]]]
        return combs_n

    Nl = len(pairs)
    combs = [[pairs[0][k]] for k in range(len(pairs[0]))]
    for l in range(1, Nl):
        combs = iter(pairs, combs, l)

    return combs
0.002904
def get_suggested_type_names( schema: GraphQLSchema, type_: GraphQLOutputType, field_name: str ) -> List[str]: """ Get a list of suggested type names. Go through all of the implementations of type, as well as the interfaces that they implement. If any of those types include the provided field, suggest them, sorted by how often the type is referenced, starting with Interfaces. """ if is_abstract_type(type_): type_ = cast(GraphQLAbstractType, type_) suggested_object_types = [] interface_usage_count: Dict[str, int] = defaultdict(int) for possible_type in schema.get_possible_types(type_): if field_name not in possible_type.fields: continue # This object type defines this field. suggested_object_types.append(possible_type.name) for possible_interface in possible_type.interfaces: if field_name not in possible_interface.fields: continue # This interface type defines this field. interface_usage_count[possible_interface.name] += 1 # Suggest interface types based on how common they are. suggested_interface_types = sorted( interface_usage_count, key=lambda k: -interface_usage_count[k] ) # Suggest both interface and object types. return suggested_interface_types + suggested_object_types # Otherwise, must be an Object type, which does not have possible fields. return []
0.000649
def compute_fd_hessian(fun, x0, epsilon, anagrad=True):
    """Compute the Hessian using the finite difference method

       Arguments:
        | ``fun``  --  the function for which the Hessian should be computed,
                       more info below
        | ``x0``  --  the point at which the Hessian must be computed
        | ``epsilon``  --  a small scalar step size used to compute the
                           finite differences

       Optional argument:
        | ``anagrad``  --  when True, analytical gradients are used
                           [default=True]

       The function ``fun`` takes a mandatory argument ``x`` and an optional
       argument ``do_gradient``:
        | ``x``  --  the arguments of the function to be tested
        | ``do_gradient``  --  When False, only the function value is
                               returned. When True, a 2-tuple with the
                               function value and the gradient are returned
                               [default=False]
    """
    N = len(x0)

    def compute_gradient(x):
        if anagrad:
            return fun(x, do_gradient=True)[1]
        else:
            gradient = np.zeros(N, float)
            for i in range(N):
                xh = x.copy()
                xh[i] += 0.5*epsilon
                xl = x.copy()
                xl[i] -= 0.5*epsilon
                gradient[i] = (fun(xh)-fun(xl))/epsilon
            return gradient

    hessian = np.zeros((N, N), float)
    for i in range(N):
        xh = x0.copy()
        xh[i] += 0.5*epsilon
        xl = x0.copy()
        xl[i] -= 0.5*epsilon
        hessian[i] = (compute_gradient(xh) - compute_gradient(xl))/epsilon

    return 0.5*(hessian + hessian.transpose())
0.001176
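A minimal usage sketch for compute_fd_hessian above, assuming the module's np alias for NumPy; quad is a hypothetical test function that follows the do_gradient convention described in the docstring:

    import numpy as np

    def quad(x, do_gradient=False):
        # f(x) = 0.5 * x.x, whose gradient is x and whose Hessian is the identity
        value = 0.5 * np.dot(x, x)
        if do_gradient:
            return value, x.copy()
        return value

    hessian = compute_fd_hessian(quad, np.array([1.0, -2.0]), epsilon=1e-4)
    # hessian is approximately the 2x2 identity matrix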
def ask_user(prompt: str, default: str = None) -> Optional[str]:
    """
    Prompts the user, with a default. Returns user input from ``stdin``.
    """
    if default is None:
        prompt += ": "
    else:
        prompt += " [" + default + "]: "
    result = input(prompt)
    return result if len(result) > 0 else default
0.003049
def send_msg(self, connection, data):
    """
    Function to send messages

    Parameters
    ----------
    connection: socket or connection
    data: data that can be serialized to json
    """
    # serialize as JSON
    msg = json.dumps(data)
    # Prefix each message with a 4-byte length (network byte order)
    msg = struct.pack('>I', len(msg)).decode() + msg
    connection.sendall(msg.encode())
    return
0.00611
def xml_marshal_complete_multipart_upload(uploaded_parts):
    """
    Marshal's complete multipart upload request based on *uploaded_parts*.

    :param uploaded_parts: List of all uploaded parts, ordered by part number.
    :return: Marshalled XML data.
    """
    root = s3_xml.Element('CompleteMultipartUpload', {'xmlns': _S3_NAMESPACE})

    for uploaded_part in uploaded_parts:
        part_number = uploaded_part.part_number
        part = s3_xml.SubElement(root, 'Part')

        part_num = s3_xml.SubElement(part, 'PartNumber')
        part_num.text = str(part_number)

        etag = s3_xml.SubElement(part, 'ETag')
        etag.text = '"' + uploaded_part.etag + '"'

    data = io.BytesIO()
    s3_xml.ElementTree(root).write(data, encoding=None, xml_declaration=False)
    return data.getvalue()
0.001175
def copy(self, src_url, dst_url):
    """Copy an S3 object to another S3 location."""
    src_bucket, src_key = _parse_url(src_url)
    dst_bucket, dst_key = _parse_url(dst_url)
    if not dst_bucket:
        dst_bucket = src_bucket

    params = {
        'copy_source': '/'.join((src_bucket, src_key)),
        'bucket': dst_bucket,
        'key': dst_key,
    }
    return self.call("CopyObject", **params)
0.004415
def cmp_code_objects(version, is_pypy, code_obj1, code_obj2, verify, name=''): """ Compare two code-objects. This is the main part of this module. """ # print code_obj1, type(code_obj2) assert iscode(code_obj1), \ "cmp_code_object first object type is %s, not code" % type(code_obj1) assert iscode(code_obj2), \ "cmp_code_object second object type is %s, not code" % type(code_obj2) # print dir(code_obj1) if isinstance(code_obj1, object): # new style classes (Python 2.2) # assume _both_ code objects to be new stle classes assert dir(code_obj1) == dir(code_obj2) else: # old style classes assert dir(code_obj1) == code_obj1.__members__ assert dir(code_obj2) == code_obj2.__members__ assert code_obj1.__members__ == code_obj2.__members__ if name == '__main__': name = code_obj1.co_name else: name = '%s.%s' % (name, code_obj1.co_name) if name == '.?': name = '__main__' if isinstance(code_obj1, object) and code_equal(code_obj1, code_obj2): # use the new style code-classes' __cmp__ method, which # should be faster and more sophisticated # if this compare fails, we use the old routine to # find out, what exactly is nor equal # if this compare succeds, simply return # return pass if isinstance(code_obj1, object): members = [x for x in dir(code_obj1) if x.startswith('co_')] else: members = dir(code_obj1) members.sort() # ; members.reverse() tokens1 = None for member in members: if member in __IGNORE_CODE_MEMBERS__ or verify != 'verify': pass elif member == 'co_code': if verify != 'strong': continue scanner = get_scanner(version, is_pypy, show_asm=False) global JUMP_OPS JUMP_OPS = list(scan.JUMP_OPS) + ['JUMP_BACK'] # use changed Token class # We (re)set this here to save exception handling, # which would get confusing. scanner.setTokenClass(Token) try: # ingest both code-objects tokens1, customize = scanner.ingest(code_obj1) del customize # save memory tokens2, customize = scanner.ingest(code_obj2) del customize # save memory finally: scanner.resetTokenClass() # restore Token class targets1 = dis.findlabels(code_obj1.co_code) tokens1 = [t for t in tokens1 if t.kind != 'COME_FROM'] tokens2 = [t for t in tokens2 if t.kind != 'COME_FROM'] i1 = 0; i2 = 0 offset_map = {}; check_jumps = {} while i1 < len(tokens1): if i2 >= len(tokens2): if len(tokens1) == len(tokens2) + 2 \ and tokens1[-1].kind == 'RETURN_VALUE' \ and tokens1[-2].kind == 'LOAD_CONST' \ and tokens1[-2].pattr is None \ and tokens1[-3].kind == 'RETURN_VALUE': break else: raise CmpErrorCodeLen(name, tokens1, tokens2) offset_map[tokens1[i1].offset] = tokens2[i2].offset for idx1, idx2, offset2 in check_jumps.get(tokens1[i1].offset, []): if offset2 != tokens2[i2].offset: raise CmpErrorCode(name, tokens1[idx1].offset, tokens1[idx1], tokens2[idx2], tokens1, tokens2) if tokens1[i1].kind != tokens2[i2].kind: if tokens1[i1].kind == 'LOAD_CONST' == tokens2[i2].kind: i = 1 while tokens1[i1+i].kind == 'LOAD_CONST': i += 1 if tokens1[i1+i].kind.startswith(('BUILD_TUPLE', 'BUILD_LIST')) \ and i == int(tokens1[i1+i].kind.split('_')[-1]): t = tuple([ elem.pattr for elem in tokens1[i1:i1+i] ]) if t != tokens2[i2].pattr: raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1], tokens2[i2], tokens1, tokens2) i1 += i + 1 i2 += 1 continue elif i == 2 and tokens1[i1+i].kind == 'ROT_TWO' and tokens2[i2+1].kind == 'UNPACK_SEQUENCE_2': i1 += 3 i2 += 2 continue elif i == 2 and tokens1[i1+i].kind in BIN_OP_FUNCS: f = BIN_OP_FUNCS[tokens1[i1+i].kind] if f(tokens1[i1].pattr, tokens1[i1+1].pattr) == tokens2[i2].pattr: i1 += 3 i2 += 1 continue elif tokens1[i1].kind == 
'UNARY_NOT': if tokens2[i2].kind == 'POP_JUMP_IF_TRUE': if tokens1[i1+1].kind == 'POP_JUMP_IF_FALSE': i1 += 2 i2 += 1 continue elif tokens2[i2].kind == 'POP_JUMP_IF_FALSE': if tokens1[i1+1].kind == 'POP_JUMP_IF_TRUE': i1 += 2 i2 += 1 continue elif tokens1[i1].kind in ('JUMP_FORWARD', 'JUMP_BACK') \ and tokens1[i1-1].kind == 'RETURN_VALUE' \ and tokens2[i2-1].kind in ('RETURN_VALUE', 'RETURN_END_IF') \ and int(tokens1[i1].offset) not in targets1: i1 += 1 continue elif tokens1[i1].kind == 'JUMP_BACK' and tokens2[i2].kind == 'CONTINUE': # FIXME: should make sure that offset is inside loop, not outside of it i1 += 2 i2 += 2 continue elif tokens1[i1].kind == 'JUMP_FORWARD' and tokens2[i2].kind == 'JUMP_BACK' \ and tokens1[i1+1].kind == 'JUMP_BACK' and tokens2[i2+1].kind == 'JUMP_BACK' \ and int(tokens1[i1].pattr) == int(tokens1[i1].offset) + 3: if int(tokens1[i1].pattr) == int(tokens1[i1+1].offset): i1 += 2 i2 += 2 continue elif tokens1[i1].kind == 'LOAD_NAME' and tokens2[i2].kind == 'LOAD_CONST' \ and tokens1[i1].pattr == 'None' and tokens2[i2].pattr is None: pass elif tokens1[i1].kind == 'LOAD_GLOBAL' and tokens2[i2].kind == 'LOAD_NAME' \ and tokens1[i1].pattr == tokens2[i2].pattr: pass elif tokens1[i1].kind == 'LOAD_ASSERT' and tokens2[i2].kind == 'LOAD_NAME' \ and tokens1[i1].pattr == tokens2[i2].pattr: pass elif (tokens1[i1].kind == 'RETURN_VALUE' and tokens2[i2].kind == 'RETURN_END_IF'): pass elif (tokens1[i1].kind == 'BUILD_TUPLE_0' and tokens2[i2].pattr == ()): pass else: raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1], tokens2[i2], tokens1, tokens2) elif tokens1[i1].kind in JUMP_OPS and tokens1[i1].pattr != tokens2[i2].pattr: if tokens1[i1].kind == 'JUMP_BACK': dest1 = int(tokens1[i1].pattr) dest2 = int(tokens2[i2].pattr) if offset_map[dest1] != dest2: raise CmpErrorCode(name, tokens1[i1].offset, tokens1[i1], tokens2[i2], tokens1, tokens2) else: # import pdb; pdb.set_trace() try: dest1 = int(tokens1[i1].pattr) if dest1 in check_jumps: check_jumps[dest1].append((i1, i2, dest2)) else: check_jumps[dest1] = [(i1, i2, dest2)] except: pass i1 += 1 i2 += 1 del tokens1, tokens2 # save memory elif member == 'co_consts': # partial optimization can make the co_consts look different, # so we'll just compare the code consts codes1 = ( c for c in code_obj1.co_consts if hasattr(c, 'co_consts') ) codes2 = ( c for c in code_obj2.co_consts if hasattr(c, 'co_consts') ) for c1, c2 in zip(codes1, codes2): cmp_code_objects(version, is_pypy, c1, c2, verify, name=name) elif member == 'co_flags': flags1 = code_obj1.co_flags flags2 = code_obj2.co_flags if is_pypy: # For PYPY for now we don't care about PYPY_SOURCE_IS_UTF8: flags2 &= ~0x0100 # PYPY_SOURCE_IS_UTF8 # We also don't care about COROUTINE or GENERATOR for now flags1 &= ~0x000000a0 flags2 &= ~0x000000a0 if flags1 != flags2: raise CmpErrorMember(name, 'co_flags', pretty_flags(flags1), pretty_flags(flags2)) else: # all other members must be equal if getattr(code_obj1, member) != getattr(code_obj2, member): raise CmpErrorMember(name, member, getattr(code_obj1, member), getattr(code_obj2, member))
0.005122
def _instantiateFont(self, path):
    """ Return an instance of a font object with all the given subclasses"""
    try:
        return self.fontClass(path,
                              layerClass=self.layerClass,
                              libClass=self.libClass,
                              kerningClass=self.kerningClass,
                              groupsClass=self.groupsClass,
                              infoClass=self.infoClass,
                              featuresClass=self.featuresClass,
                              glyphClass=self.glyphClass,
                              glyphContourClass=self.glyphContourClass,
                              glyphPointClass=self.glyphPointClass,
                              glyphComponentClass=self.glyphComponentClass,
                              glyphAnchorClass=self.glyphAnchorClass)
    except TypeError:
        # if our fontClass doesnt support all the additional classes
        return self.fontClass(path)
0.015312
def executemanycolumns(self, sql, columns): """ Execute an SQL command or query with multiple parameter sets that are passed in a column-wise fashion as opposed to the row-wise parameters in ``executemany()``. This function is a turbodbc-specific extension to PEP-249. :param sql: A (unicode) string that contains the SQL command or query. If you would like to use parameters, please use a question mark ``?`` at the location where the parameter shall be inserted. :param columns: An iterable of NumPy MaskedArrays. The Arrays represent the columnar parameter data, :return: The ``Cursor`` object to allow chaining of operations. """ self.rowcount = -1 self._assert_valid() self.impl.prepare(sql) if _has_arrow_support(): import pyarrow as pa if isinstance(columns, pa.Table): from turbodbc_arrow_support import set_arrow_parameters for column in columns.itercolumns(): if column.data.num_chunks != 1: raise NotImplementedError("Chunked Arrays are not yet supported") set_arrow_parameters(self.impl, columns) return self._execute() # Workaround to give users a better error message without a need # to import pyarrow if columns.__class__.__module__.startswith('pyarrow'): raise Error(_NO_ARROW_SUPPORT_MSG) if not _has_numpy_support(): raise Error(_NO_NUMPY_SUPPORT_MSG) _assert_numpy_column_preconditions(columns) from numpy.ma import MaskedArray from turbodbc_numpy_support import set_numpy_parameters split_arrays = [] for column in columns: if isinstance(column, MaskedArray): split_arrays.append((column.data, column.mask, str(column.dtype))) else: split_arrays.append((column, False, str(column.dtype))) set_numpy_parameters(self.impl, split_arrays) return self._execute()
0.004695
def _to_diagonally_dominant(mat):
    """Make matrix unweighted diagonally dominant using the Laplacian."""
    mat += np.diag(np.sum(mat != 0, axis=1) + 0.01)
    return mat
0.005747
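A small worked example for the helper above, assuming NumPy is imported as np in the module:

    import numpy as np

    m = np.array([[0.0, 1.0],
                  [1.0, 0.0]])
    # each diagonal entry becomes the count of non-zero entries in its row plus 0.01
    _to_diagonally_dominant(m)
    # -> [[1.01, 1.  ],
    #     [1.  , 1.01]]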
def _get_spades_circular_nodes(self, fastg):
    '''Returns set of names of nodes in SPAdes fastg file that are circular.
    Names will match those in spades fasta file'''
    seq_reader = pyfastaq.sequences.file_reader(fastg)
    names = set([x.id.rstrip(';') for x in seq_reader if ':' in x.id])
    found_fwd = set()
    found_rev = set()
    for name in names:
        l = name.split(':')
        if len(l) != 2:
            continue
        if l[0] == l[1]:
            if l[0][-1] == "'":
                found_rev.add(l[0][:-1])
            else:
                found_fwd.add(l[0])
    return found_fwd.intersection(found_rev)
0.00578
def compiled_foreign_keys(self): """Returns compiled foreign key definitions""" def get_column_args(column): tmp = [] for arg_name, arg_val in column.items(): if arg_name not in ('name', 'type', 'reference'): if arg_name in ('server_default', 'server_onupdate'): arg_val = '"{}"'.format(arg_val) tmp.append(ALCHEMY_TEMPLATES.column_arg.safe_substitute(arg_name=arg_name, arg_val=arg_val)) return ", ".join(tmp) def get_fkey_args(column): table = column['reference']['table'] column = column['reference']['column'] return ALCHEMY_TEMPLATES.foreign_key_arg.safe_substitute(reference_table=table, reference_column=column) res = [] for column in self.foreign_key_definitions: column_args = get_column_args(column) column_type, type_params = ModelCompiler.get_col_type_info(column.get('type')) column_name = column.get('name') reference = get_fkey_args(column) if column_type in MUTABLE_DICT_TYPES: column_type = ALCHEMY_TEMPLATES.mutable_dict_type.safe_substitute(type=column_type, type_params=type_params) type_params = '' res.append( ALCHEMY_TEMPLATES.foreign_key.safe_substitute(column_name=column_name, column_type=column_type, column_args=column_args, foreign_key_args=reference, type_params=type_params)) join_string = "\n" + self.tab return join_string.join(res)
0.006494
def generalized_lsp_value(times, mags, errs, omega): '''Generalized LSP value for a single omega. The relations used are:: P(w) = (1/YY) * (YC*YC/CC + YS*YS/SS) where: YC, YS, CC, and SS are all calculated at T and where: tan 2omegaT = 2*CS/(CC - SS) and where: Y = sum( w_i*y_i ) C = sum( w_i*cos(wT_i) ) S = sum( w_i*sin(wT_i) ) YY = sum( w_i*y_i*y_i ) - Y*Y YC = sum( w_i*y_i*cos(wT_i) ) - Y*C YS = sum( w_i*y_i*sin(wT_i) ) - Y*S CpC = sum( w_i*cos(w_T_i)*cos(w_T_i) ) CC = CpC - C*C SS = (1 - CpC) - S*S CS = sum( w_i*cos(w_T_i)*sin(w_T_i) ) - C*S Parameters ---------- times,mags,errs : np.array The time-series to calculate the periodogram value for. omega : float The frequency to calculate the periodogram value at. Returns ------- periodogramvalue : float The normalized periodogram at the specified test frequency `omega`. ''' one_over_errs2 = 1.0/(errs*errs) W = npsum(one_over_errs2) wi = one_over_errs2/W sin_omegat = npsin(omega*times) cos_omegat = npcos(omega*times) sin2_omegat = sin_omegat*sin_omegat cos2_omegat = cos_omegat*cos_omegat sincos_omegat = sin_omegat*cos_omegat # calculate some more sums and terms Y = npsum( wi*mags ) C = npsum( wi*cos_omegat ) S = npsum( wi*sin_omegat ) CpS = npsum( wi*sincos_omegat ) CpC = npsum( wi*cos2_omegat ) CS = CpS - C*S CC = CpC - C*C SS = 1 - CpC - S*S # use SpS = 1 - CpC # calculate tau tan_omega_tau_top = 2.0*CS tan_omega_tau_bottom = CC - SS tan_omega_tau = tan_omega_tau_top/tan_omega_tau_bottom tau = nparctan(tan_omega_tau)/(2.0*omega) YpY = npsum( wi*mags*mags) YpC = npsum( wi*mags*cos_omegat ) YpS = npsum( wi*mags*sin_omegat ) # SpS = npsum( wi*sin2_omegat ) # the final terms YY = YpY - Y*Y YC = YpC - Y*C YS = YpS - Y*S periodogramvalue = (YC*YC/CC + YS*YS/SS)/YY return periodogramvalue
0.007637
def scroll_one_line_up(event): """ scroll_offset -= 1 """ w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name) b = event.cli.current_buffer if w: # When the cursor is at the bottom, move to the previous line. (Otherwise, only scroll.) if w.render_info: info = w.render_info if w.vertical_scroll > 0: first_line_height = info.get_height_for_line(info.first_visible_line()) cursor_up = info.cursor_position.y - (info.window_height - 1 - first_line_height - info.configured_scroll_offsets.bottom) # Move cursor up, as many steps as the height of the first line. # TODO: not entirely correct yet, in case of line wrapping and many long lines. for _ in range(max(0, cursor_up)): b.cursor_position += b.document.get_cursor_up_position() # Scroll window w.vertical_scroll -= 1
0.006699
def register_consumer():
    """Given a hostname and port attempting to be accessed, return
    a unique consumer ID for accessing logs from the referenced container."""
    global _consumers
    hostname, port = request.form['hostname'], request.form['port']
    app_name = _app_name_from_forwarding_info(hostname, port)
    containers = get_dusty_containers([app_name], include_exited=True)
    if not containers:
        raise ValueError('No container exists for app {}'.format(app_name))
    container = containers[0]
    new_id = uuid1()
    new_consumer = Consumer(container['Id'], datetime.utcnow())
    _consumers[str(new_id)] = new_consumer
    response = jsonify({'app_name': app_name, 'consumer_id': new_id})
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
    return response
0.001148
def WriteVarString(self, value, encoding="utf-8"):
    """
    Write a string value to the stream.
    Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention

    Args:
        value (string): value to write to the stream.
        encoding (str): string encoding format.
    """
    if type(value) is str:
        value = value.encode(encoding)

    length = len(value)
    ba = bytearray(value)
    byts = binascii.hexlify(ba)
    string = byts.decode(encoding)
    self.WriteVarInt(length)
    self.WriteBytes(string)
0.004739
def update_note(note, **kwargs):
    """
    Update a note
    """
    note_i = _get_note(note.id)

    if note.ref_key != note_i.ref_key:
        raise HydraError("Cannot convert a %s note to a %s note. Please create a new note instead." % (note_i.ref_key, note.ref_key))

    note_i.set_ref(note.ref_key, note.ref_id)
    note_i.value = note.value

    db.DBSession.flush()
    return note_i
0.007634
def tvdb_login(api_key):
    """ Logs into TVDb using the provided api key

    Note: You can register for a free TVDb key at thetvdb.com/?tab=apiregister
    Online docs: api.thetvdb.com/swagger#!/Authentication/post_login=
    """
    url = "https://api.thetvdb.com/login"
    body = {"apikey": api_key}
    status, content = _request_json(url, body=body, cache=False)
    if status == 401:
        raise MapiProviderException("invalid api key")
    elif status != 200 or not content.get("token"):
        raise MapiNetworkException("TVDb down or unavailable?")
    return content["token"]
0.001692
def disable_inheritance(path, objectType, copy=True): ''' Disable inheritance on an object Args: path: The path to the object objectType: The type of object (FILE, DIRECTORY, REGISTRY) copy: True will copy the Inherited ACEs to the DACL before disabling inheritance Returns (dict): A dictionary containing the results CLI Example: .. code-block:: bash salt 'minion-id' win_dacl.disable_inheritance c:\temp directory ''' dc = daclConstants() objectType = dc.getObjectTypeBit(objectType) path = dc.processPath(path, objectType) return _set_dacl_inheritance(path, objectType, False, copy, None)
0.002972
def list_pkgs(installed=True, attributes=True): ''' Lists installed packages. Due to how nix works, it defaults to just doing a ``nix-env -q``. :param bool installed: list only installed packages. This can be a very long list (12,000+ elements), so caution is advised. Default: True :param bool attributes: show the attributes of the packages when listing all packages. Default: True :return: Packages installed or available, along with their attributes. :rtype: list(list(str)) .. code-block:: bash salt '*' nix.list_pkgs salt '*' nix.list_pkgs installed=False ''' # We don't use -Q here, as it obfuscates the attribute names on full package listings. cmd = _nix_env() cmd.append('--query') if installed: # explicitly add this option for consistency, it's normally the default cmd.append('--installed') if not installed: cmd.append('--available') # We only show attributes if we're not doing an `installed` run. # The output of `nix-env -qaP` and `nix-env -qP` are vastly different: # `nix-env -qaP` returns a list such as 'attr.path name-version' # `nix-env -qP` returns a list of 'installOrder name-version' # Install order is useful to unambiguously select packages on a single # machine, but on more than one it can be a bad thing to specify. if attributes: cmd.append('--attr-path') out = _run(cmd) return [s.split() for s in salt.utils.itertools.split(out['stdout'], '\n')]
0.002483
def _AbortJoin(self, timeout=None):
    """Aborts all registered processes by joining with the parent process.

    Args:
      timeout (int): number of seconds to wait for processes to join, where
          None represents no timeout.
    """
    for pid, process in iter(self._processes_per_pid.items()):
        logger.debug('Waiting for process: {0:s} (PID: {1:d}).'.format(
            process.name, pid))
        process.join(timeout=timeout)
        if not process.is_alive():
            logger.debug('Process {0:s} (PID: {1:d}) stopped.'.format(
                process.name, pid))
0.006969
async def brpoplpush(self, src, dst, timeout=0):
    """
    Pop a value off the tail of ``src``, push it on the head of ``dst``
    and then return it.

    This command blocks until a value is in ``src`` or until ``timeout``
    seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
    forever.
    """
    if timeout is None:
        timeout = 0
    return await self.execute_command('BRPOPLPUSH', src, dst, timeout)
0.004219
def all(self):
    """ Returns list with vids of all indexed partitions. """
    partitions = []

    query = text("""
        SELECT dataset_vid, vid
        FROM partition_index;""")

    for result in self.backend.library.database.connection.execute(query):
        dataset_vid, vid = result
        partitions.append(PartitionSearchResult(dataset_vid=dataset_vid, vid=vid, score=1))
    return partitions
0.006757
def _compute_schoenfeld_within_strata(self, X, T, E, weights): """ A positive value of the residual shows an X value that is higher than expected at that death time. """ # TODO: the diff_against is gross # This uses Efron ties. n, d = X.shape if not np.any(E): # sometimes strata have no deaths. This means nothing is returned # in the below code. return np.zeros((n, d)) # Init risk and tie sums to zero risk_phi, tie_phi = 0, 0 risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d)) # Init number of ties and weights weight_count = 0.0 tie_count = 0 scores = weights * np.exp(np.dot(X, self.hazards_)) diff_against = [] schoenfeld_residuals = np.empty((0, d)) # Iterate backwards to utilize recursive relationship for i in range(n - 1, -1, -1): # Doing it like this to preserve shape ti = T[i] ei = E[i] xi = X[i : i + 1] score = scores[i : i + 1] w = weights[i] # Calculate phi values phi_i = score phi_x_i = phi_i * xi # Calculate sums of Risk set risk_phi = risk_phi + phi_i risk_phi_x = risk_phi_x + phi_x_i # Calculate sums of Ties, if this is an event diff_against.append((xi, ei)) if ei: tie_phi = tie_phi + phi_i tie_phi_x = tie_phi_x + phi_x_i # Keep track of count tie_count += 1 # aka death counts weight_count += w if i > 0 and T[i - 1] == ti: # There are more ties/members of the risk set continue elif tie_count == 0: for _ in diff_against: schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0) diff_against = [] continue # There was atleast one event and no more ties remain. Time to sum. weighted_mean = np.zeros((1, d)) for l in range(tie_count): numer = risk_phi_x - l * tie_phi_x / tie_count denom = risk_phi - l * tie_phi / tie_count weighted_mean += numer / (denom * tie_count) for xi, ei in diff_against: schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0) # reset tie values tie_count = 0 weight_count = 0.0 tie_phi = 0 tie_phi_x = np.zeros((1, d)) diff_against = [] return schoenfeld_residuals[::-1]
0.002886
def streamReachAndWatershed(self, delineate, out_stream_order_grid, out_network_connectivity_tree, out_network_coordinates, out_stream_reach_file, out_watershed_grid, pit_filled_elevation_grid=None, flow_dir_grid=None, contributing_area_grid=None, stream_raster_grid=None, outlet_shapefile=None ): """ Creates vector network and shapefile from stream raster grid """ log("PROCESS: StreamReachAndWatershed") if pit_filled_elevation_grid: self.pit_filled_elevation_grid = pit_filled_elevation_grid if flow_dir_grid: self.flow_dir_grid = flow_dir_grid if contributing_area_grid: self.contributing_area_grid = contributing_area_grid if stream_raster_grid: self.stream_raster_grid = stream_raster_grid # Construct the taudem command line. cmd = [os.path.join(self.taudem_exe_path, 'streamnet'), '-fel', self.pit_filled_elevation_grid, '-p', self.flow_dir_grid, '-ad8', self.contributing_area_grid, '-src', self.stream_raster_grid, '-ord', out_stream_order_grid, '-tree', out_network_connectivity_tree, '-coord', out_network_coordinates, '-net', out_stream_reach_file, '-w', out_watershed_grid, ] if outlet_shapefile: cmd += ['-o', outlet_shapefile] if delineate: cmd += ['-sw'] self._run_mpi_cmd(cmd) # create projection file self._add_prj_file(self.pit_filled_elevation_grid, out_stream_order_grid) self._add_prj_file(self.pit_filled_elevation_grid, out_stream_reach_file) self._add_prj_file(self.pit_filled_elevation_grid, out_watershed_grid)
0.006178
def keyword(self, **kwargs):
    """
    Search for keywords by name.

    Args:
        query: CGI escaped string.
        page: (optional) Minimum value of 1. Expected value is an integer.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('keyword')

    response = self._GET(path, kwargs)
    self._set_attrs_to_values(response)
    return response
0.00432
def stage_tc_indicator_entity(self, indicator_data):
    """Convert JSON data to TCEntity.

    Args:
        indicator_data (str): [description]

    Returns:
        [type]: [description]
    """
    path = '@.{value: summary, '
    path += 'type: type, '
    path += 'ownerName: ownerName, '
    path += 'confidence: confidence || `0`, '
    path += 'rating: rating || `0`}'

    return self.path_data(indicator_data, path)
0.004228
def map_event_code(event_code):
    """Map a specific event_code to an event group."""
    event_code = int(event_code)

    # Honestly, these are just guessing based on the below event list.
    # It could be wrong, I have no idea.
    if 1100 <= event_code <= 1199:
        return ALARM_GROUP
    elif 3100 <= event_code <= 3199:
        return ALARM_END_GROUP
    elif 1300 <= event_code <= 1399:
        return PANEL_FAULT_GROUP
    elif 3300 <= event_code <= 3399:
        return PANEL_RESTORE_GROUP
    elif 1400 <= event_code <= 1499:
        return DISARM_GROUP
    elif 3400 <= event_code <= 3799:
        return ARM_GROUP
    elif 1600 <= event_code <= 1699:
        return TEST_GROUP
    elif 5000 <= event_code <= 5099:
        return CAPTURE_GROUP
    elif 5100 <= event_code <= 5199:
        return DEVICE_GROUP
    elif 5200 <= event_code <= 5299:
        return AUTOMATION_GROUP

    return None
0.001087
def describe(self):
    """Describes the method.

    :return: Description
    :rtype: dict[str, object]
    """
    return {
        "name": self.name,
        "params": self.params,
        "returns": self.returns,
        "description": self.description,
    }
0.006601
def get_content_scoped_package(self, feed_id, package_scope, unscoped_package_name, package_version, **kwargs): """GetContentScopedPackage. [Preview API] :param str feed_id: :param str package_scope: :param str unscoped_package_name: :param str package_version: :rtype: object """ route_values = {} if feed_id is not None: route_values['feedId'] = self._serialize.url('feed_id', feed_id, 'str') if package_scope is not None: route_values['packageScope'] = self._serialize.url('package_scope', package_scope, 'str') if unscoped_package_name is not None: route_values['unscopedPackageName'] = self._serialize.url('unscoped_package_name', unscoped_package_name, 'str') if package_version is not None: route_values['packageVersion'] = self._serialize.url('package_version', package_version, 'str') response = self._send(http_method='GET', location_id='09a4eafd-123a-495c-979c-0eda7bdb9a14', version='5.0-preview.1', route_values=route_values, accept_media_type='application/octet-stream') if "callback" in kwargs: callback = kwargs["callback"] else: callback = None return self._client.stream_download(response, callback=callback)
0.005502
def fetch_hg_push_log(repo_name, repo_url):
    """
    Run a HgPushlog etl process
    """
    newrelic.agent.add_custom_parameter("repo_name", repo_name)
    process = HgPushlogProcess()
    process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name)
0.003846
def get_results_as_xarray(self, parameter_space, result_parsing_function, output_labels, runs): """ Return the results relative to the desired parameter space in the form of an xarray data structure. Args: parameter_space (dict): The space of parameters to export. result_parsing_function (function): user-defined function, taking a result dictionary as argument, that can be used to parse the result files and return a list of values. output_labels (list): a list of labels to apply to the results dimensions, output by the result_parsing_function. runs (int): the number of runs to export for each parameter combination. """ np_array = np.array( self.get_space( self.db.get_complete_results(), {}, collections.OrderedDict([(k, v) for k, v in parameter_space.items()]), runs, result_parsing_function)) # Create a parameter space only containing the variable parameters clean_parameter_space = collections.OrderedDict( [(k, v) for k, v in parameter_space.items()]) clean_parameter_space['runs'] = range(runs) if isinstance(output_labels, list): clean_parameter_space['metrics'] = output_labels xr_array = xr.DataArray(np_array, coords=clean_parameter_space, dims=list(clean_parameter_space.keys())) return xr_array
0.002432
def _display_status(normalized_data, stream):
    """ print status message from docker-py stream. """
    if 'Pull complete' in normalized_data['status'] or 'Download complete' in normalized_data['status']:
        stream.write("\n")
    if 'id' in normalized_data:
        stream.write("%s - " % normalized_data['id'])
    stream.write("{0}\n".format(normalized_data['status']))
0.005141
def prepare_mosaic(self, image, fov_deg, name=None): """Prepare a new (blank) mosaic image based on the pointing of the parameter image """ header = image.get_header() ra_deg, dec_deg = header['CRVAL1'], header['CRVAL2'] data_np = image.get_data() #dtype = data_np.dtype dtype = None self.bg_ref = iqcalc.get_median(data_np) # TODO: handle skew (differing rotation for each axis)? skew_limit = self.settings.get('skew_limit', 0.1) (rot_deg, cdelt1, cdelt2) = wcs.get_rotation_and_scale( header, skew_threshold=skew_limit) self.logger.debug("image0 rot=%f cdelt1=%f cdelt2=%f" % ( rot_deg, cdelt1, cdelt2)) # Prepare pixel scale for each axis px_scale = (math.fabs(cdelt1), math.fabs(cdelt2)) cdbase = [np.sign(cdelt1), np.sign(cdelt2)] reuse_image = self.settings.get('reuse_image', False) if (not reuse_image) or (self.img_mosaic is None): self.logger.debug("creating blank image to hold mosaic") self.fv.gui_do(self._prepare_mosaic1, "Creating blank image...") # GC old mosaic self.img_mosaic = None img_mosaic = dp.create_blank_image(ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic', dtype=dtype) if name is not None: img_mosaic.set(name=name) imname = img_mosaic.get('name', image.get('name', "NoName")) self.logger.debug("mosaic name is '%s'" % (imname)) # avoid making a thumbnail of this if seed image is also that way nothumb = not self.settings.get('make_thumbs', False) if nothumb: img_mosaic.set(nothumb=True) # image is not on disk, set indication for other plugins img_mosaic.set(path=None) # TODO: fill in interesting/select object headers from seed image self.img_mosaic = img_mosaic self.logger.info("adding mosaic image '%s' to channel" % (imname)) self.fv.gui_call(self.fv.add_image, imname, img_mosaic, chname=self.mosaic_chname) else: # <-- reuse image (faster) self.logger.debug("Reusing previous mosaic image") self.fv.gui_do( self._prepare_mosaic1, "Reusing previous mosaic image...") img_mosaic = dp.recycle_image(self.img_mosaic, ra_deg, dec_deg, fov_deg, px_scale, rot_deg, cdbase=cdbase, logger=self.logger, pfx='mosaic') header = img_mosaic.get_header() (rot, cdelt1, cdelt2) = wcs.get_rotation_and_scale( header, skew_threshold=skew_limit) self.logger.debug("mosaic rot=%f cdelt1=%f cdelt2=%f" % ( rot, cdelt1, cdelt2)) return img_mosaic
0.000877
def remove_old_dumps(connection, container: str, days=None):
    """Remove dumps older than x days
    """
    if not days:
        return
    if days < 20:
        LOG.error('A minimum of 20 backups is stored')
        return
    options = return_file_objects(connection, container)
    for dt, o_info in options:
        now = datetime.datetime.now()
        delta = now - dt
        if delta.days > days:
            LOG.info('Deleting %s', o_info['name'])
            objectstore.delete_object(connection, container, o_info)
0.00188
def midpoint(self):
    """Calculate the midpoint between locations in segments.

    Returns:
        list of Point: Groups of midpoint between points in segments
    """
    midpoints = []
    for segment in self:
        if len(segment) < 2:
            midpoints.append([])
        else:
            midpoints.append(segment.midpoint())
    return midpoints
0.004938
def run_cli(argv=None): """ Calls :func:`wdiff` and prints the results to STDERR. Parses the options for :meth:`wdiff` with :func:`parse_commandline`. If *argv* is supplied, it is used as command line, else the actual one is used. Return Codes ------------ 0: okay 1: error with arguments 2: `wdiff` not found 3: error running `wdiff` """ args = parse_commandline(argv) try: context = get_context(args) settings = Settings(args.org_file, args.new_file, **context) results = wdiff( settings, args.wrap_with_html, args.fold_tags, args.hard_breaks ) print(results) return 0 except ContextError as err: print("ERROR: {}.".format(err), file=sys.stderr) return 1 except WdiffNotFoundError as err: print("ERROR: {}.".format(err), file=sys.stderr) return 2 except sub.CalledProcessError as err: print("ERROR: {}.".format(err), file=sys.stderr) return 3
0.007495
def get_common_name(self):
    ''' Get a flower's common name '''
    name = random.choice(self.common_first)
    if random.randint(0, 1) == 1:
        name += ' ' + random.choice(self.common_first).lower()
    name += ' ' + random.choice(self.common_second).lower()
    return name
0.006536
def fetch(self, code, **kwargs):
    ''' Quandl entry point in datafeed object '''
    log.debug('fetching QuanDL data (%s)' % code)
    # This way you can use your credentials even if
    # you didn't provide them to the constructor
    if 'authtoken' in kwargs:
        self.quandl_key = kwargs.pop('authtoken')

    # Harmonization: Quandl call start trim_start
    if 'start' in kwargs:
        kwargs['trim_start'] = kwargs.pop('start')
    if 'end' in kwargs:
        kwargs['trim_end'] = kwargs.pop('end')

    try:
        data = Quandl.get(code, authtoken=self.quandl_key, **kwargs)
        # FIXME With a symbol not found, insert a not_found column
        data.index = data.index.tz_localize(pytz.utc)
    except Exception as error:
        log.error('unable to fetch {}: {}'.format(code, error))
        data = pd.DataFrame()
    return data
0.002121
def export_compound(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue):
    """
    Export Compound TSV/CSV tables
    """
    if format == "score_plots":
        export_score_plots(infile)
    else:
        if outfile is None:
            if outcsv:
                outfile = infile.split(".osw")[0] + ".csv"
            else:
                outfile = infile.split(".osw")[0] + ".tsv"
        else:
            outfile = outfile
        export_compound_tsv(infile, outfile, format, outcsv, max_rs_peakgroup_qvalue)
0.003817
def from_base(cls, base, repo): """ Create a :class:`DXF` object which uses the same host, settings and session as an existing :class:`DXFBase` object. :param base: Existing :class:`DXFBase` object. :type base: :class:`DXFBase` :param repo: Name of the repository to access on the registry. Typically this is of the form ``username/reponame`` but for your own registries you don't actually have to stick to that. :type repo: str :returns: :class:`DXF` object which shares configuration and session with ``base`` but which can also be used to operate on the ``repo`` repository. :rtype: :class:`DXF` """ # pylint: disable=protected-access r = cls(base._host, repo, base._auth, base._insecure, base._auth_host, base._tlsverify) r._token = base._token r._headers = base._headers r._sessions = [base._sessions[0]] return r
0.005269
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]:
    """Indent an iterable."""
    return [" " * num + l for l in elems]
0.014184
def _normalize_label(self, s, wsmap):
    """
    normalized form of a synonym
    """
    toks = []
    for tok in list(set(self.npattern.sub(' ', s).split(' '))):
        if tok in wsmap:
            tok = wsmap[tok]
        if tok != "":
            toks.append(tok)
    toks.sort()
    return " ".join(toks)
0.008499
def search(self, *args, **kwargs):
    """
    Search views. See Zendesk API `Reference <https://developer.zendesk.com/rest_api/docs/core/views#search-views>`__.

    :param args: query is the only accepted arg.
    :param kwargs: search parameters
    """
    return self._get(self._build_url(self.endpoint.search(*args, **kwargs)))
0.011204
def angle(x0, y0, x1, y1):
    """ Returns the angle between two points.
    """
    return degrees(atan2(y1-y0, x1-x0))
0.00813
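A quick usage sketch for the angle helper above, assuming degrees and atan2 are imported from math as the function expects:

    from math import atan2, degrees

    angle(0, 0, 1, 1)   # -> 45.0
    angle(0, 0, -1, 0)  # -> 180.0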
def auth(username, password):
    '''
    REST authentication
    '''
    url = rest_auth_setup()

    data = {'username': username, 'password': password}

    # Post to the API endpoint. If 200 is returned then the result will be the ACLs
    # for this user
    result = salt.utils.http.query(url, method='POST', data=data, status=True, decode=True)
    if result['status'] == 200:
        log.debug('eauth REST call returned 200: %s', result)
        if result['dict'] is not None:
            return result['dict']
        return True
    else:
        log.debug('eauth REST call failed: %s', result)
        return False
0.003026
def set_flow(self, flow):
    """Set the flow associated to this :class:`Work`."""
    if not hasattr(self, "_flow"):
        self._flow = flow
    else:
        if self._flow != flow:
            raise ValueError("self._flow != flow")
0.007722
def _convert_distance_names_to_functions(distance):
    """
    Convert function names in a composite distance function into function
    handles.
    """
    dist_out = _copy.deepcopy(distance)

    for i, d in enumerate(distance):
        _, dist, _ = d
        if isinstance(dist, str):
            try:
                dist_out[i][1] = _tc.distances.__dict__[dist]
            except:
                raise ValueError("Distance '{}' not recognized.".format(dist))

    return dist_out
0.004098
def get_session_not_on_or_after(self):
    """
    Gets the SessionNotOnOrAfter from the AuthnStatement
    Could be used to set the local session expiration

    :returns: The SessionNotOnOrAfter value
    :rtype: time|None
    """
    not_on_or_after = None
    authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionNotOnOrAfter]')
    if authn_statement_nodes:
        not_on_or_after = OneLogin_Saml2_Utils.parse_SAML_to_time(authn_statement_nodes[0].get('SessionNotOnOrAfter'))
    return not_on_or_after
0.006944
def save_shared_file(self, sharekey=None):
    """
    Save a SharedFile to your Shake.

    Args:
        sharekey (str): Sharekey for the file to save.

    Returns:
        SharedFile saved to your shake.
    """
    endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
    data = self._make_request("POST", endpoint=endpoint, data=None)
    try:
        sf = SharedFile.NewFromJSON(data)
        sf.saved = True
        return sf
    except:
        raise Exception("{0}".format(data['error']))
0.005199
def main(argString=None):
    """The main function.

    The purpose of this module is to plot the eigenvalues provided by the
    Eigensoft software.

    Here are the steps of this module:

    1. Reads the eigenvalues (:py:func:`read_eigenvalues`).
    2. Plots the Scree Plot (:py:func:`create_scree_plot`).

    """
    # Getting and checking the options
    args = parse_args(argString)
    check_args(args)

    # Reads the eigenvalues
    eigenvalues = read_eigenvalues(args.evec)

    # Creates the plot
    create_scree_plot(eigenvalues, args.out, args)
0.001786
def get_alignments(attention_matrix: np.ndarray, threshold: float = .9) -> Iterator[Tuple[int, int]]: """ Yields hard alignments from an attention_matrix (target_length, source_length) given a threshold. :param attention_matrix: The attention matrix. :param threshold: The threshold for including an alignment link in the result. :return: Generator yielding strings of the form 0-0, 0-1, 2-1, 2-2, 3-4... """ for src_idx in range(attention_matrix.shape[1]): for trg_idx in range(attention_matrix.shape[0]): if attention_matrix[trg_idx, src_idx] > threshold: yield (src_idx, trg_idx)
0.006144
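A toy example for `get_alignments` above with a made-up 2x2 attention matrix:
import numpy as np

# Rows index target positions, columns index source positions.
attention = np.array([[0.95, 0.05],
                      [0.10, 0.92]])
print(list(get_alignments(attention, threshold=0.9)))  # [(0, 0), (1, 1)]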
def infohash_base32(self): """Base32 encoded SHA1 info hash""" self.validate() info = self.convert()[b'info'] return b32encode(sha1(bencode(info)).digest())
0.010638
def get_idxs(data, eid2idx): """ Convert from event IDs to event indices. :param data: an array with a field eid :param eid2idx: a dictionary eid -> idx :returns: the array of event indices """ uniq, inv = numpy.unique(data['eid'], return_inverse=True) idxs = numpy.array([eid2idx[eid] for eid in uniq])[inv] return idxs
0.002801
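A minimal example for `get_idxs` above, with invented event IDs (assumes `import numpy` in the function's module):
import numpy

data = numpy.array([(42,), (7,), (42,)], dtype=[('eid', numpy.int64)])
eid2idx = {7: 0, 42: 1}
print(get_idxs(data, eid2idx))  # [1 0 1]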
def db_dp010(self, value=None): """ Corresponds to IDD Field `db_dp010` mean coincident dry-bulb temperature to Dew-point temperature corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_dp010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value """ if value is not None: try: value = float(value) except ValueError: raise ValueError('value {} need to be of type float ' 'for field `db_dp010`'.format(value)) self._db_dp010 = value
0.003619
def get_holding_accounts(self) -> List[Account]: """ Returns the (cached) list of holding accounts """ if not self.__holding_accounts: self.__holding_accounts = self.__get_holding_accounts_query().all() return self.__holding_accounts
0.007407
def save_sample_data(self): """Save values from the file's header row into the DataGrid columns after doing some very basic validation """ bsc = getToolByName(self, 'bika_setup_catalog') keywords = self.bika_setup_catalog.uniqueValuesFor('getKeyword') profiles = [] for p in bsc(portal_type='AnalysisProfile'): p = p.getObject() profiles.append(p.Title()) profiles.append(p.getProfileKey()) sample_data = self.get_sample_values() if not sample_data: return False # columns that we expect, but do not find, are listed here. # we report on them only once, after looping through sample rows. missing = set() # This contains all sample header rows that were not handled # by this code unexpected = set() # Save other errors here instead of sticking them directly into # the field, so that they show up after MISSING and before EXPECTED errors = [] # This will be the new sample-data field value, when we are done. grid_rows = [] row_nr = 0 for row in sample_data['samples']: row = dict(row) row_nr += 1 # sid is just for referring the user back to row X in their # in put spreadsheet gridrow = {'sid': row['Samples']} del (row['Samples']) # We'll use this later to verify the number against selections if 'Total number of Analyses or Profiles' in row: nr_an = row['Total number of Analyses or Profiles'] del (row['Total number of Analyses or Profiles']) else: nr_an = 0 try: nr_an = int(nr_an) except ValueError: nr_an = 0 # TODO this is ignored and is probably meant to serve some purpose. del (row['Price excl Tax']) # ContainerType - not part of sample or AR schema if 'ContainerType' in row: title = row['ContainerType'] if title: obj = self.lookup(('ContainerType',), Title=row['ContainerType']) if obj: gridrow['ContainerType'] = obj[0].UID del (row['ContainerType']) if 'SampleMatrix' in row: # SampleMatrix - not part of sample or AR schema title = row['SampleMatrix'] if title: obj = self.lookup(('SampleMatrix',), Title=row['SampleMatrix']) if obj: gridrow['SampleMatrix'] = obj[0].UID del (row['SampleMatrix']) # match against sample schema for k, v in row.items(): if k in ['Analyses', 'Profiles']: continue if k in sample_schema: del (row[k]) if v: try: value = self.munge_field_value( sample_schema, row_nr, k, v) gridrow[k] = value except ValueError as e: errors.append(e.message) # match against ar schema for k, v in row.items(): if k in ['Analyses', 'Profiles']: continue if k in ar_schema: del (row[k]) if v: try: value = self.munge_field_value( ar_schema, row_nr, k, v) gridrow[k] = value except ValueError as e: errors.append(e.message) # Count and remove Keywords and Profiles from the list gridrow['Analyses'] = [] for k, v in row.items(): if k in keywords: del (row[k]) if str(v).strip().lower() not in ('', '0', 'false'): gridrow['Analyses'].append(k) gridrow['Profiles'] = [] for k, v in row.items(): if k in profiles: del (row[k]) if str(v).strip().lower() not in ('', '0', 'false'): gridrow['Profiles'].append(k) if len(gridrow['Analyses']) + len(gridrow['Profiles']) != nr_an: errors.append( "Row %s: Number of analyses does not match provided value" % row_nr) grid_rows.append(gridrow) self.setSampleData(grid_rows) if missing: self.error("SAMPLES: Missing expected fields: %s" % ','.join(missing)) for thing in errors: self.error(thing) if unexpected: self.error("Unexpected header fields: %s" % ','.join(unexpected))
0.000583
def _ProcessSources(self, sources, parser_factory): """Iterates through sources yielding action responses.""" for source in sources: for action, request in self._ParseSourceType(source): yield self._RunClientAction(action, request, parser_factory, source.path_type)
0.006192
def close(self): """Close port.""" os.close(self.in_d) os.close(self.out_d)
0.020202
def title(self):
    """
    Resolve and return this link's title.

    The title is first resolved through `Link.resolving_title_signal`;
    if no handler provides one, the stored `_title` is used instead.
    """
    resolved_title = Link.resolving_title_signal(self)
    resolved_title = [elem for elem in resolved_title if elem is not None]
    if resolved_title:
        return str(resolved_title[0])
    return self._title
0.006211
async def regions(self, *args, **kwargs): """ See the list of regions managed by this ec2-manager This method is only for debugging the ec2-manager This method is ``experimental`` """ return await self._makeApiCall(self.funcinfo["regions"], *args, **kwargs)
0.00974
def _glyph_for_monomer_pattern(self, pattern): """Add glyph for a PySB MonomerPattern.""" pattern.matches_key = lambda: str(pattern) agent_id = self._make_agent_id(pattern) # Handle sources and sinks if pattern.monomer.name in ('__source', '__sink'): return None # Handle molecules glyph = emaker.glyph(emaker.label(text=pattern.monomer.name), emaker.bbox(**self.monomer_style), class_('macromolecule'), id=agent_id) # Temporarily remove this # Add a glyph for type #type_glyph = emaker.glyph(emaker.label(text='mt:prot'), # class_('unit of information'), # emaker.bbox(**self.entity_type_style), # id=self._make_id()) #glyph.append(type_glyph) for site, value in pattern.site_conditions.items(): if value is None or isinstance(value, int): continue # Make some common abbreviations if site == 'phospho': site = 'p' elif site == 'activity': site = 'act' if value == 'active': value = 'a' elif value == 'inactive': value = 'i' state = emaker.state(variable=site, value=value) state_glyph = \ emaker.glyph(state, emaker.bbox(**self.entity_state_style), class_('state variable'), id=self._make_id()) glyph.append(state_glyph) return glyph
0.002414
def check_offset(self): """Check to see if initial position and goal are the same if they are, offset slightly so that the forcing term is not 0""" for d in range(self.dmps): if (self.y0[d] == self.goal[d]): self.goal[d] += 1e-4
0.007117
def set_exe(self, pipes_code): """ Dump launcher code to the distributed file system. """ if not self.output: raise RuntimeError("no output directory, can't create launcher") parent = hdfs.path.dirname(hdfs.path.abspath(self.output.rstrip("/"))) self.exe = hdfs.path.join(parent, utils.make_random_str()) hdfs.dump(pipes_code, self.exe)
0.00495
def NDP_Attack_DAD_DoS_via_NA(iface=None, mac_src_filter=None, tgt_filter=None, reply_mac=None): """ Perform the DAD DoS attack using NS described in section 4.1.3 of RFC 3756. This is done by listening incoming NS messages *sent from the unspecified address* and sending a NA reply for the target address, leading the peer to believe that another node is also performing DAD for that address. By default, the fake NA sent to create the DoS uses: - as target address the target address found in received NS. - as IPv6 source address: the target address found in received NS. - as IPv6 destination address: the link-local solicited-node multicast address derived from the target address in received NS. - the mac address of the interface as source (or reply_mac, see below). - the multicast mac address derived from the solicited node multicast address used as IPv6 destination address. - A Target Link-Layer address option (ICMPv6NDOptDstLLAddr) filled with the mac address used as source of the NA. Following arguments can be used to change the behavior: iface: a specific interface (e.g. "eth0") of the system on which the DoS should be launched. If None is provided conf.iface is used. mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on. Only NS messages received from this source will trigger replies. This allows limiting the effects of the DoS to a single target by filtering on its mac address. The default value is None: the DoS is not limited to a specific mac address. tgt_filter: Same as previous but for a specific target IPv6 address for received NS. If the target address in the NS message (not the IPv6 destination address) matches that address, then a fake reply will be sent, i.e. the emitter will be a target of the DoS. reply_mac: allow specifying a specific source mac address for the reply, i.e. to prevent the use of the mac address of the interface. This address will also be used in the Target Link-Layer Address option. """ def na_reply_callback(req, reply_mac, iface): """ Callback that reply to a NS with a NA """ # Let's build a reply and send it mac = req[Ether].src dst = req[IPv6].dst tgt = req[ICMPv6ND_NS].tgt rep = Ether(src=reply_mac) / IPv6(src=tgt, dst=dst) rep /= ICMPv6ND_NA(tgt=tgt, S=0, R=0, O=1) # noqa: E741 rep /= ICMPv6NDOptDstLLAddr(lladdr=reply_mac) sendp(rep, iface=iface, verbose=0) print("Reply NA for target address %s (received from %s)" % (tgt, mac)) _NDP_Attack_DAD_DoS(na_reply_callback, iface, mac_src_filter, tgt_filter, reply_mac)
0.000349
def get_star_names(self, modpath): """Returns all the names imported by 'import *' from a given module.""" if modpath not in self.star_names: print('Importing %s to resolve import *' % modpath, file=sys.stderr) try: module = self.import_module(modpath) except ImportError: print('ERROR: Failed to import %s!' % modpath, file=sys.stderr) self.star_names[modpath] = [] else: self.star_names[modpath] = sorted(getattr( module, '__all__', [name for name in dir(module) if not name.startswith('_')])) return self.star_names[modpath]
0.00569
def common_entitlements_options(f): """Add common options for entitlement commands.""" @click.option( "--show-tokens", default=False, is_flag=True, help="Show entitlement token string contents in output.", ) @click.pass_context @functools.wraps(f) def wrapper(ctx, *args, **kwargs): # pylint: disable=missing-docstring return ctx.invoke(f, *args, **kwargs) return wrapper
0.002227
def _get_key_with_evict(path, timestamp, passphrase):
    '''
    Load a private key from disk. `timestamp` above is intended to be the
    timestamp of the file's last modification. This function is memoized, so if
    it is called a second time with the same path and timestamp (the file's
    last modified time), the result is returned from the memoization. If the
    file gets modified then the params are different and the key is loaded
    from disk.
    '''
    log.debug('salt.crypt._get_key_with_evict: Loading private key')
    if HAS_M2:
        key = RSA.load_key(path, lambda x: six.b(passphrase))
    else:
        with salt.utils.files.fopen(path) as f:
            key = RSA.importKey(f.read(), passphrase)
    return key
0.001366
def clear_optimizer(self): """Cleans query optimizer state""" self._optimized = False self._type2decls = {} self._type2name2decls = {} self._type2decls_nr = {} self._type2name2decls_nr = {} self._all_decls = None self._all_decls_not_recursive = None for decl in self.declarations: if isinstance(decl, scopedef_t): decl.clear_optimizer()
0.004577
def iter_final_matches(self, canonical_match, subject_graph, one_match):
    """Given a match, iterate over all related equivalent matches

       When criteria sets are defined, the iterator runs over all
       symmetrically equivalent matches that fulfill one of the criteria
       sets. When no criteria sets are defined, the iterator only yields
       the input match.
    """
    if self.criteria_sets is None or one_match:
        yield canonical_match
    else:
        for criteria_set in self.criteria_sets:
            satisfied_match_tags = set([])
            for symmetry in self.pattern_graph.symmetries:
                final_match = canonical_match * symmetry
                #print final_match
                if criteria_set.test_match(final_match, self.pattern_graph, subject_graph):
                    match_tags = tuple(
                        self.vertex_tags.get(symmetry.reverse[vertex0])
                        for vertex0
                        in range(self.pattern_graph.num_vertices)
                    )
                    if match_tags not in satisfied_match_tags:
                        final_match.__dict__.update(criteria_set.info)
                        yield final_match
                        satisfied_match_tags.add(match_tags)
0.002941
def __fillablebox(msg, title="", default="", mask=None, image=None, root=None): """ Show a box in which a user can enter some text. You may optionally specify some default text, which will appear in the enterbox when it is displayed. Returns the text that the user entered, or None if he cancels the operation. """ global boxRoot, __enterboxText, __enterboxDefaultText global cancelButton, entryWidget, okButton if title is None: title == "" if default is None: default = "" __enterboxDefaultText = default __enterboxText = __enterboxDefaultText if root: root.withdraw() boxRoot = Toplevel(master=root) boxRoot.withdraw() else: boxRoot = Tk() boxRoot.withdraw() boxRoot.protocol('WM_DELETE_WINDOW', __enterboxQuit) boxRoot.title(title) boxRoot.iconname('Dialog') boxRoot.geometry(st.rootWindowPosition) boxRoot.bind("<Escape>", __enterboxCancel) # ------------- define the messageFrame --------------------------------- messageFrame = Frame(master=boxRoot) messageFrame.pack(side=TOP, fill=BOTH) # ------------- define the imageFrame --------------------------------- try: tk_Image = ut.load_tk_image(image) except Exception as inst: print(inst) tk_Image = None if tk_Image: imageFrame = Frame(master=boxRoot) imageFrame.pack(side=TOP, fill=BOTH) label = Label(imageFrame, image=tk_Image) label.image = tk_Image # keep a reference! label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m') # ------------- define the buttonsFrame --------------------------------- buttonsFrame = Frame(master=boxRoot) buttonsFrame.pack(side=TOP, fill=BOTH) # ------------- define the entryFrame --------------------------------- entryFrame = Frame(master=boxRoot) entryFrame.pack(side=TOP, fill=BOTH) # ------------- define the buttonsFrame --------------------------------- buttonsFrame = Frame(master=boxRoot) buttonsFrame.pack(side=TOP, fill=BOTH) # -------------------- the msg widget ---------------------------- messageWidget = Message(messageFrame, width="4.5i", text=msg) messageWidget.configure( font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE)) messageWidget.pack(side=RIGHT, expand=1, fill=BOTH, padx='3m', pady='3m') # --------- entryWidget ---------------------------------------------- entryWidget = Entry(entryFrame, width=40) bindArrows(entryWidget) entryWidget.configure( font=(st.PROPORTIONAL_FONT_FAMILY, st.TEXT_ENTRY_FONT_SIZE)) if mask: entryWidget.configure(show=mask) entryWidget.pack(side=LEFT, padx="3m") entryWidget.bind("<Return>", __enterboxGetText) entryWidget.bind("<Escape>", __enterboxCancel) # put text into the entryWidget entryWidget.insert(0, __enterboxDefaultText) # ------------------ ok button ------------------------------- okButton = Button(buttonsFrame, takefocus=1, text="OK") bindArrows(okButton) okButton.pack( expand=1, side=LEFT, padx='3m', pady='3m', ipadx='2m', ipady='1m') # for the commandButton, bind activation events to the activation event # handler commandButton = okButton handler = __enterboxGetText for selectionEvent in st.STANDARD_SELECTION_EVENTS: commandButton.bind("<{}>".format(selectionEvent), handler) # ------------------ cancel button ------------------------------- cancelButton = Button(buttonsFrame, takefocus=1, text="Cancel") bindArrows(cancelButton) cancelButton.pack( expand=1, side=RIGHT, padx='3m', pady='3m', ipadx='2m', ipady='1m') # for the commandButton, bind activation events to the activation event # handler commandButton = cancelButton handler = __enterboxCancel for selectionEvent in st.STANDARD_SELECTION_EVENTS: commandButton.bind("<{}>".format(selectionEvent), 
handler) # ------------------- time for action! ----------------- entryWidget.focus_force() # put the focus on the entryWidget boxRoot.deiconify() boxRoot.mainloop() # run it! # -------- after the run has completed ---------------------------------- if root: root.deiconify() boxRoot.destroy() # button_click didn't destroy boxRoot, so we do it now return __enterboxText
0.000451
def _add_listeners ( self ): """ Adds the event listeners for a specified object. """ object = self.value canvas = self.factory.canvas if canvas is not None: for name in canvas.node_children: object.on_trait_change(self._nodes_replaced, name) object.on_trait_change(self._nodes_changed, name + "_items") for name in canvas.edge_children: object.on_trait_change(self._edges_replaced, name) object.on_trait_change(self._edges_changed, name + "_items") else: raise ValueError("Graph canvas not set for graph editor.")
0.007541
def search(self, Queue=None, order=None, raw_query=None, Format='l', **kwargs): """ Search arbitrary needles in given fields and queue. Example:: >>> tracker = Rt('http://tracker.example.com/REST/1.0/', 'rt-username', 'top-secret') >>> tracker.login() >>> tickets = tracker.search(CF_Domain='example.com', Subject__like='warning') >>> tickets = tracker.search(Queue='General', order='Status', raw_query="id='1'+OR+id='2'+OR+id='3'") :keyword Queue: Queue where to search. If you wish to search across all of your queues, pass the ALL_QUEUES object as the argument. :keyword order: Name of field sorting result list, for descending order put - before the field name. E.g. -Created will put the newest tickets at the beginning :keyword raw_query: A raw query to provide to RT if you know what you are doing. You may still pass Queue and order kwargs, so use these instead of including them in the raw query. You can refer to the RT query builder. If passing raw_query, all other **kwargs will be ignored. :keyword Format: Format of the query: - i: only `id' fields are populated - s: only `id' and `subject' fields are populated - l: multi-line format, all fields are populated :keyword kwargs: Other arguments possible to set if not passing raw_query: Requestors, Subject, Cc, AdminCc, Owner, Status, Priority, InitialPriority, FinalPriority, TimeEstimated, Starts, Due, Text,... (according to RT fields) Custom fields CF.{<CustomFieldName>} could be set with keywords CF_CustomFieldName. To alter lookup operators you can append one of the following endings to each keyword: __exact for operator = (default) __notexact for operator != __gt for operator > __lt for operator < __like for operator LIKE __notlike for operator NOT LIKE Setting values to keywords constrain search result to the tickets satisfying all of them. :returns: List of matching tickets. Each ticket is the same dictionary as in :py:meth:`~Rt.get_ticket`. :raises: UnexpectedMessageFormat: Unexpected format of returned message. 
InvalidQueryError: If raw query is malformed """ get_params = {} query = [] url = 'search/ticket' if Queue is not ALL_QUEUES: query.append("Queue=\'{}\'".format(Queue or self.default_queue)) if not raw_query: operators_map = { 'gt': '>', 'lt': '<', 'exact': '=', 'notexact': '!=', 'like': ' LIKE ', 'notlike': ' NOT LIKE ' } for key, value in iteritems(kwargs): op = '=' key_parts = key.split('__') if len(key_parts) > 1: key = '__'.join(key_parts[:-1]) op = operators_map.get(key_parts[-1], '=') if key[:3] != 'CF_': query.append("{}{}\'{}\'".format(key, op, value)) else: query.append("'CF.{{{}}}'{}\'{}\'".format(key[3:], op, value)) else: query.append(raw_query) get_params['query'] = ' AND '.join('(' + part + ')' for part in query) if order: get_params['orderby'] = order get_params['format'] = Format msg = self.__request(url, get_params=get_params) lines = msg.split('\n') if len(lines) > 2: if self.__get_status_code(lines[0]) != 200 and lines[2].startswith('Invalid query: '): raise InvalidQueryError(lines[2]) if lines[2].startswith('No matching results.'): return [] if Format == 'l': msgs = map(lambda x: x.split('\n'), msg.split('\n--\n')) items = [] for msg in msgs: pairs = {} req_matching = [i for i, m in enumerate(msg) if self.RE_PATTERNS['requestors_pattern'].match(m)] req_id = req_matching[0] if req_matching else None if not req_id: raise UnexpectedMessageFormat('Missing line starting with `Requestors:`.') for i in range(req_id): if ': ' in msg[i]: header, content = self.split_header(msg[i]) pairs[header.strip()] = content.strip() requestors = [msg[req_id][12:]] req_id += 1 while (req_id < len(msg)) and (msg[req_id][:12] == ' ' * 12): requestors.append(msg[req_id][12:]) req_id += 1 pairs['Requestors'] = self.__normalize_list(requestors) for i in range(req_id, len(msg)): if ': ' in msg[i]: header, content = self.split_header(msg[i]) pairs[header.strip()] = content.strip() if pairs: items.append(pairs) if 'Cc' in pairs: pairs['Cc'] = self.__normalize_list(pairs['Cc']) if 'AdminCc' in pairs: pairs['AdminCc'] = self.__normalize_list(pairs['AdminCc']) if 'id' not in pairs and not pairs['id'].startswitch('ticket/'): raise UnexpectedMessageFormat('Response from RT didn\'t contain a valid ticket_id') else: pairs['numerical_id'] = pairs['id'].split('ticket/')[1] return items elif Format == 's': items = [] msgs = lines[2:] for msg in msgs: if "" == msg: # Ignore blank line at the end continue ticket_id, subject = self.split_header(msg) items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id, 'Subject': subject}) return items elif Format == 'i': items = [] msgs = lines[2:] for msg in msgs: if "" == msg: # Ignore blank line at the end continue _, ticket_id = msg.split('/', 1) items.append({'id': 'ticket/' + ticket_id, 'numerical_id': ticket_id}) return items
0.003082
def fill_polygon_with_points(cls, goal=None, polygon=None): """ Fill a shapely polygon with X number of points """ if goal is None: raise ValueError("Must specify the number of points (goal) to fill the polygon with") if polygon is None or (not isinstance(polygon, Polygon) and not isinstance(polygon, MultiPolygon)): raise ValueError("Must specify a polygon to fill points with") minx = polygon.bounds[0] maxx = polygon.bounds[2] miny = polygon.bounds[1] maxy = polygon.bounds[3] points = [] now = time.time() while len(points) < goal: random_x = random.uniform(minx, maxx) random_y = random.uniform(miny, maxy) p = Point(random_x, random_y) if p.within(polygon): points.append(p) logger.info("Filling polygon with points took %f seconds" % (time.time() - now)) return points
0.0091
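A rough standalone sketch of the same rejection-sampling idea used by `fill_polygon_with_points` above; the polygon, goal and shapely objects here are purely illustrative:
import random
from shapely.geometry import Point, Polygon

square = Polygon([(0, 0), (0, 10), (10, 10), (10, 0)])
minx, miny, maxx, maxy = square.bounds
points = []
while len(points) < 5:  # goal of 5 points, for illustration
    candidate = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
    if candidate.within(square):
        points.append(candidate)
print(len(points))  # 5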
def _load_config(path: str) -> dict: """ Given a file path, parse it based on its extension (YAML or JSON) and return the values as a Python dictionary. JSON is the default if an extension can't be determined. """ __, ext = os.path.splitext(path) if ext in ['.yaml', '.yml']: import ruamel.yaml loader = ruamel.yaml.safe_load else: loader = json.load with open(path) as f: config = loader(f) return config
0.002096
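A quick check of `_load_config` above using a throwaway JSON file (the YAML branch would additionally need ruamel.yaml installed):
import json
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({"debug": True, "workers": 4}, f)
    path = f.name
print(_load_config(path))  # {'debug': True, 'workers': 4}
os.remove(path)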
def is_link_local(link_target): """ :param link_target: The target of a symbolic link, as given by os.readlink() :type link_target: string :returns: A boolean indicating the link is local to the current directory. This is defined to mean that os.path.isabs(link_target) == False and the link NEVER references the parent directory, so "./foo/../../curdir/foo" would return False. :rtype: boolean """ is_local=(not os.path.isabs(link_target)) if is_local: # make sure that the path NEVER extends outside the resources directory! d,l = os.path.split(link_target) link_parts = [] while l: link_parts.append(l) d,l = os.path.split(d) curr_path = os.sep for p in reversed(link_parts): is_local = (is_local and not (curr_path == os.sep and p == os.pardir) ) curr_path = os.path.abspath(os.path.join(curr_path, p)) return is_local
0.01001
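A few illustrative calls against `is_link_local` above (outputs assume a POSIX-style path separator):
print(is_link_local("data/file.txt"))  # True  - stays inside the directory
print(is_link_local("./foo/../bar"))   # True  - wanders, but never past the top
print(is_link_local("../outside"))     # False - escapes via the parent directory
print(is_link_local("/etc/passwd"))    # False - absolute target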
def create_variable(self, varname, vtype=None): """Create a tk variable. If the variable was created previously return that instance. """ var_types = ('string', 'int', 'boolean', 'double') vname = varname var = None type_from_name = 'string' # default type if ':' in varname: type_from_name, vname = varname.split(':') # Fix incorrect order bug #33 if type_from_name not in (var_types): # Swap order type_from_name, vname = vname, type_from_name if type_from_name not in (var_types): raise Exception('Undefined variable type in "{0}"'.format(varname)) if vname in self.tkvariables: var = self.tkvariables[vname] else: if vtype is None: # get type from name if type_from_name == 'int': var = tkinter.IntVar() elif type_from_name == 'boolean': var = tkinter.BooleanVar() elif type_from_name == 'double': var = tkinter.DoubleVar() else: var = tkinter.StringVar() else: var = vtype() self.tkvariables[vname] = var return var
0.002235
def useQt(qtLib: str = 'PyQt5', period: float = 0.01): """ Run combined Qt5/asyncio event loop. Args: qtLib: Name of Qt library to use, can be 'PyQt5' or 'PySide2'. period: Period in seconds to poll Qt. """ def qt_step(): loop.call_later(period, qt_step) if not stack: qloop = QEventLoop() timer = QTimer() timer.timeout.connect(qloop.quit) stack.append((qloop, timer)) qloop, timer = stack.pop() timer.start(0) qloop.exec_() timer.stop() stack.append((qloop, timer)) if qtLib not in ('PyQt5', 'PySide2'): raise RuntimeError(f'Unknown Qt library: {qtLib}') if qtLib == 'PyQt5': from PyQt5.Qt import QApplication, QTimer, QEventLoop else: from PySide2.QtWidgets import QApplication from PySide2.QtCore import QTimer, QEventLoop global qApp qApp = QApplication.instance() or QApplication(sys.argv) loop = asyncio.get_event_loop() stack: list = [] qt_step()
0.000943
def localize_sources(gta, **kwargs): """Relocalize sources in the region of interest Parameters ---------- gta : `fermipy.gtaanalysis.GTAnalysis` The analysis object kwargs : These are passed to the gta.localize function """ # Localize all point sources for src in sorted(gta.roi.sources, key=lambda t: t['ts'], reverse=True): # for s in gta.roi.sources: if not src['SpatialModel'] == 'PointSource': continue if src['offset_roi_edge'] > -0.1: continue gta.localize(src.name, **kwargs) gta.optimize() gta.print_roi()
0.007599
def clean_email(self): """ Ensure the email address is not already registered. """ email = self.cleaned_data.get("email") qs = User.objects.exclude(id=self.instance.id).filter(email=email) if len(qs) == 0: return email raise forms.ValidationError( ugettext("This email is already registered"))
0.005089
def __setUserMinimumSize( self, section, oldSize, newSize ): """ Records the user minimum size for a column. :param section | <int> oldSize | <int> newSize | <int> """ if self.isVisible(): self._columnMinimums[section] = newSize
0.014577