Dataset columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
    """Publishes a list of maps.

    Args:
        maps_info (list): A list of JSON configuration maps to publish.
    Returns:
        list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
    """
    if self.securityhandler is None:
        print("Security handler required")
        return
    itemInfo = None
    itemId = None
    map_results = None
    replaceInfo = None
    replaceItem = None
    map_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        map_results = []
        for map_info in maps_info:
            itemInfo = {}
            if 'ReplaceInfo' in map_info:
                replaceInfo = map_info['ReplaceInfo']
            else:
                replaceInfo = None
            if replaceInfo is not None:
                for replaceItem in replaceInfo:
                    if replaceItem['ReplaceType'] == 'Layer':
                        if fsInfo is not None:
                            for fs in fsInfo:
                                if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
                                    replaceItem['ReplaceString'] = fs['FSInfo']['url']
                                    replaceItem['ItemID'] = fs['FSInfo']['itemId']
                                    replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
                                    if 'convertCase' in fs['FSInfo']:
                                        replaceItem['convertCase'] = fs['FSInfo']['convertCase']
                        elif 'ItemID' in replaceItem:
                            if 'ItemFolder' not in replaceItem:
                                itemId = replaceItem['ItemID']
                                itemInfo = admin.content.getItem(itemId=itemId)
                                if itemInfo.owner:
                                    if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                                        replaceItem['ItemFolder'] = itemInfo.ownerFolder
                                    else:
                                        replaceItem['ItemFolder'] = None
                    elif replaceItem['ReplaceType'] == 'Global':
                        if itInfo is not None:
                            for itm in itInfo:
                                if itm is not None:
                                    if replaceItem['ReplaceString'] == itm['ReplaceTag']:
                                        if 'ItemInfo' in itm:
                                            if 'url' in itm['ItemInfo']:
                                                replaceItem['ReplaceString'] = itm['ItemInfo']['url']
            if 'ReplaceTag' in map_info:
                itemInfo = {"ReplaceTag": map_info['ReplaceTag']}
            else:
                itemInfo = {"ReplaceTag": "{WebMap}"}
            itemInfo['MapInfo'] = self._publishMap(config=map_info, replaceInfo=replaceInfo)
            map_results.append(itemInfo)
            print("%s webmap created" % itemInfo['MapInfo']['Name'])
        return map_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        itemInfo = None
        itemId = None
        replaceInfo = None
        replaceItem = None
        map_info = None
        admin = None
        del itemInfo
        del itemId
        del replaceInfo
        del replaceItem
        del map_info
        del admin
        gc.collect()
0.007123
def find_undeclared(nodes, names):
    """Check if the names passed are accessed undeclared.  The return value
    is a set of all the undeclared names from the sequence of names found.
    """
    visitor = UndeclaredNameVisitor(names)
    try:
        for node in nodes:
            visitor.visit(node)
    except VisitorExit:
        pass
    return visitor.undeclared
0.002695
def parse_sitelist(sitelist):
    """Return list of Site instances from retrieved sitelist data"""
    sites = []
    for site in sitelist["Locations"]["Location"]:
        try:
            ident = site["id"]
            name = site["name"]
        except KeyError:
            ident = site["@id"]  # Difference between loc-spec and text for some reason
            name = site["@name"]
        if "latitude" in site:
            lat = float(site["latitude"])
            lon = float(site["longitude"])
        else:
            lat = lon = None
        s = Site(ident, name, lat, lon)
        sites.append(s)
    return sites
0.0048
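A minimal sketch of the sitelist shape this parser accepts, assuming Site is a plain (ident, name, lat, lon) container; real sitelist payloads may carry more fields:

sitelist = {"Locations": {"Location": [
    {"id": "3772", "name": "Heathrow", "latitude": "51.479", "longitude": "-0.449"},
    {"@id": "3002", "@name": "Baltasound"},   # '@'-prefixed keys, no coordinates
]}}
parse_sitelist(sitelist)
# -> [Site('3772', 'Heathrow', 51.479, -0.449), Site('3002', 'Baltasound', None, None)]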
async def revoke(client: Client, revocation_signed_raw: str) -> ClientResponse:
    """
    POST revocation document

    :param client: Client to connect to the api
    :param revocation_signed_raw: Revocation signed raw document
    :return:
    """
    return await client.post(MODULE + '/revoke', {'revocation': revocation_signed_raw},
                             rtype=RESPONSE_AIOHTTP)
0.005571
def get_raw_email(self):
    """
    This only applies to raw payloads:
    https://sendgrid.com/docs/Classroom/Basics/Inbound_Parse_Webhook/setting_up_the_inbound_parse_webhook.html#-Raw-Parameters
    """
    if 'email' in self.payload:
        raw_email = email.message_from_string(self.payload['email'])
        return raw_email
    else:
        return None
0.005025
def interact(self, container: Container) -> None:
    """
    Connects to the PTY (pseudo-TTY) for a given container.

    Blocks until the user exits the PTY.
    """
    cmd = "/bin/bash -c 'source /.environment && /bin/bash'"
    cmd = "docker exec -it {} {}".format(container.id, cmd)
    subprocess.call(cmd, shell=True)
0.005682
def main(path, pid, queue):
    """
    Standalone PSQ worker.

    The queue argument must be the full importable path to a psq.Queue
    instance.

    Example usage:

        psqworker config.q

        psqworker --path /opt/app queues.fast
    """
    setup_logging()

    if pid:
        with open(os.path.expanduser(pid), "w") as f:
            f.write(str(os.getpid()))

    if not path:
        path = os.getcwd()
    sys.path.insert(0, path)

    queue = import_queue(queue)

    import psq
    worker = psq.Worker(queue=queue)

    worker.listen()
0.001786
def copy_file_if_modified(src_path, dest_path):
    """Only copies the file from the source path to the destination path if it
    doesn't exist yet or it has been modified. Intended to provide something of
    an optimisation when a project has large trees of assets."""

    # if the destination path is a directory, delete it completely - we assume here we are
    # writing a file to the filesystem
    if os.path.isdir(dest_path):
        shutil.rmtree(dest_path)

    must_copy = False
    if not os.path.exists(dest_path):
        must_copy = True
    else:
        src_stat = os.stat(src_path)
        dest_stat = os.stat(dest_path)

        # if the size or last modified timestamp are different
        if ((src_stat[stat.ST_SIZE] != dest_stat[stat.ST_SIZE]) or
                (src_stat[stat.ST_MTIME] != dest_stat[stat.ST_MTIME])):
            must_copy = True

    if must_copy:
        shutil.copy2(src_path, dest_path)
0.004306
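A usage sketch; the second call is a no-op because shutil.copy2 preserves the source size and mtime, so the comparison above matches:

with open("asset.txt", "w") as f:
    f.write("hello")

copy_file_if_modified("asset.txt", "asset_copy.txt")  # copies: destination missing
copy_file_if_modified("asset.txt", "asset_copy.txt")  # skipped: size and mtime match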
def not_a_string(obj):
    """It's probably not a string, in the sense that Python2/3
    get confused about these things"""
    my_type = str(type(obj))
    if is_py3():
        is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0
        return is_str
    return my_type.find('str') < 0 and \
        my_type.find('unicode') < 0
0.002933
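On Python 3 (is_py3() returning True), both str and bytes count as string-like, so:

not_a_string("abc")    # False -- 'str' appears in "<class 'str'>"
not_a_string(b"abc")   # False -- 'bytes' appears in "<class 'bytes'>"
not_a_string(42)       # True  -- "<class 'int'>" mentions neither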
def _linux_brshow(br=None):
    '''
    Internal, returns bridges and enslaved interfaces (GNU/Linux - brctl)
    '''
    brctl = _tool_path('brctl')

    if br:
        cmd = '{0} show {1}'.format(brctl, br)
    else:
        cmd = '{0} show'.format(brctl)

    brs = {}

    for line in __salt__['cmd.run'](cmd, python_shell=False).splitlines():
        # get rid of first line
        if line.startswith('bridge name'):
            continue
        # get rid of ^\n's
        vals = line.split()
        if not vals:
            continue

        # bridge name  bridge id          STP enabled  interfaces
        # br0          8000.e4115bac8ddc  no           eth0
        #                                              foo0
        # br1          8000.e4115bac8ddc  no           eth1
        if len(vals) > 1:
            brname = vals[0]

            brs[brname] = {
                'id': vals[1],
                'stp': vals[2],
            }
            if len(vals) > 3:
                brs[brname]['interfaces'] = [vals[3]]

        if len(vals) == 1 and brname:
            brs[brname]['interfaces'].append(vals[0])

    if br:
        try:
            return brs[br]
        except KeyError:
            return None
    return brs
0.000823
def rmgen(self, idx):
    """
    Remove the static generators if their dynamic models exist

    Parameters
    ----------
    idx : list
        A list of static generator idx

    Returns
    -------
    None
    """
    stagens = []
    for device, stagen in zip(self.devman.devices, self.call.stagen):
        if stagen:
            stagens.append(device)

    for gen in idx:
        for stagen in stagens:
            if gen in self.__dict__[stagen].uid.keys():
                self.__dict__[stagen].disable_gen(gen)
0.003373
def get_info(df, verbose=None, max_cols=None, memory_usage=None, null_counts=None):
    """Returns the .info() output of a dataframe"""
    assert type(df) is pd.DataFrame
    buffer = io.StringIO()
    df.info(verbose, buffer, max_cols, memory_usage, null_counts)
    return buffer.getvalue()
0.035948
def add_context(self, name, context, prefix_char=None):
    """Add a context to the suite.

    Args:
        name (str): Name to store the context under.
        context (ResolvedContext): Context to add.
    """
    if name in self.contexts:
        raise SuiteError("Context already in suite: %r" % name)
    if not context.success:
        raise SuiteError("Context is not resolved: %r" % name)

    self.contexts[name] = dict(name=name,
                               context=context.copy(),
                               tool_aliases={},
                               hidden_tools=set(),
                               priority=self._next_priority,
                               prefix_char=prefix_char)

    self._flush_tools()
0.0025
def info(self, section='default'):
    """Get information and statistics about the server.

    If called without argument will return default set of sections.
    For available sections, see http://redis.io/commands/INFO

    :raises ValueError: if section is invalid
    """
    if not section:
        raise ValueError("invalid section")
    fut = self.execute(b'INFO', section, encoding='utf-8')
    return wait_convert(fut, parse_info)
0.004193
def unwrap(self):
    """
    Returns a GLFWvidmode object.
    """
    size = self.Size(self.width, self.height)
    bits = self.Bits(self.red_bits, self.green_bits, self.blue_bits)
    return self.GLFWvidmode(size, bits, self.refresh_rate)
0.007547
def _get_methods_that_calculate_outputs(inputs, outputs, methods):
    '''
    Given iterables of input variable names, output variable names, and
    a methods dictionary, returns the subset of the methods dictionary
    that can be calculated, doesn't calculate something we already have,
    and only contains equations that might help calculate the outputs
    from the inputs.
    '''
    # Get a list of everything that we can possibly calculate
    # This is useful in figuring out whether we can calculate arguments
    intermediates = get_calculatable_quantities(inputs, methods)
    # Initialize our return dictionary
    return_methods = {}
    # list so that we can append arguments that need to be output for
    # some of the paths
    outputs = list(outputs)
    # keep track of when to exit the while loop
    keep_going = True
    while keep_going:
        # If there are no updates in a pass, the loop will exit
        keep_going = False
        for output in outputs:
            try:
                output_dict = return_methods[output]
            except KeyError:
                output_dict = {}
            for args, func in methods[output].items():
                # only check the method if we're not already returning it
                if args not in output_dict.keys():
                    # Initialize a list of intermediates needed to use
                    # this method, to add to outputs if we find we can
                    # use it.
                    needed = []
                    for arg in args:
                        if arg in inputs:
                            # we have this argument
                            pass
                        elif arg in outputs:
                            # we may need to calculate one output using
                            # another output
                            pass
                        elif arg in intermediates:
                            if arg not in outputs:
                                # don't need to add to needed if it's already
                                # been put in outputs
                                needed.append(arg)
                        else:
                            # Can't do this func
                            break
                    else:  # did not break, can calculate this
                        output_dict[args] = func
                        if len(needed) > 0:
                            # We added an output, so need another loop
                            outputs.extend(needed)
                            keep_going = True
            if len(output_dict) > 0:
                return_methods[output] = output_dict
    return return_methods
0.000743
def get_web_file(file_url, file_name, auth=None, blocksize=1024*1024):
    '''Get a file from the web (HTTP).

    file_url:  The URL of the file to get
    file_name: Local path to save loaded file in
    auth:      A tuple (httpproxy, proxyuser, proxypass)
    blocksize: Block size of file reads

    Will try simple load through urllib first.  Drop down to urllib2
    if there is a proxy and it requires authentication.

    Environment variable HTTP_PROXY can be used to supply proxy information.
    PROXY_USERNAME is used to supply the authentication username.
    PROXY_PASSWORD supplies the password, if you dare!
    '''

    # Simple fetch, if fails, check for proxy error
    try:
        urllib.urlretrieve(file_url, file_name)
        return (True, auth)     # no proxy, no auth required
    except IOError, e:
        if e[1] == 407:         # proxy error
            pass
        elif e[1][0] == 113:    # no route to host
            print 'No route to host for %s' % file_url
            return (False, auth)    # return False
        else:
            print 'Unknown connection error to %s' % file_url
            return (False, auth)

    # We get here if there was a proxy error, get file through the proxy
    # unpack auth info
    try:
        (httpproxy, proxyuser, proxypass) = auth
    except:
        (httpproxy, proxyuser, proxypass) = (None, None, None)

    # fill in any gaps from the environment
    if httpproxy is None:
        httpproxy = os.getenv('HTTP_PROXY')
    if proxyuser is None:
        proxyuser = os.getenv('PROXY_USERNAME')
    if proxypass is None:
        proxypass = os.getenv('PROXY_PASSWORD')

    # Get auth info from user if still not supplied
    if httpproxy is None or proxyuser is None or proxypass is None:
        print '-'*72
        print ('You need to supply proxy authentication information.')
        if httpproxy is None:
            httpproxy = raw_input('  proxy server: ')
        else:
            print '  HTTP proxy was supplied: %s' % httpproxy
        if proxyuser is None:
            proxyuser = raw_input('proxy username: ')
        else:
            print 'HTTP proxy username was supplied: %s' % proxyuser
        if proxypass is None:
            proxypass = getpass.getpass('proxy password: ')
        else:
            print 'HTTP proxy password was supplied: %s' % '*'*len(proxyuser)
        print '-'*72

    # the proxy URL cannot start with 'http://', we add that later
    httpproxy = httpproxy.lower()
    if httpproxy.startswith('http://'):
        httpproxy = httpproxy.replace('http://', '', 1)

    # open remote file
    proxy = urllib2.ProxyHandler({'http': 'http://' + proxyuser
                                          + ':' + proxypass
                                          + '@' + httpproxy})
    authinfo = urllib2.HTTPBasicAuthHandler()
    opener = urllib2.build_opener(proxy, authinfo, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    try:
        webget = urllib2.urlopen(file_url)
    except urllib2.HTTPError, e:
        print 'Error received from proxy:\n%s' % str(e)
        print 'Possibly the user/password is wrong.'
        return (False, (httpproxy, proxyuser, proxypass))

    # transfer file to local filesystem
    fd = open(file_name, 'wb')
    while True:
        data = webget.read(blocksize)
        if len(data) == 0:
            break
        fd.write(data)
    fd.close()
    webget.close()

    # return successful auth info
    return (True, (httpproxy, proxyuser, proxypass))
0.001117
def favourite_filters(self):
    """Get a list of filter Resources which are the favourites of the
    currently authenticated user.

    :rtype: List[Filter]
    """
    r_json = self._get_json('filter/favourite')
    filters = [Filter(self._options, self._session, raw_filter_json)
               for raw_filter_json in r_json]
    return filters
0.008065
def mesh_to_collada(mesh):
    '''
    Supports per-vertex color, but nothing else.
    '''
    import numpy as np
    try:
        from collada import Collada, scene
    except ImportError:
        raise ImportError("lace.serialization.dae.mesh_to_collada requires package pycollada.")

    def create_material(dae):
        from collada import material, scene
        effect = material.Effect("effect0", [], "phong", diffuse=(1, 1, 1),
                                 specular=(0, 0, 0), double_sided=True)
        mat = material.Material("material0", "mymaterial", effect)
        dae.effects.append(effect)
        dae.materials.append(mat)
        return scene.MaterialNode("materialref", mat, inputs=[])

    def geometry_from_mesh(dae, mesh):
        from collada import source, geometry
        srcs = []
        # v
        srcs.append(source.FloatSource("verts-array", mesh.v, ('X', 'Y', 'Z')))
        input_list = source.InputList()
        input_list.addInput(0, 'VERTEX', "#verts-array")
        # vc
        if mesh.vc is not None:
            input_list.addInput(len(srcs), 'COLOR', "#color-array")
            srcs.append(source.FloatSource("color-array", mesh.vc[mesh.f.ravel()], ('X', 'Y', 'Z')))
        # f
        geom = geometry.Geometry(str(mesh), "geometry0", "mymesh", srcs)
        indices = np.dstack([mesh.f for _ in srcs]).ravel()
        triset = geom.createTriangleSet(indices, input_list, "materialref")
        geom.primitives.append(triset)
        # e
        if mesh.e is not None:
            indices = np.dstack([mesh.e for _ in srcs]).ravel()
            lineset = geom.createLineSet(indices, input_list, "materialref")
            geom.primitives.append(lineset)
        dae.geometries.append(geom)
        return geom

    dae = Collada()
    geom = geometry_from_mesh(dae, mesh)
    node = scene.Node("node0", children=[scene.GeometryNode(geom, [create_material(dae)])])
    myscene = scene.Scene("myscene", [node])
    dae.scenes.append(myscene)
    dae.scene = myscene
    return dae
0.003005
def _load_defaults(self, default='settings.py'):
    '''
    Load the default settings
    '''
    if default[-3:] == '.py':
        default = default[:-3]

    self.my_settings = {}
    try:
        settings = importlib.import_module(default)
        self.my_settings = self._convert_to_dict(settings)
    except ImportError:
        log.warning("No default settings found")
0.004773
def format_graylog_v0(self, record):
    '''
    Graylog 'raw' format is essentially the raw record, minimally munged to
    provide the bare minimum that td-agent requires to accept and route the
    event.  This is well suited to a config where the client td-agents log
    directly to Graylog.
    '''
    message_dict = {
        'message': record.getMessage(),
        'timestamp': self.formatTime(record),
        # Graylog uses syslog levels, not whatever it is Python does...
        'level': syslog_levels.get(record.levelname, 'ALERT'),
        'tag': self.tag
    }

    if record.exc_info:
        exc_info = self.formatException(record.exc_info)
        message_dict.update({'full_message': exc_info})

    # Add any extra attributes to the message field
    for key, value in six.iteritems(record.__dict__):
        if key in ('args', 'asctime', 'bracketlevel', 'bracketname', 'bracketprocess',
                   'created', 'exc_info', 'exc_text', 'id', 'levelname', 'levelno',
                   'msecs', 'message', 'msg', 'relativeCreated', 'version'):
            # These are already handled above or explicitly pruned.
            continue

        if isinstance(value, (six.string_types, bool, dict, float, int, list, types.NoneType)):  # pylint: disable=W1699
            val = value
        else:
            val = repr(value)
        message_dict.update({'{0}'.format(key): val})
    return message_dict
0.005833
def debug(message, level=1):
    """
    So we can tune how much debug output we get when we turn it on.
    """
    if level <= debug_level:
        logging.debug(' ' * (level - 1) * 2 + str(message))
0.004975
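A sketch of the level gate, assuming the module-level debug_level knob it reads is set to 2:

import logging
logging.basicConfig(level=logging.DEBUG)
debug_level = 2

debug("coarse detail")           # 1 <= 2: logged with no indent
debug("finer detail", level=2)   # 2 <= 2: logged, indented two spaces
debug("noise", level=3)          # 3 >  2: suppressed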
def unpack(packed):
    """
    Unpack the function and args, then apply the function to the arguments and return result

    :param packed: input packed tuple of (func, args)
    :return: result of applying packed function on packed args
    """
    func, args = serializer.loads(packed)
    result = func(*args)
    if isinstance(result, collections.abc.Iterable):
        return list(result)
    return None
0.00495
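A round-trip sketch, assuming the module-level serializer is pickle-compatible (dumps/loads):

import pickle as serializer  # stand-in for the real serializer

unpack(serializer.dumps((range, (3,))))        # -> [0, 1, 2]: iterable result is listified
unpack(serializer.dumps((max, ([4, 7, 2],))))  # -> None: 7 is not iterable, so it is dropped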
def _load_machines_cache(self):
    """This method should fill up `_machines_cache` from scratch.
    It could happen only in two cases:
    1. During class initialization
    2. When all etcd members failed"""

    self._update_machines_cache = True

    if 'srv' not in self._config and 'host' not in self._config and 'hosts' not in self._config:
        raise Exception('Neither srv, hosts, host nor url are defined in etcd section of config')

    self._machines_cache = self._get_machines_cache_from_config()

    # Can not bootstrap list of etcd-cluster members, giving up
    if not self._machines_cache:
        raise etcd.EtcdException

    # After filling up initial list of machines_cache we should ask etcd-cluster about actual list
    self._base_uri = self._next_server()
    self._refresh_machines_cache()

    self._update_machines_cache = False
    self._machines_cache_updated = time.time()
0.005144
def _hex_to_rgb(color: str) -> Tuple[int, ...]:
    """Convert hex color to RGB format.

    :param color: Hex color.
    :return: RGB tuple.
    """
    if color.startswith('#'):
        color = color.lstrip('#')
    return tuple(int(color[i:i + 2], 16) for i in (0, 2, 4))
0.006623
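Each pair of hex digits is parsed base-16:

_hex_to_rgb('#ff8800')  # -> (255, 136, 0)
_hex_to_rgb('008080')   # -> (0, 128, 128); the leading '#' is optional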
def line_spacing_rule(self):
    """
    A member of the :ref:`WdLineSpacing` enumeration indicating how the
    value of :attr:`line_spacing` should be interpreted. Assigning any of
    the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
    :attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
    to be updated to produce the corresponding line spacing.
    """
    pPr = self._element.pPr
    if pPr is None:
        return None
    return self._line_spacing_rule(
        pPr.spacing_line, pPr.spacing_lineRule
    )
0.003311
def docs(recreate, gen_index, run_doctests):
    # type: (bool, bool, bool) -> None
    """ Build the documentation for the project.

    Args:
        recreate (bool):
            If set to **True**, the build and output directories will be
            cleared prior to generating the docs.
        gen_index (bool):
            If set to **True**, it will generate top-level index file for
            the reference documentation.
        run_doctests (bool):
            Set to **True** if you want to run doctests after the
            documentation is generated.
        pretend (bool):
            If set to **True**, do not actually execute any shell commands,
            just print the command that would be executed.
    """
    build_dir = conf.get_path('build_dir', '.build')
    docs_dir = conf.get_path('docs.path', 'docs')
    refdoc_paths = conf.get('docs.reference', [])

    docs_html_dir = conf.get_path('docs.out', os.path.join(docs_dir, 'html'))
    docs_tests_dir = conf.get_path('docs.tests_out', os.path.join(docs_dir, 'doctest'))
    docs_build_dir = os.path.join(build_dir, 'docs')

    if recreate:
        for path in (docs_html_dir, docs_build_dir):
            if os.path.exists(path):
                log.info("<91>Deleting <94>{}".format(path))
                shutil.rmtree(path)

    if refdoc_paths:
        gen_ref_docs(gen_index)
    else:
        log.err('Not generating any reference documentation - '
                'No docs.reference specified in config')

    with conf.within_proj_dir(docs_dir):
        log.info('Building docs')
        shell.run('sphinx-build -b html -d {build} {docs} {out}'.format(
            build=docs_build_dir,
            docs=docs_dir,
            out=docs_html_dir,
        ))

        if run_doctests:
            log.info('Running doctests')
            shell.run('sphinx-build -b doctest -d {build} {docs} {out}'.format(
                build=docs_build_dir,
                docs=docs_dir,
                out=docs_tests_dir,
            ))

        log.info('You can view the docs by browsing to <34>file://{}'.format(
            os.path.join(docs_html_dir, 'index.html')
        ))
0.001367
def deserialize(self, obj, encoders=None, embedded=False, create_instance=True):
    """
    Deserializes a given object, i.e. converts references to other (known)
    `Document` objects by lazy instances of the corresponding class. This
    allows the automatic fetching of related documents from the database as
    required.

    :param obj: The object to be deserialized.

    :returns: The deserialized object.
    """
    if not encoders:
        encoders = []

    for encoder in encoders + self.standard_encoders:
        obj = encoder.decode(obj)

    if isinstance(obj, dict):
        if create_instance and '__collection__' in obj and obj['__collection__'] in self.collections and 'pk' in obj:
            # for backwards compatibility
            attributes = copy.deepcopy(obj)
            del attributes['__collection__']
            if '__ref__' in attributes:
                del attributes['__ref__']
            if '__lazy__' in attributes:
                lazy = attributes['__lazy__']
                del attributes['__lazy__']
            else:
                lazy = True
            output_obj = self.create_instance(obj['__collection__'], attributes, lazy=lazy)
        else:
            output_obj = {}
            for key, value in obj.items():
                output_obj[key] = self.deserialize(value, encoders=encoders)
    elif isinstance(obj, (list, tuple)):
        output_obj = list(map(lambda x: self.deserialize(x), obj))
    else:
        output_obj = obj
    return output_obj
0.007961
def load(self, callables_fname):
    r"""
    Load traced modules information from a `JSON <http://www.json.org/>`_ file.

    The loaded module information is merged with any existing module information

    :param callables_fname: File name
    :type  callables_fname: :ref:`FileNameExists`

    :raises:
     * OSError (File *[fname]* could not be found)

     * RuntimeError (Argument \`callables_fname\` is not valid)
    """
    # Validate file name
    _validate_fname(callables_fname)
    if not os.path.exists(callables_fname):
        raise OSError("File {0} could not be found".format(callables_fname))
    with open(callables_fname, "r") as fobj:
        fdict = json.load(fobj)
    if sys.hexversion < 0x03000000:  # pragma: no cover
        fdict = _unicode_to_ascii(fdict)
    self._callables_db.update(fdict["_callables_db"])
    # Reverse the tuple-to-string conversion that the save method
    # does due to the fact that JSON keys need to be strings and the
    # keys of the reverse callable dictionary are tuples where the first
    # item is a file name and the second item is the starting line of the
    # callable within that file (dictionary value)
    rdict = {}
    for key, value in fdict["_reverse_callables_db"].items():
        tokens = key[1:-1].split(",")
        key = tokens[0].strip()[1:-1]
        if platform.system().lower() == "windows":  # pragma: no cover
            while True:
                tmp = key
                key = key.replace("\\\\", "\\")
                if tmp == key:
                    break
        rdict[(key, int(tokens[1]))] = value
    self._reverse_callables_db.update(rdict)
    self._modules_dict.update(fdict["_modules_dict"])
    self._fnames.update(fdict["_fnames"])
    self._module_names.extend(fdict["_module_names"])
    self._class_names.extend(fdict["_class_names"])
    self._module_names = sorted(list(set(self._module_names)))
    self._class_names = sorted(list(set(self._class_names)))
0.002342
def node_factory(**row_factory_kw):
    """ Give new nodes a unique ID. """
    if "__table_editor__" in row_factory_kw:
        graph = row_factory_kw["__table_editor__"].object
        ID = make_unique_name("n", [node.ID for node in graph.nodes])
        del row_factory_kw["__table_editor__"]
        return godot.node.Node(ID)
    else:
        return godot.node.Node(uuid.uuid4().hex[:6])
0.002538
async def connect_controller(self, controller_name=None):
    """Connect to a controller by name. If the name is empty, it
    connects to the current controller.
    """
    if not controller_name:
        controller_name = self.jujudata.current_controller()
        if not controller_name:
            raise JujuConnectionError('No current controller')

    controller = self.jujudata.controllers()[controller_name]
    # TODO change Connection so we can pass all the endpoints
    # instead of just the first.
    endpoint = controller['api-endpoints'][0]
    accounts = self.jujudata.accounts().get(controller_name, {})

    await self.connect(
        endpoint=endpoint,
        uuid=None,
        username=accounts.get('user'),
        password=accounts.get('password'),
        cacert=controller.get('ca-cert'),
        bakery_client=self.bakery_client_for_controller(controller_name),
    )
    self.controller_name = controller_name
0.001969
def extension_elements_to_elements(extension_elements, schemas):
    """ Create a list of elements each one matching one of the given extension
    elements. This is of course dependent on the access to schemas that
    describe the extension elements.

    :param extension_elements: The list of extension elements
    :param schemas: Imported Python modules that represent the different
        known schemas used for the extension elements
    :return: A list of elements, representing the set of extension elements
        that was possible to match against a Class in the given schemas.
        The elements returned are the native representation of the elements
        according to the schemas.
    """
    res = []

    if isinstance(schemas, list):
        pass
    elif isinstance(schemas, dict):
        schemas = list(schemas.values())
    else:
        return res

    for extension_element in extension_elements:
        for schema in schemas:
            inst = extension_element_to_element(extension_element,
                                                schema.ELEMENT_FROM_STRING,
                                                schema.NAMESPACE)
            if inst:
                res.append(inst)
                break

    return res
0.000795
def calcTightAnchors(args, d, patches):
    """
    Recursively generates the number of anchor points specified in the
    patches argument, such that all patches are d cells away from their
    nearest neighbors.
    """
    centerPoint = (int(args.worldSize/2), int(args.worldSize/2))
    anchors = []

    if patches == 0:
        pass
    elif patches == 1:
        anchors.append(centerPoint)
    elif patches % 2 == 0:
        dsout = int((patches-2)//2) + 1
        add_anchors(centerPoint, d, dsout, anchors, True)
        if d != 0:
            anchors = list(set(anchors))
            anchors.sort()
        if dsout != 1:
            # slice to cut off the extras in the case where d=0
            return (anchors + calcTightAnchors(args, d, patches-2))[:patches*patches]
    else:
        # Note - an odd number of args.patchesPerSide requires that there be
        # a patch at the centerpoint
        dsout = int((patches-1)//2)
        add_anchors(centerPoint, d, dsout, anchors, False)
        if dsout != 1:
            return anchors + calcTightAnchors(args, d, patches-2)

    return anchors
0.00088
def spell(word: str, engine: str = "pn") -> List[str]:
    """
    :param str word: word to check spelling
    :param str engine:
        * pn - Peter Norvig's algorithm (default)
    :return: list of words
    """
    return DEFAULT_SPELL_CHECKER.spell(word)
0.003846
def _update(self, data, *args, **kwargs):
    """
    The only thing that *should* happen in this function is

    1. input sanitization for pandas
    2. classification/reclassification.

    Using their __init__ methods, all classifiers can re-classify given
    different input parameters or additional data.

    If you've got a cleverer updating equation than the initial estimation
    equation, remove the call to self.__init__ below and replace it with
    the updating function.
    """
    if data is not None:
        data = np.asarray(data).flatten()
        data = np.append(data.flatten(), self.y)
    else:
        data = self.y
    self.__init__(data, *args, **kwargs)
0.002681
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string

    Return a copy of the string s, where all characters occurring
    in the optional argument deletions are removed, and the
    remaining characters have been mapped through the given
    translation table, which must be a string of length 256.  The
    deletions argument is not allowed for Unicode strings.

    """
    if deletions or table is None:
        return s.translate(table, deletions)
    else:
        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
        # table is converted to Unicode.  This means that table *cannot*
        # be a dictionary -- for that feature, use u.translate() directly.
        return s.translate(table + s[:0])
0.001312
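This is the Python 2 string-module helper; a quick sketch with a 256-character table from string.maketrans (deletions are applied before the mapping):

# Python 2 usage sketch
import string

table = string.maketrans('abc', 'xyz')
translate('abcabc', table)                  # -> 'xyzxyz'
translate('abcabc', table, deletions='b')   # -> 'xzxz'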
def fan_maxcfm(ddtt):
    """return the fan max cfm"""
    if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize':
        # str can fail with unicode chars :-(
        return 'autosize'
    else:
        m3s = float(ddtt.Maximum_Flow_Rate)
        return m3s2cfm(m3s)
0.007634
def merge(arg, *rest, **kwargs):
    """Merge a collection, with functions as items, into a single function
    that takes a collection and maps its items through corresponding functions.

    :param arg: A collection of functions, such as list, tuple, or dictionary
    :param default: Optional default function to use for items
                    within merged function's arguments that do not have
                    corresponding functions in ``arg``

    Example with two-element tuple::

        >> dict_ = {'Alice': -5, 'Bob': 4}
        >> func = merge((str.upper, abs))
        >> dict(map(func, dict_.items()))
        {'ALICE': 5, 'BOB': 4}

    Example with a dictionary::

        >> func = merge({'id': int, 'name': str.split})
        >> data = [
            {'id': '1', 'name': "John Doe"},
            {'id': '2', 'name': "Anne Arbor"},
        ]
        >> list(map(func, data))
        [{'id': 1, 'name': ['John', 'Doe']},
         {'id': 2, 'name': ['Anne', 'Arbor']}]

    :return: Merged function

    .. versionadded:: 0.0.2
    """
    ensure_keyword_args(kwargs, optional=('default',))
    has_default = 'default' in kwargs
    if has_default:
        default = ensure_callable(kwargs['default'])

    # if more than one argument was given, they must all be functions;
    # result will be a function that takes multiple arguments (rather than
    # a single collection) and returns a tuple
    unary_result = True
    if rest:
        fs = (ensure_callable(arg),) + tuple(imap(ensure_callable, rest))
        unary_result = False
    else:
        fs = arg

    if is_mapping(fs):
        if has_default:
            return lambda arg_: fs.__class__((k, fs.get(k, default)(arg_[k]))
                                             for k in arg_)
        else:
            return lambda arg_: fs.__class__((k, fs[k](arg_[k]))
                                             for k in arg_)
    else:
        ensure_sequence(fs)
        if has_default:
            # we cannot use ``izip_longest(fs, arg_, fillvalue=default)``,
            # because we want to terminate the generator
            # only when ``arg_`` is exhausted (not when just ``fs`` is)
            func = lambda arg_: fs.__class__(
                (fs[i] if i < len(fs) else default)(x)
                for i, x in enumerate(arg_))
        else:
            # we cannot use ``izip(fs, arg_)`` because it would short-circuit
            # if ``arg_`` is longer than ``fs``, rather than raising
            # the required ``IndexError``
            func = lambda arg_: fs.__class__(fs[i](x)
                                             for i, x in enumerate(arg_))
        return func if unary_result else lambda *args: func(args)
0.001103
def get_table_content(self, table):
    """trick to get table content without actually writing it

    return an aligned list of lists containing table cells values as string
    """
    result = [[]]
    cols = table.cols
    for cell in self.compute_content(table):
        if cols == 0:
            result.append([])
            cols = table.cols
        cols -= 1
        result[-1].append(cell)
    # fill missing cells
    while len(result[-1]) < cols:
        result[-1].append("")
    return result
0.003534
def hr(self):
    """compute (height, round)

    We might have multiple rounds before we see consensus for a certain
    height. If everything is good, round should always be 0.
    """
    assert len(self), 'no votes, can not determine height'
    h = set([(v.height, v.round) for v in self.votes])
    assert len(h) == 1, len(h)
    return h.pop()
0.007895
def _IsPresent(item):
    """Given a (FieldDescriptor, value) tuple from _fields, return true if the
    value should be included in the list returned by ListFields()."""
    if item[0].label == _FieldDescriptor.LABEL_REPEATED:
        return bool(item[1])
    elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
        return item[1]._is_present_in_parent
    else:
        return True
0.013405
def processPointOfSalePayment(request):
    '''
    This view handles the callbacks from point-of-sale transactions.
    Please note that this will only work if you have set up your callback
    URL in Square to point to this view.
    '''
    print('Request data is: %s' % request.GET)

    # iOS transactions put all response information in the data key:
    data = json.loads(request.GET.get('data', '{}'))
    if data:
        status = data.get('status')
        errorCode = data.get('error_code')
        errorDescription = errorCode

        try:
            stateData = data.get('state', '')
            if stateData:
                metadata = json.loads(b64decode(unquote(stateData).encode()).decode())
            else:
                metadata = {}
        except (TypeError, ValueError, binascii.Error):
            logger.error('Invalid metadata passed from Square app.')
            messages.error(
                request,
                format_html(
                    '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                    str(_('ERROR: Error with Square point of sale transaction attempt.')),
                    str(_('Invalid metadata passed from Square app.')),
                )
            )
            return HttpResponseRedirect(reverse('showRegSummary'))

        # This is the normal transaction identifier, which will be stored in the
        # database as a SquarePaymentRecord
        serverTransId = data.get('transaction_id')

        # This is the only identifier passed for non-card transactions.
        clientTransId = data.get('client_transaction_id')
    else:
        # Android transactions use this GET response syntax
        errorCode = request.GET.get('com.squareup.pos.ERROR_CODE')
        errorDescription = request.GET.get('com.squareup.pos.ERROR_DESCRIPTION')
        status = 'ok' if not errorCode else 'error'

        # This is the normal transaction identifier, which will be stored in the
        # database as a SquarePaymentRecord
        serverTransId = request.GET.get('com.squareup.pos.SERVER_TRANSACTION_ID')

        # This is the only identifier passed for non-card transactions.
        clientTransId = request.GET.get('com.squareup.pos.CLIENT_TRANSACTION_ID')

        # Load the metadata, which includes the registration or invoice ids
        try:
            stateData = request.GET.get('com.squareup.pos.REQUEST_METADATA', '')
            if stateData:
                metadata = json.loads(b64decode(unquote(stateData).encode()).decode())
            else:
                metadata = {}
        except (TypeError, ValueError, binascii.Error):
            logger.error('Invalid metadata passed from Square app.')
            messages.error(
                request,
                format_html(
                    '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                    str(_('ERROR: Error with Square point of sale transaction attempt.')),
                    str(_('Invalid metadata passed from Square app.')),
                )
            )
            return HttpResponseRedirect(reverse('showRegSummary'))

    # Other things that can be passed in the metadata
    sourceUrl = metadata.get('sourceUrl', reverse('showRegSummary'))
    successUrl = metadata.get('successUrl', reverse('registration'))
    submissionUserId = metadata.get('userId', getattr(getattr(request, 'user', None), 'id', None))
    transactionType = metadata.get('transaction_type')
    taxable = metadata.get('taxable', False)
    addSessionInfo = metadata.get('addSessionInfo', False)
    customerEmail = metadata.get('customerEmail')

    if errorCode or status != 'ok':
        # Return the user to their original page with the error message displayed.
        logger.error(
            'Error with Square point of sale transaction attempt. '
            'CODE: %s; DESCRIPTION: %s' % (errorCode, errorDescription)
        )
        messages.error(
            request,
            format_html(
                '<p>{}</p><ul><li><strong>CODE:</strong> {}</li><li><strong>DESCRIPTION:</strong> {}</li></ul>',
                str(_('ERROR: Error with Square point of sale transaction attempt.')),
                errorCode, errorDescription
            )
        )
        return HttpResponseRedirect(sourceUrl)

    api_instance = TransactionsApi()
    api_instance.api_client.configuration.access_token = getattr(settings, 'SQUARE_ACCESS_TOKEN', '')
    location_id = getattr(settings, 'SQUARE_LOCATION_ID', '')

    if serverTransId:
        try:
            api_response = api_instance.retrieve_transaction(transaction_id=serverTransId, location_id=location_id)
        except ApiException:
            logger.error('Unable to find Square transaction by server ID.')
            messages.error(request, _('ERROR: Unable to find Square transaction by server ID.'))
            return HttpResponseRedirect(sourceUrl)
        if api_response.errors:
            logger.error('Unable to find Square transaction by server ID: %s' % api_response.errors)
            messages.error(request, str(_('ERROR: Unable to find Square transaction by server ID:')) + api_response.errors)
            return HttpResponseRedirect(sourceUrl)
        transaction = api_response.transaction
    elif clientTransId:
        # Try to find the transaction in the 50 most recent transactions
        try:
            api_response = api_instance.list_transactions(location_id=location_id)
        except ApiException:
            logger.error('Unable to find Square transaction by client ID.')
            messages.error(request, _('ERROR: Unable to find Square transaction by client ID.'))
            return HttpResponseRedirect(sourceUrl)
        if api_response.errors:
            logger.error('Unable to find Square transaction by client ID: %s' % api_response.errors)
            messages.error(request, str(_('ERROR: Unable to find Square transaction by client ID:')) + api_response.errors)
            return HttpResponseRedirect(sourceUrl)
        transactions_list = [x for x in api_response.transactions if x.client_id == clientTransId]
        if len(transactions_list) == 1:
            transaction = transactions_list[0]
        else:
            logger.error('Returned client transaction ID not found.')
            messages.error(request, _('ERROR: Returned client transaction ID not found.'))
            return HttpResponseRedirect(sourceUrl)
    else:
        logger.error('An unknown error has occurred with Square point of sale transaction attempt.')
        messages.error(request, _('ERROR: An unknown error has occurred with Square point of sale transaction attempt.'))
        return HttpResponseRedirect(sourceUrl)

    # Get total information from the transaction for handling invoice.
    this_total = sum([x.amount_money.amount / 100 for x in transaction.tenders or []]) - \
        sum([x.amount_money.amount / 100 for x in transaction.refunds or []])

    # Parse if a specific submission user is indicated
    submissionUser = None
    if submissionUserId:
        try:
            submissionUser = User.objects.get(id=int(submissionUserId))
        except (ValueError, ObjectDoesNotExist):
            logger.warning('Invalid user passed, submissionUser will not be recorded.')

    if 'registration' in metadata.keys():
        try:
            tr_id = int(metadata.get('registration'))
            tr = TemporaryRegistration.objects.get(id=tr_id)
        except (ValueError, TypeError, ObjectDoesNotExist):
            logger.error('Invalid registration ID passed: %s' % metadata.get('registration'))
            messages.error(
                request,
                str(_('ERROR: Invalid registration ID passed')) + ': %s' % metadata.get('registration')
            )
            return HttpResponseRedirect(sourceUrl)

        tr.expirationDate = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes'))
        tr.save()
        this_invoice = Invoice.get_or_create_from_registration(tr, submissionUser=submissionUser)
        this_description = _('Registration Payment: #%s' % tr_id)
    elif 'invoice' in metadata.keys():
        try:
            this_invoice = Invoice.objects.get(id=int(metadata.get('invoice')))
            this_description = _('Invoice Payment: %s' % this_invoice.id)
        except (ValueError, TypeError, ObjectDoesNotExist):
            logger.error('Invalid invoice ID passed: %s' % metadata.get('invoice'))
            messages.error(
                request,
                str(_('ERROR: Invalid invoice ID passed')) + ': %s' % metadata.get('invoice')
            )
            return HttpResponseRedirect(sourceUrl)
    else:
        # Gift certificates automatically get a nicer invoice description
        if transactionType == 'Gift Certificate':
            this_description = _('Gift Certificate Purchase')
        else:
            this_description = transactionType
        this_invoice = Invoice.create_from_item(
            this_total,
            this_description,
            submissionUser=submissionUser,
            calculate_taxes=(taxable is not False),
            transactionType=transactionType,
        )

    paymentRecord, created = SquarePaymentRecord.objects.get_or_create(
        transactionId=transaction.id,
        locationId=transaction.location_id,
        defaults={'invoice': this_invoice, }
    )
    if created:
        # We process the payment now, and enqueue the job to retrieve the
        # transaction again once fees have been calculated by Square
        this_invoice.processPayment(
            amount=this_total,
            fees=0,
            paidOnline=True,
            methodName='Square Point of Sale',
            methodTxn=transaction.id,
            notify=customerEmail,
        )
        updateSquareFees.schedule(args=(paymentRecord,), delay=60)

    if addSessionInfo:
        paymentSession = request.session.get(INVOICE_VALIDATION_STR, {})
        paymentSession.update({
            'invoiceID': str(this_invoice.id),
            'amount': this_total,
            'successUrl': successUrl,
        })
        request.session[INVOICE_VALIDATION_STR] = paymentSession

    return HttpResponseRedirect(successUrl)
0.005854
def _format_with_same_year_and_month(format_specifier):
    """
    Return a version of `format_specifier` that renders a date assuming it has
    the same year and month as another date.

    Usually this means omitting the year and month. This can be overridden by
    specifying a format that has `_SAME_YEAR_SAME_MONTH` appended to the name
    in the project's `formats` spec.
    """
    test_format_specifier = format_specifier + "_SAME_YEAR_SAME_MONTH"
    test_format = get_format(test_format_specifier, use_l10n=True)
    if test_format == test_format_specifier:
        # this format string didn't resolve to anything and may be a raw format.
        # Use a regex to remove year and month markers instead.
        no_year = re.sub(YEAR_RE, '', get_format(format_specifier))
        return re.sub(MONTH_RE, '', no_year)
    else:
        return test_format
0.005714
def p_boolean_expr(self, p):
    '''boolean_expr : expr AND expr
                    | expr AMPERSAND expr
                    | expr OR expr
                    | expr IMPLY expr
                    | expr EQUIV expr
                    | NOT expr %prec UMINUS
                    | bool_type'''
    if len(p) == 4:
        p[0] = (p[2], (p[1], p[3]))
    elif len(p) == 3:
        p[0] = (p[1], (p[2],))
    elif len(p) == 2:
        p[0] = ('boolean', p[1])
0.003906
def extract(self, destination):
    """Extracts the contents of the archive to the specified directory.

    Args:
        destination (str): Path to an empty directory to extract the files to.
    """
    if os.path.exists(destination):
        raise OSError(20, 'Destination exists', destination)

    self.__extract_directory(
        '.',
        self.files['files'],
        destination
    )
0.004376
def generator(name):
    """ Return generator by its name

    :param name: name of hash-generator
    :return: WHashGeneratorProto class
    """
    name = name.upper()
    if name not in WHash.__hash_map__.keys():
        raise ValueError('Hash generator "%s" not available' % name)
    return WHash.__hash_map__[name]
0.033113
def _detect(ip, _isnm):
    """Function internally used to detect the notation of the
    given IP or netmask."""
    ip = str(ip)
    if len(ip) > 1:
        if ip[0:2] == '0x':
            if _CHECK_FUNCT[IP_HEX][_isnm](ip):
                return IP_HEX
        elif ip[0] == '0':
            if _CHECK_FUNCT[IP_OCT][_isnm](ip):
                return IP_OCT
    if _CHECK_FUNCT[IP_DOT][_isnm](ip):
        return IP_DOT
    elif _isnm and _CHECK_FUNCT[NM_BITS][_isnm](ip):
        return NM_BITS
    elif _CHECK_FUNCT[IP_DEC][_isnm](ip):
        return IP_DEC
    elif _isnm and _CHECK_FUNCT[NM_WILDCARD][_isnm](ip):
        return NM_WILDCARD
    elif _CHECK_FUNCT[IP_BIN][_isnm](ip):
        return IP_BIN
    return IP_UNKNOWN
0.001364
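A sketch of the notation codes returned, assuming the module-level constants (IP_DOT, IP_HEX, NM_BITS, ...) and check table of an iplib-style module:

_detect('192.168.0.1', _isnm=0)  # -> IP_DOT  (dotted-quad address)
_detect('0xC0A80001', _isnm=0)   # -> IP_HEX  (hexadecimal notation)
_detect('24', _isnm=1)           # -> NM_BITS (CIDR bit count, netmasks only)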
def update_security_group_rule(context, id, security_group_rule):
    '''Updates a rule and updates the ports'''
    LOG.info("update_security_group_rule for tenant %s" %
             (context.tenant_id))
    new_rule = security_group_rule["security_group_rule"]
    # Only allow updatable fields
    new_rule = _filter_update_security_group_rule(new_rule)

    with context.session.begin():
        rule = db_api.security_group_rule_find(context, id=id,
                                               scope=db_api.ONE)
        if not rule:
            raise sg_ext.SecurityGroupRuleNotFound(id=id)

        db_rule = db_api.security_group_rule_update(context, rule, **new_rule)

        group_id = db_rule.group_id
        group = db_api.security_group_find(context, id=group_id,
                                           scope=db_api.ONE)
        if not group:
            raise sg_ext.SecurityGroupNotFound(id=group_id)

        if group:
            _perform_async_update_rule(context, group_id, group, rule.id,
                                       RULE_UPDATE)

    return v._make_security_group_rule_dict(db_rule)
0.000901
def parse_routing_info(cls, records):
    """ Parse the records returned from a getServers call and
    return a new RoutingTable instance.
    """
    if len(records) != 1:
        raise RoutingProtocolError("Expected exactly one record")
    record = records[0]
    routers = []
    readers = []
    writers = []
    try:
        servers = record["servers"]
        for server in servers:
            role = server["role"]
            addresses = []
            for address in server["addresses"]:
                addresses.append(SocketAddress.parse(address, DEFAULT_PORT))
            if role == "ROUTE":
                routers.extend(addresses)
            elif role == "READ":
                readers.extend(addresses)
            elif role == "WRITE":
                writers.extend(addresses)
        ttl = record["ttl"]
    except (KeyError, TypeError):
        raise RoutingProtocolError("Cannot parse routing info")
    else:
        return cls(routers, readers, writers, ttl)
0.002737
def _extract(self):  # pragma: no cover
    """
    Extract the expiration date from the whois record.

    :return: The status of the domain.
    :rtype: str
    """

    # We try to get the expiration date from the database.
    expiration_date_from_database = Whois().get_expiration_date()

    if expiration_date_from_database:
        # The hash of the current whois record did not change and the
        # expiration date from the database is not empty, not equal to
        # None or False.

        # We generate the files and print the status.
        # It's an active element!
        Generate(
            PyFunceble.STATUS["official"]["up"],
            "WHOIS",
            expiration_date_from_database,
        ).status_file()

        # We handle and return the official up status.
        return PyFunceble.STATUS["official"]["up"]

    # We get the whois record.
    self.whois_record = Lookup().whois(PyFunceble.INTERN["referer"])

    # We list the regexes which will help us get an unformatted expiration date.
    to_match = [
        r"expire:(.*)",
        r"expire on:(.*)",
        r"Expiry Date:(.*)",
        r"free-date(.*)",
        r"expires:(.*)",
        r"Expiration date:(.*)",
        r"Expiry date:(.*)",
        r"Expire Date:(.*)",
        r"renewal date:(.*)",
        r"Expires:(.*)",
        r"validity:(.*)",
        r"Expiration Date :(.*)",
        r"Expiry :(.*)",
        r"expires at:(.*)",
        r"domain_datebilleduntil:(.*)",
        r"Data de expiração \/ Expiration Date \(dd\/mm\/yyyy\):(.*)",
        r"Fecha de expiración \(Expiration date\):(.*)",
        r"\[Expires on\](.*)",
        r"Record expires on(.*)(\(YYYY-MM-DD\))",
        r"status: OK-UNTIL(.*)",
        r"renewal:(.*)",
        r"expires............:(.*)",
        r"expire-date:(.*)",
        r"Exp date:(.*)",
        r"Valid-date(.*)",
        r"Expires On:(.*)",
        r"Fecha de vencimiento:(.*)",
        r"Expiration:.........(.*)",
        r"Fecha de Vencimiento:(.*)",
        r"Registry Expiry Date:(.*)",
        r"Expires on..............:(.*)",
        r"Expiration Time:(.*)",
        r"Expiration Date:(.*)",
        r"Expired:(.*)",
        r"Date d'expiration:(.*)",
        r"expiration date:(.*)",
    ]

    if self.whois_record:
        # The whois record is not empty.

        if "current_test_data" in PyFunceble.INTERN:
            # The end-user wants more information with his test.

            # We update the whois_record index.
            PyFunceble.INTERN["current_test_data"]["whois_record"] = self.whois_record

        for string in to_match:
            # We loop through the list of regexes.

            # We try to extract the expiration date from the WHOIS record.
            expiration_date = Regex(
                self.whois_record, string, return_data=True, rematch=True, group=0
            ).match()

            if expiration_date:
                # The expiration date could be extracted.

                # We get the extracted expiration date.
                self.expiration_date = expiration_date[0].strip()

                # We initiate a regex which will help us know if a number
                # is present in the extracted expiration date.
                regex_rumbers = r"[0-9]"

                if Regex(
                    self.expiration_date, regex_rumbers, return_data=False
                ).match():
                    # The extracted expiration date has a number.

                    # We format the extracted expiration date.
                    self.expiration_date = self._format()

                    if (
                        self.expiration_date
                        and not Regex(
                            self.expiration_date,
                            r"[0-9]{2}\-[a-z]{3}\-2[0-9]{3}",
                            return_data=False,
                        ).match()
                    ):
                        # The formatted expiration date does not match our unified format.

                        # We log the problem.
                        Logs().expiration_date(self.expiration_date)

                        # We log the whois record.
                        Logs().whois(self.whois_record)

                    if "current_test_data" in PyFunceble.INTERN:
                        # The end-user wants more information with his test.

                        # We update the expiration_date index.
                        PyFunceble.INTERN["current_test_data"]["expiration_date"] = self.expiration_date

                    # We generate the files and print the status.
                    # It's an active element!
                    Generate(
                        PyFunceble.STATUS["official"]["up"],
                        "WHOIS",
                        self.expiration_date,
                    ).status_file()

                    # We log the whois record.
                    Logs().whois(self.whois_record)

                    # We save the whois record into the database.
                    Whois(expiration_date=self.expiration_date).add()

                    # We handle and return the official up status.
                    return PyFunceble.STATUS["official"]["up"]

                # The extracted expiration date does not have a number.

                # We log the whois record.
                Logs().whois(self.whois_record)

                # We return None, we could not get the expiration date.
                return None

    # The whois record is empty.

    # We return None, we could not get the whois record.
    return None
0.000965
def write_phosphopath(df, f, extra_columns=None):
    """
    Write out the data frame of phosphosites in the following format::

        protein, protein-Rsite, Rsite, multiplicity
        Q13619   Q13619-S10     S10    1
        Q9H3Z4   Q9H3Z4-S10     S10    1
        Q6GQQ9   Q6GQQ9-S100    S100   1
        Q86YP4   Q86YP4-S100    S100   1
        Q9H307   Q9H307-S100    S100   1
        Q8NEY1   Q8NEY1-S1000   S1000  1

    The file is written as a tab-separated file to file ``f``.

    :param df:
    :param f:
    :return:
    """
    proteins = [_protein_id(k) for k in df.index.get_level_values('Proteins')]
    amino_acids = df.index.get_level_values('Amino acid')
    positions = _get_positions(df)
    multiplicity = [k[-1] for k in df.index.get_level_values('Multiplicity')]
    apos = ["%s%s" % x for x in zip(amino_acids, positions)]
    prar = ["%s-%s" % x for x in zip(proteins, apos)]

    phdf = pd.DataFrame(np.array(list(zip(proteins, prar, apos, multiplicity))))

    if extra_columns:
        for c in extra_columns:
            phdf[c] = df[c].values

    phdf.to_csv(f, sep='\t', index=None, header=None)
0.00182
def removePeer(self, url):
    """ Remove peers by URL. """
    q = models.Peer.delete().where(
        models.Peer.url == url)
    q.execute()
0.011429
def response_add(self, request, obj):
    """
    Enforce page permissions and maintain the parent ID in the
    querystring.
    """
    response = super(PageAdmin, self).response_add(request, obj)
    return self._maintain_parent(request, response)
0.007299
def fast_corr(x, y=None, destination=None):
    """calculate the pearson correlation matrix for the columns of x (with
    dimensions MxN), or optionally, the pearson correlation matrix between x
    and y (with dimensions OxP).  If destination is provided, put the results
    there.  In the language of statistics the columns are the variables and
    the rows are the observations.

    Args:
        x (numpy array-like) MxN in shape
        y (optional, numpy array-like) OxP in shape.  M (# rows in x) must
            equal O (# rows in y)
        destination (numpy array-like) optional location where to store the
            results as they are calculated (e.g. a numpy memmap of a file)

    returns (numpy array-like) array of the correlation values
        for defaults (y=None), shape is NxN
        if y is provided, shape is NxP
    """
    if y is None:
        y = x

    r = fast_cov.fast_cov(x, y, destination)

    std_x = numpy.std(x, axis=0, ddof=1)
    std_y = numpy.std(y, axis=0, ddof=1)

    numpy.divide(r, std_x[:, numpy.newaxis], out=r)
    numpy.divide(r, std_y[numpy.newaxis, :], out=r)

    return r
0.006239
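A quick consistency check against numpy.corrcoef, assuming fast_cov.fast_cov returns the sample covariance (ddof=1) of the columns:

import numpy

x = numpy.random.default_rng(0).normal(size=(50, 4))
r = fast_corr(x)
assert numpy.allclose(r, numpy.corrcoef(x, rowvar=False))  # columns as variables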
def preprocess_with_pca(adata, n_pcs=None, random_state=0):
    """
    Parameters
    ----------
    n_pcs : `int` or `None`, optional (default: `None`)
        If `n_pcs=0`, do not preprocess with PCA.
        If `None` and there is a PCA version of the data, use this.
        If an integer, compute the PCA.
    """
    if n_pcs == 0:
        logg.info('    using data matrix X directly (no PCA)')
        return adata.X
    elif n_pcs is None and 'X_pca' in adata.obsm_keys():
        logg.info('    using \'X_pca\' with n_pcs = {}'
                  .format(adata.obsm['X_pca'].shape[1]))
        return adata.obsm['X_pca']
    elif ('X_pca' in adata.obsm_keys()
          and adata.obsm['X_pca'].shape[1] >= n_pcs):
        logg.info('    using \'X_pca\' with n_pcs = {}'
                  .format(n_pcs))
        return adata.obsm['X_pca'][:, :n_pcs]
    else:
        n_pcs = N_PCS if n_pcs is None else n_pcs
        if adata.X.shape[1] > n_pcs:
            logg.info('    computing \'X_pca\' with n_pcs = {}'.format(n_pcs))
            logg.hint('avoid this by setting n_pcs = 0')
            X = pca(adata.X, n_comps=n_pcs, random_state=random_state)
            adata.obsm['X_pca'] = X
            return X
        else:
            logg.info('    using data matrix X directly (no PCA)')
            return adata.X
0.000754
def readf(prompt, default=None, minval=None, maxval=None,
          allowed_single_chars=None, question_mark=True):
    """Return float value read from keyboard

    Parameters
    ----------
    prompt : str
        Prompt string.
    default : float or None
        Default value.
    minval : float or None
        Minimum allowed value.
    maxval : float or None
        Maximum allowed value.
    allowed_single_chars : str
        String containing allowed valid characters.
    question_mark : bool
        If True, display question mark after prompt.

    Returns
    -------
    result : float
        Read value.
    """

    return read_value(ftype=float,
                      prompt=prompt,
                      default=default,
                      minval=minval,
                      maxval=maxval,
                      allowed_single_chars=allowed_single_chars,
                      question_mark=question_mark)
0.001066
def snapengage(parser, token):
    """
    SnapEngage set-up template tag.

    Renders Javascript code to set-up SnapEngage chat.  You must supply
    your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
    """
    bits = token.split_contents()
    if len(bits) > 1:
        raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
    return SnapEngageNode()
0.002703
def _load_names(self) -> List[str]:
    """Return list of thirdparty modules from requirements
    """
    names = []
    for path in self._get_files():
        for name in self._get_names(path):
            names.append(self._normalize_name(name))
    return names
0.006826
def worker_start(obj, queues, name, celery_args):
    """ Start a worker process.

    \b
    CELERY_ARGS: Additional Celery worker command line arguments.
    """
    try:
        start_worker(queues=queues.split(','),
                     config=obj['config'],
                     name=name,
                     celery_args=celery_args)
    except DataStoreNotConnected:
        click.echo(_style(obj['show_color'],
                          'Cannot connect to the Data Store server. Is the server running?',
                          fg='red', bold=True))
0.003571
def to_png(data, size, level=6, output=None):
    # type: (bytes, Tuple[int, int], int, Optional[str]) -> Optional[bytes]
    """
    Dump data to a PNG file.  If `output` is `None`, create no file but return
    the whole PNG data.

    :param bytes data: RGBRGB...RGB data.
    :param tuple size: The (width, height) pair.
    :param int level: PNG compression level.
    :param str output: Output file name.
    """
    width, height = size
    line = width * 3
    png_filter = struct.pack(">B", 0)
    scanlines = b"".join(
        [png_filter + data[y * line : y * line + line] for y in range(height)]
    )

    magic = struct.pack(">8B", 137, 80, 78, 71, 13, 10, 26, 10)

    # Header: size, marker, data, CRC32
    ihdr = [b"", b"IHDR", b"", b""]
    ihdr[2] = struct.pack(">2I5B", width, height, 8, 2, 0, 0, 0)
    ihdr[3] = struct.pack(">I", zlib.crc32(b"".join(ihdr[1:3])) & 0xFFFFFFFF)
    ihdr[0] = struct.pack(">I", len(ihdr[2]))

    # Data: size, marker, data, CRC32
    idat = [b"", b"IDAT", zlib.compress(scanlines, level), b""]
    idat[3] = struct.pack(">I", zlib.crc32(b"".join(idat[1:3])) & 0xFFFFFFFF)
    idat[0] = struct.pack(">I", len(idat[2]))

    # Footer: size, marker, None, CRC32
    iend = [b"", b"IEND", b"", b""]
    iend[3] = struct.pack(">I", zlib.crc32(iend[1]) & 0xFFFFFFFF)
    iend[0] = struct.pack(">I", len(iend[2]))

    if not output:
        # Returns raw bytes of the whole PNG data
        return magic + b"".join(ihdr + idat + iend)

    with open(output, "wb") as fileh:
        fileh.write(magic)
        fileh.write(b"".join(ihdr))
        fileh.write(b"".join(idat))
        fileh.write(b"".join(iend))
        return None
0.001195
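The data argument is raw RGB triples row by row, so a 2x2 solid-red image is just four ff0000 pixels:

red = b"\xff\x00\x00"
png_bytes = to_png(red * 4, (2, 2))            # in-memory PNG
to_png(red * 4, (2, 2), output="red.png")      # or write straight to disk
assert png_bytes[:8] == b"\x89PNG\r\n\x1a\n"   # PNG signature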
def print_code(co, lasti=-1, level=0):
    """Disassemble a code object."""
    code = co.co_code

    for constant in co.co_consts:
        print('| |' * level, end=' ')
        print('constant:', constant)

    labels = findlabels(code)
    linestarts = dict(findlinestarts(co))
    n = len(code)
    i = 0
    extended_arg = 0
    free = None
    while i < n:
        have_inner = False
        c = code[i]
        op = co_ord(c)

        if i in linestarts:
            if i > 0:
                print()
            print('| |' * level, end=' ')
            print("%3d" % linestarts[i], end=' ')
        else:
            print('| |' * level, end=' ')
            print('   ', end=' ')

        if i == lasti:
            print('-->', end=' ')
        else:
            print('   ', end=' ')
        if i in labels:
            print('>>', end=' ')
        else:
            print('  ', end=' ')
        print(repr(i).rjust(4), end=' ')
        print(opcode.opname[op].ljust(20), end=' ')
        i = i + 1
        if op >= opcode.HAVE_ARGUMENT:
            oparg = co_ord(code[i]) + co_ord(code[i + 1]) * 256 + extended_arg
            extended_arg = 0
            i = i + 2
            if op == opcode.EXTENDED_ARG:
                extended_arg = oparg * 65536

            print(repr(oparg).rjust(5), end=' ')
            if op in opcode.hasconst:
                print('(' + repr(co.co_consts[oparg]) + ')', end=' ')
                if type(co.co_consts[oparg]) == types.CodeType:
                    have_inner = co.co_consts[oparg]
            elif op in opcode.hasname:
                print('(' + co.co_names[oparg] + ')', end=' ')
            elif op in opcode.hasjrel:
                print('(to ' + repr(i + oparg) + ')', end=' ')
            elif op in opcode.haslocal:
                print('(' + co.co_varnames[oparg] + ')', end=' ')
            elif op in opcode.hascompare:
                print('(' + opcode.cmp_op[oparg] + ')', end=' ')
            elif op in opcode.hasfree:
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                print('(' + free[oparg] + ')', end=' ')
        print()

        if have_inner is not False:
            print_code(have_inner, level=level + 1)
0.010601
def get_prep_value(self, value):
    """Convert JSON object to a string"""
    if self.null and value is None:
        return None
    return json.dumps(value, **self.dump_kwargs)
0.010256
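A sketch of the behavior on a hypothetical field instance (the JSONField constructor shown is illustrative; null and dump_kwargs come from the field definition):

field = JSONField(null=True)  # hypothetical field carrying this get_prep_value

field.get_prep_value({'a': 1, 'b': [2, 3]})  # -> '{"a": 1, "b": [2, 3]}'
field.get_prep_value(None)                   # -> None: null=True passes NULL through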
def log_target_types(all_logs=False, **kwargs):
    """
    Log targets for log tasks. A log target defines the log types
    that will be affected by the operation. For example, when creating
    a DeleteLogTask, you can specify which log types are deleted.

    :param bool for_alert_event_log: alert events traces (default: False)
    :param bool for_alert_log: alerts (default: False)
    :param bool for_audit_log: audit logs (default: False)
    :param bool for_fw_log: FW logs (default: False)
    :param bool for_ips_log: IPS logs (default: False)
    :param bool for_ips_recording_log: any IPS pcaps (default: False)
    :param bool for_l2fw_log: layer 2 FW logs (default: False)
    :param bool for_third_party_log: any 3rd party logs (default: False)
    :return: dict of log targets
    """
    log_types = {
        'for_alert_event_log': False,
        'for_alert_log': False,
        'for_audit_log': False,
        'for_fw_log': False,
        'for_ips_log': False,
        'for_ips_recording_log': False,
        'for_l2fw_log': False,
        'for_third_party_log': False}

    if all_logs:
        for key in log_types.keys():
            log_types[key] = True
    else:
        for key, value in kwargs.items():
            log_types[key] = value

    return log_types
0.003234
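A usage sketch; log_target_types only builds a dict, so it runs standalone.

targets = log_target_types(for_fw_log=True, for_ips_log=True)
assert targets['for_fw_log'] and not targets['for_alert_log']
everything = log_target_types(all_logs=True)  # every flag forced to True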
def remove(name, stop=False): ''' Remove the named container .. warning:: This function will remove all data associated with the container. It will not, however, remove the btrfs subvolumes created by pulling container images (:mod:`nspawn.pull_raw <salt.modules.nspawn.pull_raw>`, :mod:`nspawn.pull_tar <salt.modules.nspawn.pull_tar>`, :mod:`nspawn.pull_dkr <salt.modules.nspawn.pull_dkr>`). stop : False If ``True``, the container will be destroyed even if it is running/frozen. CLI Examples: .. code-block:: bash salt '*' nspawn.remove foo salt '*' nspawn.remove foo stop=True ''' if not stop and state(name) != 'stopped': raise CommandExecutionError( 'Container \'{0}\' is not stopped'.format(name) ) def _failed_remove(name, exc): raise CommandExecutionError( 'Unable to remove container \'{0}\': {1}'.format(name, exc) ) if _sd_version() >= 219: ret = _machinectl('remove {0}'.format(name)) if ret['retcode'] != 0: __context__['retcode'] = salt.defaults.exitcodes.EX_UNAVAILABLE _failed_remove(name, ret['stderr']) else: try: shutil.rmtree(os.path.join(_root(), name)) except OSError as exc: _failed_remove(name, exc) return True
0.000711
def add_greenlet_name( _logger: str, _method_name: str, event_dict: Dict[str, Any], ) -> Dict[str, Any]: """Add greenlet_name to the event dict for greenlets that have a non-default name.""" current_greenlet = gevent.getcurrent() greenlet_name = getattr(current_greenlet, 'name', None) if greenlet_name is not None and not greenlet_name.startswith('Greenlet-'): event_dict['greenlet_name'] = greenlet_name return event_dict
0.004211
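A sketch of the processor in isolation, invoked the way structlog would invoke it; the settable Greenlet.name attribute is an assumption about the gevent version in use, and default-named greenlets ('Greenlet-N') are deliberately skipped by the function.

import gevent

g = gevent.spawn(lambda: add_greenlet_name(None, None, {}))
g.name = 'payment-worker'  # set before the hub first schedules the greenlet
print(g.get())             # {'greenlet_name': 'payment-worker'}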
def check_correct_audience(self, audience): "Assert that Dataporten sends back our own client id as audience" client_id, _ = self.get_key_and_secret() if audience != client_id: raise AuthException('Wrong audience')
0.008
def dump(self, obj, key=None): """Write a pickled representation of obj to the open TFile.""" if key is None: key = '_pickle' with preserve_current_directory(): self.__file.cd() if sys.version_info[0] < 3: pickle.Pickler.dump(self, obj) else: super(Pickler, self).dump(obj) s = ROOT.TObjString(self.__io.getvalue()) self.__io.reopen() s.Write(key) self.__file.GetFile().Flush() self.__pmap.clear()
0.003565
def plot(self, data, color='k', symbol=None, line_kind='-', width=1., marker_size=10., edge_color='k', face_color='b', edge_width=1., title=None, xlabel=None, ylabel=None): """Plot a series of data using lines and markers Parameters ---------- data : array | two arrays Arguments can be passed as ``(Y,)``, ``(X, Y)`` or ``np.array((X, Y))``. color : instance of Color Color of the line. symbol : str Marker symbol to use. line_kind : str Kind of line to draw. For now, only solid lines (``'-'``) are supported. width : float Line width. marker_size : float Marker size. If `size == 0` markers will not be shown. edge_color : instance of Color Color of the marker edge. face_color : instance of Color Color of the marker face. edge_width : float Edge width of the marker. title : str | None The title string to be displayed above the plot xlabel : str | None The label to display along the bottom axis ylabel : str | None The label to display along the left axis. Returns ------- line : instance of LinePlot The line plot. See also -------- marker_types, LinePlot """ self._configure_2d() line = scene.LinePlot(data, connect='strip', color=color, symbol=symbol, line_kind=line_kind, width=width, marker_size=marker_size, edge_color=edge_color, face_color=face_color, edge_width=edge_width) self.view.add(line) self.view.camera.set_range() self.visuals.append(line) if title is not None: self.title.text = title if xlabel is not None: self.xlabel.text = xlabel if ylabel is not None: self.ylabel.text = ylabel return line
0.001848
def _weightfun_spatial_distance(data, params, report): """ Creates the weights for the spatial distance method. See func: teneto.derive.derive. """ distance = getDistanceFunction(params['distance']) weights = np.array([distance(data[n, :], data[t, :]) for n in np.arange( 0, data.shape[0]) for t in np.arange(0, data.shape[0])]) weights = np.reshape(weights, [data.shape[0], data.shape[0]]) np.fill_diagonal(weights, np.nan) weights = 1 / weights weights = (weights - np.nanmin(weights)) / \ (np.nanmax(weights) - np.nanmin(weights)) np.fill_diagonal(weights, 1) return weights, report
0.003101
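A concrete sketch of the same weighting scheme, swapping scipy's euclidean for the module's getDistanceFunction lookup and dropping the report plumbing.

import numpy as np
from scipy.spatial.distance import euclidean

data = np.random.rand(5, 3)  # 5 time points, 3 node time series
w = np.array([[euclidean(data[n], data[t]) for t in range(5)]
              for n in range(5)])
np.fill_diagonal(w, np.nan)  # ignore self-distances during normalization
w = 1 / w                    # nearby time points get larger weights
w = (w - np.nanmin(w)) / (np.nanmax(w) - np.nanmin(w))
np.fill_diagonal(w, 1)       # a time point matches itself maximally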
def copy(self):
    """
    Return a copy of this ProtoFeed, that is, a feed with all the
    same attributes.
    """
    other = ProtoFeed()
    for key in cs.PROTOFEED_ATTRS:
        value = getattr(self, key)
        if isinstance(value, pd.DataFrame):
            # Copy DataFrame attributes so the two feeds don't share state
            value = value.copy()
        setattr(other, key, value)

    return other
0.004695
def add_error(self, error): """Record an error from expect APIs. This method generates a position stamp for the expect. The stamp is composed of a timestamp and the number of errors recorded so far. Args: error: Exception or signals.ExceptionRecord, the error to add. """ self._count += 1 self._record.add_error('expect@%s+%s' % (time.time(), self._count), error)
0.00431
def construct_formset(self): """ Overrides construct_formset to attach the model class as an attribute of the returned formset instance. """ formset = super(InlineFormSetFactory, self).construct_formset() formset.model = self.inline_model return formset
0.006472
def _set_rsvp_authentication(self, v, load=False): """ Setter method for rsvp_authentication, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface/rsvp/rsvp_authentication (container) If this variable is read-only (config: false) in the source YANG file, then _set_rsvp_authentication is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rsvp_authentication() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=rsvp_authentication.rsvp_authentication, is_container='container', presence=False, yang_name="rsvp-authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable RSVP authentication on this interface', u'alt-name': u'authentication', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rsvp_authentication must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=rsvp_authentication.rsvp_authentication, is_container='container', presence=False, yang_name="rsvp-authentication", rest_name="authentication", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable RSVP authentication on this interface', u'alt-name': u'authentication', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""", }) self.__rsvp_authentication = t if hasattr(self, '_set'): self._set()
0.005184
def nextprefix(self):
    """
    Get the next available namespace prefix: the first numbered prefix
    (ns0, ns1, ...) that is not already defined on the wsdl document.
    """
    used = [ns[0] for ns in self.prefixes]
    used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
    for n in range(0, 1024):
        p = 'ns%d' % n
        if p not in used:
            return p
    raise Exception('prefixes exhausted')
0.003945
def write_file(self, filename, cart_coords=False):
    """
    Write the input string to a file.

    Arguments are passed through to ``to_string``; see the ``__str__``
    method for formatting options.
    """
    with zopen(filename, "w") as f:
        f.write(self.to_string(cart_coords))
0.008264
def create_namespaced_event(self, namespace, body, **kwargs):
    """
    create an Event
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_namespaced_event(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Event body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1Event
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.create_namespaced_event_with_http_info(namespace, body, **kwargs)
    else:
        data = self.create_namespaced_event_with_http_info(namespace, body, **kwargs)
        return data
0.005051
def get_key(s):
    '''
    Get the data between [ and ], stripping surrounding ' quotes if
    present.

    @param s: string to process
    '''
    start = s.find("[")
    end = s.find("]")
    if start == -1 or end == -1 or end < start:
        # No bracket pair, or ']' appears before '[' (which would
        # otherwise index out of range below).
        return None
    if s[start + 1] == "'":
        start += 1
    if s[end - 1] == "'":
        end -= 1
    return s[start + 1:end]
0.002941
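Doctest-style checks for get_key, runnable as-is once the function is defined:

assert get_key("data['name']") == "name"  # surrounding quotes are stripped
assert get_key("data[0]") == "0"
assert get_key("no brackets here") is None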
def Read(self, length): """Read from the file.""" if not self.IsFile(): raise IOError("%s is not a file." % self.pathspec.last.path) available = min(self.size - self.offset, length) if available > 0: # This raises a RuntimeError in some situations. try: data = self.fd.read_random(self.offset, available, self.pathspec.last.ntfs_type, self.pathspec.last.ntfs_id) except RuntimeError as e: raise IOError(e) self.offset += len(data) return data return b""
0.011706
def get_event_question(self, id, question_id, **data):
    """
    GET /events/:id/questions/:question_id/
    This endpoint will return :format:`question` for a specific question id.
    """
    return self.get("/events/{0}/questions/{1}/".format(id, question_id), data=data)
0.019737
def check_grid_aligned(vol, img, offset): """Returns (is_aligned, img bounds Bbox, nearest bbox inflated to grid aligned)""" shape = Vec(*img.shape)[:3] offset = Vec(*offset)[:3] bounds = Bbox( offset, shape + offset) alignment_check = bounds.expand_to_chunk_size(vol.underlying, vol.voxel_offset) alignment_check = Bbox.clamp(alignment_check, vol.bounds) is_aligned = np.all(alignment_check.minpt == bounds.minpt) and np.all(alignment_check.maxpt == bounds.maxpt) return (is_aligned, bounds, alignment_check)
0.024762
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'dialog_node') and self.dialog_node is not None: _dict['dialog_node'] = self.dialog_node if hasattr(self, 'description') and self.description is not None: _dict['description'] = self.description if hasattr(self, 'conditions') and self.conditions is not None: _dict['conditions'] = self.conditions if hasattr(self, 'parent') and self.parent is not None: _dict['parent'] = self.parent if hasattr(self, 'previous_sibling') and self.previous_sibling is not None: _dict['previous_sibling'] = self.previous_sibling if hasattr(self, 'output') and self.output is not None: _dict['output'] = self.output._to_dict() if hasattr(self, 'context') and self.context is not None: _dict['context'] = self.context if hasattr(self, 'metadata') and self.metadata is not None: _dict['metadata'] = self.metadata if hasattr(self, 'next_step') and self.next_step is not None: _dict['next_step'] = self.next_step._to_dict() if hasattr(self, 'title') and self.title is not None: _dict['title'] = self.title if hasattr(self, 'node_type') and self.node_type is not None: _dict['type'] = self.node_type if hasattr(self, 'event_name') and self.event_name is not None: _dict['event_name'] = self.event_name if hasattr(self, 'variable') and self.variable is not None: _dict['variable'] = self.variable if hasattr(self, 'actions') and self.actions is not None: _dict['actions'] = [x._to_dict() for x in self.actions] if hasattr(self, 'digress_in') and self.digress_in is not None: _dict['digress_in'] = self.digress_in if hasattr(self, 'digress_out') and self.digress_out is not None: _dict['digress_out'] = self.digress_out if hasattr(self, 'digress_out_slots') and self.digress_out_slots is not None: _dict['digress_out_slots'] = self.digress_out_slots if hasattr(self, 'user_label') and self.user_label is not None: _dict['user_label'] = self.user_label if hasattr(self, 'disabled') and self.disabled is not None: _dict['disabled'] = self.disabled if hasattr(self, 'created') and self.created is not None: _dict['created'] = datetime_to_string(self.created) if hasattr(self, 'updated') and self.updated is not None: _dict['updated'] = datetime_to_string(self.updated) return _dict
0.000731
def _to_pandas(ob): """Convert an array-like to a pandas object. Parameters ---------- ob : array-like The object to convert. Returns ------- pandas_structure : pd.Series or pd.DataFrame The correct structure based on the dimensionality of the data. """ if isinstance(ob, (pd.Series, pd.DataFrame)): return ob if ob.ndim == 1: return pd.Series(ob) elif ob.ndim == 2: return pd.DataFrame(ob) else: raise ValueError( 'cannot convert array of dim > 2 to a pandas structure', )
0.001689
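A usage sketch, assuming the module-level pandas import (pd) that the function itself relies on:

import numpy as np

s = _to_pandas(np.arange(3))       # 1-D input -> pd.Series
df = _to_pandas(np.zeros((2, 2)))  # 2-D input -> pd.DataFrame
assert _to_pandas(df) is df        # pandas objects pass through unchanged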
def create_payload(self): """Rename the payload key "prior_id" to "prior". For more information, see `Bugzilla #1238757 <https://bugzilla.redhat.com/show_bug.cgi?id=1238757>`_. """ payload = super(LifecycleEnvironment, self).create_payload() if (_get_version(self._server_config) < Version('6.1') and 'prior_id' in payload): payload['prior'] = payload.pop('prior_id') return payload
0.004274
def get_terreinobject_by_id(self, id):
    '''
    Retrieve a `Terreinobject` by the Id.

    :param integer id: the Id of the `Terreinobject`
    :rtype: :class:`Terreinobject`
    '''
    def creator():
        res = crab_gateway_request(
            self.client, 'GetTerreinobjectByIdentificatorTerreinobject', id
        )
        if res is None:
            raise GatewayResourceNotFoundException()
        return Terreinobject(
            res.IdentificatorTerreinobject,
            res.AardTerreinobject,
            (res.CenterX, res.CenterY),
            (res.MinimumX, res.MinimumY, res.MaximumX, res.MaximumY),
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    if self.caches['short'].is_configured:
        key = 'GetTerreinobjectByIdentificatorTerreinobject#%s' % (id)
        terreinobject = self.caches['short'].get_or_create(key, creator)
    else:
        terreinobject = creator()
    terreinobject.set_gateway(self)
    return terreinobject
0.002355
def find_rows_by_string(tab, names, colnames=['assoc']):
    """Find the rows in a table ``tab`` that match at least one of the
    strings in ``names``. This method ignores whitespace and case when
    matching strings.

    Parameters
    ----------
    tab : `astropy.table.Table`
       Table that will be searched.

    names : list
       List of strings.

    colnames : list
       Names of the table columns that will be searched for matching strings.

    Returns
    -------
    mask : `~numpy.ndarray`
       Boolean mask for rows with matching strings.

    """
    mask = np.empty(len(tab), dtype=bool)
    mask.fill(False)
    names = [name.lower().replace(' ', '') for name in names]

    for colname in colnames:

        if colname not in tab.columns:
            continue

        col = tab[[colname]].copy()
        col[colname] = defchararray.replace(defchararray.lower(col[colname]).astype(str),
                                            ' ', '')
        for name in names:
            mask |= col[colname] == name
    return mask
0.002868
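A usage sketch with a small astropy table; matching ignores case and whitespace, so 'CrabNebula' hits the 'Crab Nebula' row.

from astropy.table import Table

tab = Table({'assoc': ['Crab Nebula', 'Vela X', '']})
mask = find_rows_by_string(tab, ['CrabNebula'])
# mask -> array([ True, False, False])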
def int_args(self): """ Iterate through all the possible arg positions that can only be used to store integer or pointer values Does not take into account customizations. Returns an iterator of SimFunctionArguments """ if self.ARG_REGS is None: raise NotImplementedError() for reg in self.ARG_REGS: # pylint: disable=not-an-iterable yield SimRegArg(reg, self.arch.bytes)
0.006494
def _init_records(self, record_types):
    """Initialize all records for this form."""
    for record_type in record_types:
        # This conditional was inserted on 7/11/14. It may prove problematic:
        if str(record_type) not in self._my_map['recordTypeIds']:
            record_initialized = self._init_record(str(record_type))
            if record_initialized:
                self._my_map['recordTypeIds'].append(str(record_type))
0.006397
def _expand_possible_file_value(self, value): """If the value is a file, returns its contents. Otherwise return the original value.""" if value and os.path.isfile(str(value)): with open(value, 'r') as f: return f.read() return value
0.011628
def index(self, record): """Index a record. The caller is responsible for ensuring that the record has already been committed to the database. If a newer version of a record has already been indexed then the provided record will not be indexed. This behavior can be controlled by providing a different ``version_type`` when initializing ``RecordIndexer``. :param record: Record instance. """ index, doc_type = self.record_to_index(record) return self.client.index( id=str(record.id), version=record.revision_id, version_type=self._version_type, index=index, doc_type=doc_type, body=self._prepare_record(record, index, doc_type), )
0.002522
def encrypt(self, plaintext, encoder=encoding.RawEncoder): """ Encrypts the plaintext message using a random-generated ephemeral keypair and returns a "composed ciphertext", containing both the public part of the keypair and the ciphertext proper, encoded with the encoder. The private part of the ephemeral key-pair will be scrubbed before returning the ciphertext, therefore, the sender will not be able to decrypt the generated ciphertext. :param plaintext: [:class:`bytes`] The plaintext message to encrypt :param encoder: The encoder to use to encode the ciphertext :return bytes: encoded ciphertext """ ciphertext = nacl.bindings.crypto_box_seal( plaintext, self._public_key ) encoded_ciphertext = encoder.encode(ciphertext) return encoded_ciphertext
0.002191
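The same primitive through PyNaCl's public API, as a round-trip check; SealedBox wraps crypto_box_seal exactly as the method above does.

from nacl.public import PrivateKey, SealedBox

sk = PrivateKey.generate()
ciphertext = SealedBox(sk.public_key).encrypt(b"attack at dawn")
assert SealedBox(sk).decrypt(ciphertext) == b"attack at dawn"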
def new_with_fixed_mpi_omp(self, mpi_procs, omp_threads): """ Return a new `TaskManager` in which autoparal has been disabled. The jobs will be executed with `mpi_procs` MPI processes and `omp_threads` OpenMP threads. Useful for generating input files for benchmarks. """ new = self.deepcopy() new.policy.autoparal = 0 new.set_mpi_procs(mpi_procs) new.set_omp_threads(omp_threads) return new
0.006369
def _validate_response(self, method, response):
    '''
    Helper method to validate that the response given to a Wunderlist
    API request is as expected
    '''
    # TODO Fill this out using the error codes here: https://developer.wunderlist.com/documentation/concepts/formats
    # The expected results can change based on API version, so validate this here
    if self.api_version:
        if response.status_code >= 400:
            raise ValueError('{} {}'.format(response.status_code, str(response.json())))
        if method == 'GET':
            assert response.status_code == 200
        elif method == 'POST':
            assert response.status_code == 201
        elif method == 'PATCH':
            assert response.status_code == 200
        elif method == 'DELETE':
            assert response.status_code == 204
0.006985
def syncStateCall(self, method, url, params={}, **kwargs): """ Follow and track sync state URLs provided by an API endpoint, in order to implicitly handle pagination. In the first call, ``url`` and ``params`` are used as-is. If a ``syncState`` endpoint is provided in the response, subsequent calls go to the latest URL instead. Args: method (str): HTTP request method url (str): full URL to connect to params (dict): query parameters to include in the URL kwargs (dict): any extra parameters to pass to :meth:`__call__` """ try: states = self.syncStates[(method, url)] except KeyError: states = self.syncStates[(method, url)] = [] if states: # We have a state link, use it to replace the URL and query string. url = states[-1] params = {} resp = self(method, url, params=params, **kwargs) try: json = resp.json() except ValueError: # Don't do anything if not a JSON response. pass else: # If a state link exists in the response, store it for later. state = json.get("_metadata", {}).get("syncState") if state: states.append(state) return resp
0.002954
def use_plenary_grade_entry_view(self): """Pass through to provider GradeEntryLookupSession.use_plenary_grade_entry_view""" self._object_views['grade_entry'] = PLENARY # self._get_provider_session('grade_entry_lookup_session') # To make sure the session is tracked for session in self._get_provider_sessions(): try: session.use_plenary_grade_entry_view() except AttributeError: pass
0.008511
def _GenerateZipInfo(self, arcname=None, compress_type=None, st=None): """Generate ZipInfo instance for the given name, compression and stat. Args: arcname: The name in the archive this should take. compress_type: Compression type (zipfile.ZIP_DEFLATED, or ZIP_STORED) st: An optional stat object to be used for setting headers. Returns: ZipInfo instance. Raises: ValueError: If arcname is not provided. """ # Fake stat response. if st is None: # TODO(user):pytype: stat_result typing is not correct. # pytype: disable=wrong-arg-count st = os.stat_result((0o100644, 0, 0, 0, 0, 0, 0, 0, 0, 0)) # pytype: enable=wrong-arg-count mtime = time.localtime(st.st_mtime or time.time()) date_time = mtime[0:6] # Create ZipInfo instance to store file information if arcname is None: raise ValueError("An arcname must be provided.") zinfo = zipfile.ZipInfo(arcname, date_time) zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes if compress_type is None: zinfo.compress_type = self._compression else: zinfo.compress_type = compress_type zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.flag_bits = 0x08 # Setting data descriptor flag. zinfo.CRC = 0x08074b50 # Predefined CRC for archives using data # descriptors. # This fills an empty Info-ZIP Unix extra field. zinfo.extra = struct.pack( "<HHIIHH", 0x5855, 12, 0, # time of last access (UTC/GMT) 0, # time of last modification (UTC/GMT) 0, # user ID 0) # group ID return zinfo
0.004811
def enableGroup(self): """Enables all radio buttons in the group.""" radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.enable()
0.01083
def getRenderModelErrorNameFromEnum(self, error): """Returns a string for a render model error""" fn = self.function_table.getRenderModelErrorNameFromEnum result = fn(error) return result
0.009091
def remove(self, uids: Iterable[int]) -> None: """Remove any session flags for the given message. Args: uids: The message UID values. """ for uid in uids: self._recent.discard(uid) self._flags.pop(uid, None)
0.00722
def _pop_translated_data(self): """ Separate data of translated fields from other data. """ translated_data = {} for meta in self.Meta.model._parler_meta: translations = self.validated_data.pop(meta.rel_name, {}) if translations: translated_data[meta.rel_name] = translations return translated_data
0.005181
def _numbers_decades(N):
    """
    >>> _numbers_decades(45)
    '         1         2         3         4'
    """
    N = N // 10
    lst = range(1, N + 1)
    return "".join(map(lambda i: "%10s" % i, lst))
0.004785
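A usage sketch; each decade marker is right-justified in a 10-character field, so N=45 yields a 40-character ruler header.

ruler = _numbers_decades(45)
assert len(ruler) == 40
assert ruler.endswith("         4")  # the last field is 9 spaces + '4'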