Columns: text (string, lengths 78 to 104k), score (float64, range 0 to 0.18)
def icetea_main():
    """
    Main function for running Icetea. Calls sys.exit with the return code to exit.

    :return: Nothing.
    """
    from icetea_lib import IceteaManager
    manager = IceteaManager.IceteaManager()
    return_code = manager.run()
    sys.exit(return_code)
0.007067
def handle_enterprise_logistration(backend, user, **kwargs):
    """
    Perform the linking of user in the process of logging to the Enterprise Customer.

    Args:
        backend: The class handling the SSO interaction (SAML, OAuth, etc)
        user: The user object in the process of being logged in with
        **kwargs: Any remaining pipeline variables
    """
    request = backend.strategy.request
    enterprise_customer = get_enterprise_customer_for_running_pipeline(
        request,
        {
            'backend': backend.name,
            'kwargs': kwargs
        }
    )
    if enterprise_customer is None:
        # This pipeline element is not being activated as a part of an Enterprise logistration
        return

    # proceed with the creation of a link between the user and the enterprise customer, then exit.
    enterprise_customer_user, _ = EnterpriseCustomerUser.objects.update_or_create(
        enterprise_customer=enterprise_customer,
        user_id=user.id
    )
    enterprise_customer_user.update_session(request)
0.004762
def read_set_from_file(filename: str) -> Set[str]:
    """
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    """
    collection = set()
    with open(filename, 'r') as file_:
        for line in file_:
            collection.add(line.rstrip())
    return collection
0.003058
def _dispatch(self, operation, request, path_args): """ Wrapped dispatch method, prepare request and generate a HTTP Response. """ # Determine the request and response types. Ensure API supports the requested types request_type = resolve_content_type(self.request_type_resolvers, request) request_type = self.remap_codecs.get(request_type, request_type) try: request.request_codec = self.registered_codecs[request_type] except KeyError: return HttpResponse.from_status(HTTPStatus.UNPROCESSABLE_ENTITY) response_type = resolve_content_type(self.response_type_resolvers, request) response_type = self.remap_codecs.get(response_type, response_type) try: request.response_codec = self.registered_codecs[response_type] except KeyError: return HttpResponse.from_status(HTTPStatus.NOT_ACCEPTABLE) # Check if method is in our allowed method list if request.method not in operation.methods: return HttpResponse.from_status( HTTPStatus.METHOD_NOT_ALLOWED, {'Allow': ','.join(m.value for m in operation.methods)} ) # Response types resource, status, headers = self.dispatch_operation(operation, request, path_args) if isinstance(status, HTTPStatus): status = status.value # Return a HttpResponse and just send it! if isinstance(resource, HttpResponse): return resource # Encode the response return create_response(request, resource, status, headers)
0.003659
def compute(self, bottomUpInput, enableLearn, enableInference=None): """ Handle one compute, possibly learning. .. note:: It is an error to have both ``enableLearn`` and ``enableInference`` set to False .. note:: By default, we don't compute the inference output when learning because it slows things down, but you can override this by passing in True for ``enableInference``. :param bottomUpInput: The bottom-up input as numpy list, typically from a spatial pooler. :param enableLearn: (bool) If true, perform learning :param enableInference: (bool) If None, default behavior is to disable the inference output when ``enableLearn`` is on. If true, compute the inference output. If false, do not compute the inference output. :returns: TODO: document """ # As a speed optimization for now (until we need online learning), skip # computing the inference output while learning if enableInference is None: if enableLearn: enableInference = False else: enableInference = True assert (enableLearn or enableInference) # Get the list of columns that have bottom-up activeColumns = bottomUpInput.nonzero()[0] if enableLearn: self.lrnIterationIdx += 1 self.iterationIdx += 1 if self.verbosity >= 3: print "\n==== PY Iteration: %d =====" % (self.iterationIdx) print "Active cols:", activeColumns # Update segment duty cycles if we are crossing a "tier" # We determine if it's time to update the segment duty cycles. Since the # duty cycle calculation is a moving average based on a tiered alpha, it is # important that we update all segments on each tier boundary if enableLearn: if self.lrnIterationIdx in Segment.dutyCycleTiers: for c, i in itertools.product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)): for segment in self.cells[c][i]: segment.dutyCycle() # Update the average input density if self.avgInputDensity is None: self.avgInputDensity = len(activeColumns) else: self.avgInputDensity = (0.99 * self.avgInputDensity + 0.01 * len(activeColumns)) # First, update the inference state # As a speed optimization for now (until we need online learning), skip # computing the inference output while learning if enableInference: self._updateInferenceState(activeColumns) # Next, update the learning state if enableLearn: self._updateLearningState(activeColumns) # Apply global decay, and remove synapses and/or segments. # Synapses are removed if their permanence value is <= 0. # Segments are removed when they don't have synapses anymore. # Removal of synapses can trigger removal of whole segments! # todo: isolate the synapse/segment retraction logic so that # it can be called in adaptSegments, in the case where we # do global decay only episodically. 
if self.globalDecay > 0.0 and ((self.lrnIterationIdx % self.maxAge) == 0): for c, i in itertools.product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)): segsToDel = [] # collect and remove outside the loop for segment in self.cells[c][i]: age = self.lrnIterationIdx - segment.lastActiveIteration if age <= self.maxAge: continue synsToDel = [] # collect and remove outside the loop for synapse in segment.syns: synapse[2] = synapse[2] - self.globalDecay # decrease permanence if synapse[2] <= 0: synsToDel.append(synapse) # add to list to delete # 1 for sequenceSegment flag if len(synsToDel) == segment.getNumSynapses(): segsToDel.append(segment) # will remove the whole segment elif len(synsToDel) > 0: for syn in synsToDel: # remove some synapses on segment segment.syns.remove(syn) for seg in segsToDel: # remove some segments of this cell self._cleanUpdatesList(c, i, seg) self.cells[c][i].remove(seg) # Update the prediction score stats # Learning always includes inference if self.collectStats: if enableInference: predictedState = self.infPredictedState['t-1'] else: predictedState = self.lrnPredictedState['t-1'] self._updateStatsInferEnd(self._internalStats, activeColumns, predictedState, self.colConfidence['t-1']) # Finally return the TM output output = self._computeOutput() # Print diagnostic information based on the current verbosity level self.printComputeEnd(output, learn=enableLearn) self.resetCalled = False return output
0.009357
def year(self, value=None):
    """
    We do *NOT* know for what year we are converting so lets assume the year has 365 days.
    """
    if value is None:
        return self.day() / 365
    else:
        self.millisecond(self.day(value * 365))
0.007092
def generate_file(project_dir, infile, context, env): """Render filename of infile as name of outfile, handle infile correctly. Dealing with infile appropriately: a. If infile is a binary file, copy it over without rendering. b. If infile is a text file, render its contents and write the rendered infile to outfile. Precondition: When calling `generate_file()`, the root template dir must be the current working directory. Using `utils.work_in()` is the recommended way to perform this directory change. :param project_dir: Absolute path to the resulting generated project. :param infile: Input file to generate the file from. Relative to the root template dir. :param context: Dict for populating the cookiecutter's variables. :param env: Jinja2 template execution environment. """ logger.debug('Processing file {}'.format(infile)) # Render the path to the output file (not including the root project dir) outfile_tmpl = env.from_string(infile) outfile = os.path.join(project_dir, outfile_tmpl.render(**context)) file_name_is_empty = os.path.isdir(outfile) if file_name_is_empty: logger.debug('The resulting file name is empty: {0}'.format(outfile)) return logger.debug('Created file at {0}'.format(outfile)) # Just copy over binary files. Don't render. logger.debug("Check {} to see if it's a binary".format(infile)) if is_binary(infile): logger.debug( 'Copying binary {} to {} without rendering' ''.format(infile, outfile) ) shutil.copyfile(infile, outfile) else: # Force fwd slashes on Windows for get_template # This is a by-design Jinja issue infile_fwd_slashes = infile.replace(os.path.sep, '/') # Render the file try: tmpl = env.get_template(infile_fwd_slashes) except TemplateSyntaxError as exception: # Disable translated so that printed exception contains verbose # information about syntax error location exception.translated = False raise rendered_file = tmpl.render(**context) logger.debug('Writing contents to file {}'.format(outfile)) with io.open(outfile, 'w', encoding='utf-8') as fh: fh.write(rendered_file) # Apply file permissions to output file shutil.copymode(infile, outfile)
0.000406
def render_text(text, preformatted=False):
    """
    Return text formatted as a HTML

    Args:
        text: the text to render
        preformatted: whether the text should be rendered as preformatted
    """
    return IPython.core.display.HTML(_html.HtmlBuilder.render_text(text, preformatted))
0.014286
def validate(self):
    """Validate the edges."""
    for function in compat_itervalues(self.functions):
        for callee_id in compat_keys(function.calls):
            assert function.calls[callee_id].callee_id == callee_id
            if callee_id not in self.functions:
                sys.stderr.write('warning: call to undefined function %s from function %s\n' % (str(callee_id), function.name))
                del function.calls[callee_id]
0.006289
def get_jar_url(version=None):
    """Get the URL to a Stanford CoreNLP jar file with a specific version.
    These jars come from Maven since the Maven version is smaller than the
    full CoreNLP distributions. Defaults to DEFAULT_CORENLP_VERSION."""
    if version is None:
        version = DEFAULT_CORENLP_VERSION

    try:
        string_type = basestring
    except NameError:
        string_type = str

    if not isinstance(version, string_type):
        raise TypeError("Version must be a string or None (got %r)." % version)

    jar_filename = 'stanford-corenlp-%s.jar' % version
    return 'http://search.maven.org/remotecontent?filepath=' + \
        'edu/stanford/nlp/stanford-corenlp/%s/%s' % (version, jar_filename)
0.002257
def string_to_int( s ):
    """Convert a string of bytes into an integer, as per X9.62."""
    result = 0
    for c in s:
        if not isinstance(c, int):
            c = ord( c )
        result = 256 * result + c
    return result
0.04878
def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):
    """
    Updates the QoS aggregated configuration for the logical interconnect.

    Args:
        qos_configuration: QOS configuration.
        timeout:
            Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView, just stops waiting for its completion.

    Returns:
        dict: Logical Interconnect.
    """
    uri = "{}{}".format(self.data["uri"], self.QOS_AGGREGATED_CONFIGURATION)
    return self._helper.update(qos_configuration, uri=uri, timeout=timeout)
0.005865
def write(self, text, fg='black', bg='white'):
    '''write to the console'''
    if isinstance(text, str):
        sys.stdout.write(text)
    else:
        sys.stdout.write(str(text))
    sys.stdout.flush()
    if self.udp.connected():
        self.udp.writeln(text)
    if self.tcp.connected():
        self.tcp.writeln(text)
0.00545
def remove_edge(self, u, v):
    """Version of remove_edge that's much like normal networkx but only
    deletes once, since the database doesn't keep separate adj and succ mappings
    """
    try:
        del self.succ[u][v]
    except KeyError:
        raise NetworkXError(
            "The edge {}-{} is not in the graph.".format(u, v)
        )
0.005076
def fftshift(arr_obj, axes = None, res_g = None, return_buffer = False): """ gpu version of fftshift for numpy arrays or OCLArrays Parameters ---------- arr_obj: numpy array or OCLArray (float32/complex64) the array to be fftshifted axes: list or None the axes over which to shift (like np.fft.fftshift) if None, all axes are taken res_g: if given, fills it with the result (has to be same shape and dtype as arr_obj) else internally creates a new one Returns ------- if return_buffer, returns the result as (well :) OCLArray else returns the result as numpy array """ if axes is None: axes = list(range(arr_obj.ndim)) if isinstance(arr_obj, OCLArray): if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES: raise NotImplementedError("only works for float32 or complex64") elif isinstance(arr_obj, np.ndarray): if np.iscomplexobj(arr_obj): arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False)) else: arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False)) else: raise ValueError("unknown type (%s)"%(type(arr_obj))) if not np.all([arr_obj.shape[a]%2==0 for a in axes]): raise NotImplementedError("only works on axes of even dimensions") if res_g is None: res_g = OCLArray.empty_like(arr_obj) # iterate over all axes # FIXME: this is still rather inefficient in_g = arr_obj for ax in axes: _fftshift_single(in_g, res_g, ax) in_g = res_g if return_buffer: return res_g else: return res_g.get()
0.012964
def _get_tags(self, entity=None, tag_type=None): """Generate the tags for a given entity (container or image) according to a list of tag names.""" # Start with custom tags tags = list(self.custom_tags) # Collect pod names as tags on kubernetes if Platform.is_k8s() and KubeUtil.POD_NAME_LABEL not in self.collect_labels_as_tags: self.collect_labels_as_tags.append(KubeUtil.POD_NAME_LABEL) self.collect_labels_as_tags.append(KubeUtil.CONTAINER_NAME_LABEL) # Collect container names as tags on rancher if Platform.is_rancher(): if RANCHER_CONTAINER_NAME not in self.collect_labels_as_tags: self.collect_labels_as_tags.append(RANCHER_CONTAINER_NAME) if RANCHER_SVC_NAME not in self.collect_labels_as_tags: self.collect_labels_as_tags.append(RANCHER_SVC_NAME) if RANCHER_STACK_NAME not in self.collect_labels_as_tags: self.collect_labels_as_tags.append(RANCHER_STACK_NAME) if entity is not None: pod_name = None namespace = None # Get labels as tags labels = entity.get("Labels") if labels is not None: for k in self.collect_labels_as_tags: if k in labels: v = labels[k] if k == KubeUtil.POD_NAME_LABEL and Platform.is_k8s(): pod_name = v k = "pod_name" if "-" in pod_name: replication_controller = "-".join(pod_name.split("-")[:-1]) if "/" in replication_controller: # k8s <= 1.1 namespace, replication_controller = replication_controller.split("/", 1) elif KubeUtil.NAMESPACE_LABEL in labels: # k8s >= 1.2 namespace = labels[KubeUtil.NAMESPACE_LABEL] tags.append("kube_namespace:%s" % namespace) tags.append("kube_replication_controller:%s" % replication_controller) tags.append("pod_name:%s" % pod_name) elif k == KubeUtil.CONTAINER_NAME_LABEL and Platform.is_k8s(): if v: tags.append("kube_container_name:%s" % v) elif k == SWARM_SVC_LABEL and Platform.is_swarm(): if v: tags.append("swarm_service:%s" % v) elif k == RANCHER_CONTAINER_NAME and Platform.is_rancher(): if v: tags.append('rancher_container:%s' % v) elif k == RANCHER_SVC_NAME and Platform.is_rancher(): if v: tags.append('rancher_service:%s' % v) elif k == RANCHER_STACK_NAME and Platform.is_rancher(): if v: tags.append('rancher_stack:%s' % v) elif not v: tags.append(k) else: tags.append("%s:%s" % (k, v)) if k == KubeUtil.POD_NAME_LABEL and Platform.is_k8s() and k not in labels: tags.append("pod_name:no_pod") # Get entity specific tags if tag_type is not None: tag_names = self.tag_names[tag_type] for tag_name in tag_names: tag_value = self._extract_tag_value(entity, tag_name) if tag_value is not None: for t in tag_value: tags.append('%s:%s' % (tag_name, str(t).strip())) # Add kube labels and creator/service tags if Platform.is_k8s() and namespace and pod_name: kube_tags = self.kube_pod_tags.get("{0}/{1}".format(namespace, pod_name)) if kube_tags: tags.extend(list(kube_tags)) if self.metadata_collector.has_detected(): orch_tags = self.metadata_collector.get_container_tags(co=entity) tags.extend(orch_tags) return tags
0.003168
def DoesIDExist(tag_name):
    """
    Determines if a fully-qualified site.service.tag eDNA tag exists
    in any of the connected services.

    :param tag_name: fully-qualified (site.service.tag) eDNA tag
    :return: true if the point exists, false if the point does not exist

    Example:

    >>> DoesIDExist("Site.Service.Tag")

    """
    # the eDNA API requires that the tag_name be specified in a binary format,
    # and the ctypes library must be used to create a C++ variable type.
    szPoint = c_char_p(tag_name.encode('utf-8'))
    result = bool(dna_dll.DoesIdExist(szPoint))
    return result
0.001592
def line(self, node, coords, close=False, **kwargs):
    """Draw a svg line"""
    line_len = len(coords)
    if len([c for c in coords if c[1] is not None]) < 2:
        return
    root = 'M%s L%s Z' if close else 'M%s L%s'
    origin_index = 0
    while origin_index < line_len and None in coords[origin_index]:
        origin_index += 1
    if origin_index == line_len:
        return

    if self.graph.horizontal:
        coord_format = lambda xy: '%f %f' % (xy[1], xy[0])
    else:
        coord_format = lambda xy: '%f %f' % xy

    origin = coord_format(coords[origin_index])
    line = ' '.join([
        coord_format(c)
        for c in coords[origin_index + 1:]
        if None not in c
    ])
    return self.node(node, 'path', d=root % (origin, line), **kwargs)
0.004802
def network_define(name, bridge, forward, **kwargs): ''' Create libvirt network. :param name: Network name :param bridge: Bridge name :param forward: Forward mode(bridge, router, nat) :param vport: Virtualport type :param tag: Vlan tag :param autostart: Network autostart (default True) :param start: Network start (default True) :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults CLI Example: .. code-block:: bash salt '*' virt.network_define network main bridge openvswitch .. versionadded:: 2019.2.0 ''' conn = __get_conn(**kwargs) vport = kwargs.get('vport', None) tag = kwargs.get('tag', None) autostart = kwargs.get('autostart', True) starting = kwargs.get('start', True) net_xml = _gen_net_xml( name, bridge, forward, vport, tag, ) try: conn.networkDefineXML(net_xml) except libvirtError as err: log.warning(err) conn.close() raise err # a real error we should report upwards try: network = conn.networkLookupByName(name) except libvirtError as err: log.warning(err) conn.close() raise err # a real error we should report upwards if network is None: conn.close() return False if (starting is True or autostart is True) and network.isActive() != 1: network.create() if autostart is True and network.autostart() != 1: network.setAutostart(int(autostart)) elif autostart is False and network.autostart() == 1: network.setAutostart(int(autostart)) conn.close() return True
0.000552
def diff(x, y, x_only=False, y_only=False): """ Retrieve a unique of list of elements that do not exist in both x and y. Capable of parsing one-dimensional (flat) and two-dimensional (lists of lists) lists. :param x: list #1 :param y: list #2 :param x_only: Return only unique values from x :param y_only: Return only unique values from y :return: list of unique values """ # Validate both lists, confirm neither are empty if len(x) == 0 and len(y) > 0: return y # All y values are unique if x is empty elif len(y) == 0 and len(x) > 0: return x # All x values are unique if y is empty elif len(y) == 0 and len(x) == 0: return [] # Convert dictionaries to lists of tuples if isinstance(x, dict): x = list(x.items()) if isinstance(y, dict): y = list(y.items()) # Get the input type to convert back to before return try: input_type = type(x[0]) except IndexError: input_type = type(y[0]) # Dealing with a 2D dataset (list of lists) if input_type not in (str, int, float): # Immutable and Unique - Convert list of tuples into set of tuples first_set = set(map(tuple, x)) secnd_set = set(map(tuple, y)) # Dealing with a 1D dataset (list of items) else: # Unique values only first_set = set(x) secnd_set = set(y) # Determine which list is longest longest = first_set if len(first_set) > len(secnd_set) else secnd_set shortest = secnd_set if len(first_set) > len(secnd_set) else first_set # Generate set of non-shared values and return list of values in original type uniques = {i for i in longest if i not in shortest} # Add unique elements from shorter list for i in shortest: if i not in longest: uniques.add(i) # Return unique values from x, y or both if x_only: return [input_type(i) for i in uniques if input_type(i) in x] elif y_only: return [input_type(i) for i in uniques if input_type(i) in y] else: return [input_type(i) for i in uniques]
0.001404
def week_number(date):
    """
    Return the Python week number of a date.
    The django \|date:"W" returns incompatible value
    with the view implementation.
    """
    week_number = date.strftime('%W')
    if int(week_number) < 10:
        week_number = week_number[-1]
    return week_number
0.006689
def _send(self, message, read_reply=False):
    """Send a command string to the amplifier."""
    sock = None
    for tries in range(0, 3):
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.connect((self._host, self.PORT))
            break
        except (ConnectionError, BrokenPipeError):
            if tries == 3:
                print("socket connect failed.")
                return
            sleep(0.1)
    sock.send(codecs.decode(message, 'hex_codec'))
    if read_reply:
        sleep(0.1)
        reply = ''
        tries = 0
        max_tries = 20
        while len(reply) < len(message) and tries < max_tries:
            try:
                reply += codecs.encode(sock.recv(self.BUFFERSIZE), 'hex')\
                    .decode("utf-8")
            except (ConnectionError, BrokenPipeError):
                pass
            tries += 1
        sock.close()
        if tries >= max_tries:
            return
        return reply
    sock.close()
0.00177
def attention_lm_moe_base_long_seq():
    """Hyper parameters specifics for long sequence generation."""
    hparams = attention_lm_moe_base()

    hparams.max_length = 0  # max_length == batch_size
    hparams.eval_drop_long_sequences = True
    hparams.min_length_bucket = 256  # Avoid cyclic problems for big batches

    hparams.use_sepconv = True

    return hparams
0.022472
def subontology(self, minimal=False):
    """
    Generates a sub-ontology based on associations
    """
    return self.ontology.subontology(self.objects, minimal=minimal)
0.010638
def load_from_stream(self, stream, container, **kwargs):
    """
    Load config from given file like object 'stream`.

    :param stream: Config file or file like object
    :param container: callble to make a container object later
    :param kwargs: optional keyword parameters to be sanitized :: dict

    :return: Dict-like object holding config parameters
    """
    _not_implemented(self, stream, container, **kwargs)
0.004367
def get_user(uwnetid, include_course_summary=True):
    """
    Return a list of BridgeUsers objects with custom fields
    """
    url = author_uid_url(uwnetid) + "?%s" % CUSTOM_FIELD
    if include_course_summary:
        url = "%s&%s" % (url, COURSE_SUMMARY)
    resp = get_resource(url)
    return _process_json_resp_data(resp)
0.003021
def _reverse_transform_column(self, table, metadata, table_name):
    """Reverses the transformtion on a column from table using the given parameters.

    Args:
        table (pandas.DataFrame): Dataframe containing column to transform.
        metadata (dict): Metadata for given column.
        table_name (str): Name of table in original dataset.

    Returns:
        pandas.DataFrame: Dataframe containing the transformed column.
        If self.missing=True, it will contain a second column containing 0 and 1
        marking if that value was originally null or not.
        It will return None in the case the column is not in the table.
    """
    column_name = metadata['name']
    if column_name not in table:
        return

    null_name = '?' + column_name
    content = pd.DataFrame(columns=[column_name], index=table.index)
    transformer = self.transformers[(table_name, column_name)]
    content[column_name] = transformer.reverse_transform(table[column_name].to_frame())

    if self.missing and null_name in table[column_name]:
        content[null_name] = table.pop(null_name)
        null_transformer = transformers.NullTransformer(metadata)
        content[column_name] = null_transformer.reverse_transform(content)

    return content
0.004968
def rlmb_base_stochastic_discrete_noresize():
    """Base setting with stochastic discrete model."""
    hparams = rlmb_base()
    hparams.generative_model = "next_frame_basic_stochastic_discrete"
    hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
    hparams.resize_height_factor = 1
    hparams.resize_width_factor = 1
    return hparams
0.022792
def get_documenter(obj, parent):
    """Get an autodoc.Documenter class suitable for documenting the given
    object.

    *obj* is the Python object to be documented, and *parent* is an
    another Python object (e.g. a module or a class) to which *obj*
    belongs to.
    """
    from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
        ModuleDocumenter

    if inspect.ismodule(obj):
        # ModuleDocumenter.can_document_member always returns False
        return ModuleDocumenter

    # Construct a fake documenter for *parent*
    if parent is not None:
        parent_doc_cls = get_documenter(parent, None)
    else:
        parent_doc_cls = ModuleDocumenter

    if hasattr(parent, '__name__'):
        parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
    else:
        parent_doc = parent_doc_cls(FakeDirective(), "")

    # Get the corrent documenter class for *obj*
    classes = [cls for cls in AutoDirective._registry.values()
               if cls.can_document_member(obj, '', False, parent_doc)]
    if classes:
        classes.sort(key=lambda cls: cls.priority)
        return classes[-1]
    else:
        return DataDocumenter
0.001699
def setProduct(self, cache=False, *args, **kwargs): """Adds the product for this loan to a 'product' field. Product is a MambuProduct object. cache argument allows to use AllMambuProducts singleton to retrieve the products. See mambuproduct.AllMambuProducts code and pydoc for further information. Returns the number of requests done to Mambu. """ if cache: try: prods = self.allmambuproductsclass(*args, **kwargs) except AttributeError as ae: from .mambuproduct import AllMambuProducts self.allmambuproductsclass = AllMambuProducts prods = self.allmambuproductsclass(*args, **kwargs) for prod in prods: if prod['encodedKey'] == self['productTypeKey']: self['product'] = prod try: # asked for cache, but cache was originally empty prods.noinit except AttributeError: return 1 return 0 try: product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) except AttributeError as ae: from .mambuproduct import MambuProduct self.mambuproductclass = MambuProduct product = self.mambuproductclass(entid=self['productTypeKey'], *args, **kwargs) self['product'] = product return 1
0.002742
def solid_named(self, name):
    '''Return the solid named "name". Throws if it does not exist.

    Args:
        name (str): Name of solid

    Returns:
        SolidDefinition: SolidDefinition with correct name.
    '''
    check.str_param(name, 'name')
    if name not in self._solid_dict:
        raise DagsterInvariantViolationError(
            'Pipeline {pipeline_name} has no solid named {name}.'.format(
                pipeline_name=self.name, name=name
            )
        )
    return self._solid_dict[name]
0.00346
def swap(self, c2):
    ''' put the order of currencies as market standard '''
    inv = False
    c1 = self
    if c1.order > c2.order:
        ct = c1
        c1 = c2
        c2 = ct
        inv = True
    return inv, c1, c2
0.007194
def docopt(doc, argv=None, help=True, version=None, options_first=False): """Parse `argv` based on command-line interface described in `doc`. `docopt` creates your command-line interface based on its description that you pass as `doc`. Such description can contain --options, <positional-argument>, commands, which could be [optional], (required), (mutually | exclusive) or repeated... Parameters ---------- doc : str Description of your command-line interface. argv : list of str, optional Argument vector to be parsed. sys.argv[1:] is used if not provided. help : bool (default: True) Set to False to disable automatic help on -h or --help options. version : any object If passed, the object will be printed if --version is in `argv`. options_first : bool (default: False) Set to True to require options preceed positional arguments, i.e. to forbid options and positional arguments intermix. Returns ------- args : dict A dictionary, where keys are names of command-line elements such as e.g. "--verbose" and "<path>", and values are the parsed values of those elements. Example ------- >>> from docopt import docopt >>> doc = ''' Usage: my_program tcp <host> <port> [--timeout=<seconds>] my_program serial <port> [--baud=<n>] [--timeout=<seconds>] my_program (-h | --help | --version) Options: -h, --help Show this screen and exit. --baud=<n> Baudrate [default: 9600] ''' >>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30'] >>> docopt(doc, argv) {'--baud': '9600', '--help': False, '--timeout': '30', '--version': False, '<host>': '127.0.0.1', '<port>': '80', 'serial': False, 'tcp': True} See also -------- * For video introduction see http://docopt.org * Full documentation is available in README.rst as well as online at https://github.com/docopt/docopt#readme """ if argv is None: argv = sys.argv[1:] DocoptExit.usage = printable_usage(doc) options = parse_defaults(doc) pattern = parse_pattern(formal_usage(DocoptExit.usage), options) # [default] syntax for argument is disabled #for a in pattern.flat(Argument): # same_name = [d for d in arguments if d.name == a.name] # if same_name: # a.value = same_name[0].value argv = parse_argv(TokenStream(argv, DocoptExit), list(options), options_first) pattern_options = set(pattern.flat(Option)) for ao in pattern.flat(AnyOptions): doc_options = parse_defaults(doc) ao.children = list(set(doc_options) - pattern_options) #if any_options: # ao.children += [Option(o.short, o.long, o.argcount) # for o in argv if type(o) is Option] extras(help, version, argv, doc) matched, left, collected = pattern.fix().match(argv) if matched: return Dict((a.name, a.value) for a in (pattern.flat() + collected)) raise DocoptExit()
0.000951
def modified_lines(filename, extra_data, commit=None):
    """Returns the lines that have been modifed for this file.

    Args:
        filename: the file to check.
        extra_data: is the extra_data returned by modified_files. Additionally, a
          value of None means that the file was not modified.
        commit: the complete sha1 (40 chars) of the commit. Note that specifying
          this value will only work (100%) when commit == last_commit (with
          respect to the currently checked out revision), otherwise, we could miss
          some lines.

    Returns: a list of lines that were modified, or None in case all lines are
      new.
    """
    if extra_data is None:
        return []
    if extra_data not in ('M ', ' M', 'MM'):
        return None

    if commit is None:
        commit = '0' * 40
    commit = commit.encode('utf-8')

    # Split as bytes, as the output may have some non unicode characters.
    blame_lines = subprocess.check_output(
        ['git', 'blame', '--porcelain', filename]).split(
            os.linesep.encode('utf-8'))
    modified_line_numbers = utils.filter_lines(
        blame_lines,
        commit + br' (?P<line>\d+) (\d+)',
        groups=('line', ))

    return list(map(int, modified_line_numbers))
0.001612
def extract_grid(self, longmin, longmax, latmin, latmax): ''' Extract part of the image ``img`` Args: longmin (float): Minimum longitude of the window longmax (float): Maximum longitude of the window latmin (float): Minimum latitude of the window latmax (float): Maximum latitude of the window Returns: A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the longitudes, ``Y`` contains the latitude and ``Z`` the values extracted from the window. Note: All return arrays have the same size. All coordinate are in degree. ''' sample_min, sample_max = map( int, map(self.sample_id, [longmin, longmax])) line_min, line_max = map(int, map(self.line_id, [latmax, latmin])) X = np.array(map(self.long_id, (range(sample_min, sample_max, 1)))) Y = np.array(map(self.lat_id, (range(line_min, line_max + 1, 1)))) for i, line in enumerate(range(int(line_min), int(line_max) + 1)): start = (line - 1) * int(self.SAMPLE_LAST_PIXEL) + sample_min chunk_size = int(sample_max - sample_min) Za = self.array(chunk_size, start, self.bytesize) if i == 0: Z = Za else: Z = np.vstack((Z, Za)) X, Y = np.meshgrid(X, Y) return X, Y, Z
0.001397
def getPartitionId(self, i):
    """
    Gets the partition id given an index.

    :param i: index of partition
    :returns: the partition id associated with pattern i. Returns None if no id
        is associated with it.
    """
    if (i < 0) or (i >= self._numPatterns):
        raise RuntimeError("index out of bounds")
    partitionId = self._partitionIdList[i]
    if partitionId == numpy.inf:
        return None
    else:
        return partitionId
0.00885
def _load_corpus(name, data_home=None):
    """
    Load a corpus object by name.
    """
    info = DATASETS[name]
    return Corpus(name, data_home=data_home, **info)
0.005952
def restore(self, checkpoint_path):
    """Restores training state from a given model checkpoint.

    These checkpoints are returned from calls to save().

    Subclasses should override ``_restore()`` instead to restore state.
    This method restores additional metadata saved with the checkpoint.
    """
    with open(checkpoint_path + ".tune_metadata", "rb") as f:
        metadata = pickle.load(f)
    self._experiment_id = metadata["experiment_id"]
    self._iteration = metadata["iteration"]
    self._timesteps_total = metadata["timesteps_total"]
    self._time_total = metadata["time_total"]
    self._episodes_total = metadata["episodes_total"]
    saved_as_dict = metadata["saved_as_dict"]
    if saved_as_dict:
        with open(checkpoint_path, "rb") as loaded_state:
            checkpoint_dict = pickle.load(loaded_state)
        self._restore(checkpoint_dict)
    else:
        self._restore(checkpoint_path)
    self._time_since_restore = 0.0
    self._timesteps_since_restore = 0
    self._iterations_since_restore = 0
    self._restored = True
0.001729
def getWorkingPlayAreaRect(self):
    """
    Returns the 4 corner positions of the Play Area (formerly named Soft Bounds) from the working copy.
    Corners are in clockwise order.
    Tracking space center (0,0,0) is the center of the Play Area.
    It's a rectangle.
    2 sides are parallel to the X axis and 2 sides are parallel to the Z axis.
    Height of every corner is 0Y (on the floor).
    """
    fn = self.function_table.getWorkingPlayAreaRect
    rect = HmdQuad_t()
    result = fn(byref(rect))
    return result, rect
0.006873
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
    """
    Send histograms of parameters to tensorboard.
    """
    for name, param in model.named_parameters():
        if name in histogram_parameters:
            self.add_train_histogram("parameter_histogram/" + name, param)
0.005988
def _start(self, my_task, force=False):
    """Returns False when successfully fired, True otherwise"""
    if (not hasattr(my_task, 'subprocess')) or my_task.subprocess is None:
        my_task.subprocess = subprocess.Popen(self.args,
                                              stderr=subprocess.STDOUT,
                                              stdout=subprocess.PIPE)

    if my_task.subprocess:
        my_task.subprocess.poll()
        if my_task.subprocess.returncode is None:
            # Still waiting
            return False
        else:
            results = my_task.subprocess.communicate()
            my_task.results = results
            return True
    return False
0.002667
def _expectation(p, lin_kern, feat1, rbf_kern, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
        - K_lin_{.,.} :: Linear kernel
        - K_rbf_{.,.} :: RBF kernel
    Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint
    active_dims, in which case the joint expectations simplify into a product of expectations

    :return: NxM1xM2
    """
    return tf.matrix_transpose(expectation(p, (rbf_kern, feat2), (lin_kern, feat1)))
0.007561
def _update_staticmethod(self, oldsm, newsm):
    """Update a staticmethod update."""
    # While we can't modify the staticmethod object itself (it has no
    # mutable attributes), we *can* extract the underlying function
    # (by calling __get__(), which returns it) and update it in-place.
    # We don't have the class available to pass to __get__() but any
    # object except None will do.
    self._update(None, None, oldsm.__get__(0), newsm.__get__(0))
0.004082
def parse(text): """ Parse the docstring into its components. :returns: parsed docstring """ ret = Docstring() if not text: return ret text = inspect.cleandoc(text) match = re.search('^:', text, flags=re.M) if match: desc_chunk = text[:match.start()] meta_chunk = text[match.start():] else: desc_chunk = text meta_chunk = '' parts = desc_chunk.split('\n', 1) ret.short_description = parts[0] or None if len(parts) > 1: long_desc_chunk = parts[1] or '' ret.blank_after_short_description = long_desc_chunk.startswith('\n') ret.blank_after_long_description = long_desc_chunk.endswith('\n\n') ret.long_description = long_desc_chunk.strip() or None for match in re.finditer( r'(^:.*?)(?=^:|\Z)', meta_chunk, flags=re.S | re.M ): chunk = match.group(0) if not chunk: continue try: args_chunk, desc_chunk = chunk.lstrip(':').split(':', 1) except ValueError: raise ParseError( 'Error parsing meta information near "{}".'.format(chunk) ) args = args_chunk.split() desc = desc_chunk.strip() if '\n' in desc: first_line, rest = desc.split('\n', 1) desc = first_line + '\n' + inspect.cleandoc(rest) ret.meta.append(DocstringMeta(args, description=desc, type=None)) return ret
0.000681
async def set_message( self, text=None, reply_to=0, parse_mode=(), link_preview=None): """ Changes the draft message on the Telegram servers. The changes are reflected in this object. :param str text: New text of the draft. Preserved if left as None. :param int reply_to: Message ID to reply to. Preserved if left as 0, erased if set to None. :param bool link_preview: Whether to attach a web page preview. Preserved if left as None. :param str parse_mode: The parse mode to be used for the text. :return bool: ``True`` on success. """ if text is None: text = self._text if reply_to == 0: reply_to = self.reply_to_msg_id if link_preview is None: link_preview = self.link_preview raw_text, entities =\ await self._client._parse_message_text(text, parse_mode) result = await self._client(SaveDraftRequest( peer=self._peer, message=raw_text, no_webpage=not link_preview, reply_to_msg_id=reply_to, entities=entities )) if result: self._text = text self._raw_text = raw_text self.link_preview = link_preview self.reply_to_msg_id = reply_to self.date = datetime.datetime.now(tz=datetime.timezone.utc) return result
0.001304
def _writecheck(self, zinfo):
    """Check for errors before writing a file to the archive."""
    if zinfo.filename in self.NameToInfo:
        if self.debug:      # Warning for duplicate names
            print "Duplicate name:", zinfo.filename
    if self.mode not in ("w", "a"):
        raise RuntimeError, 'write() requires mode "w" or "a"'
    if not self.fp:
        raise RuntimeError, \
              "Attempt to write ZIP archive that was already closed"
    if zinfo.compress_type == ZIP_DEFLATED and not zlib:
        raise RuntimeError, \
              "Compression requires the (missing) zlib module"
    if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
        raise RuntimeError, \
              "That compression method is not supported"
    if zinfo.file_size > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Filesize would require ZIP64 extensions")
    if zinfo.header_offset > ZIP64_LIMIT:
        if not self._allowZip64:
            raise LargeZipFile("Zipfile size would require ZIP64 extensions")
0.006114
def mtz(n,c):
    """mtz: Miller-Tucker-Zemlin's model for the (asymmetric) traveling salesman problem
    (potential formulation)
    Parameters:
        - n: number of nodes
        - c[i,j]: cost for traversing arc (i,j)
    Returns a model, ready to be solved.
    """
    model = Model("atsp - mtz")
    x,u = {},{}
    for i in range(1,n+1):
        u[i] = model.addVar(lb=0, ub=n-1, vtype="C", name="u(%s)"%i)
        for j in range(1,n+1):
            if i != j:
                x[i,j] = model.addVar(vtype="B", name="x(%s,%s)"%(i,j))

    for i in range(1,n+1):
        model.addCons(quicksum(x[i,j] for j in range(1,n+1) if j != i) == 1, "Out(%s)"%i)
        model.addCons(quicksum(x[j,i] for j in range(1,n+1) if j != i) == 1, "In(%s)"%i)

    for i in range(1,n+1):
        for j in range(2,n+1):
            if i != j:
                model.addCons(u[i] - u[j] + (n-1)*x[i,j] <= n-2, "MTZ(%s,%s)"%(i,j))

    model.setObjective(quicksum(c[i,j]*x[i,j] for (i,j) in x), "minimize")

    model.data = x,u
    return model
0.029894
def render_confirm_form(self):
    """
    Second step of ExpressCheckout. Display an order confirmation form which
    contains hidden fields with the token / PayerID from PayPal.
    """
    warn_untested()
    initial = dict(token=self.request.GET['token'], PayerID=self.request.GET['PayerID'])
    self.context[self.form_context_name] = self.confirm_form_cls(initial=initial)
    return TemplateResponse(self.request, self.confirm_template, self.context)
0.012245
def ensure_one_opt_multi_ifo(opt, parser, ifo, opt_list):
    """ Check that one and only one in the opt_list is defined in opt

    Parameters
    ----------
    opt : object
        Result of option parsing
    parser : object
        OptionParser instance.
    opt_list : list of strings
    """
    the_one = None
    for name in opt_list:
        attr = name[2:].replace('-', '_')
        try:
            if getattr(opt, attr)[ifo] is None:
                raise KeyError
        except KeyError:
            pass
        else:
            if the_one is None:
                the_one = name
            else:
                parser.error("%s and %s are mutually exculsive" \
                    % (the_one, name))

    if the_one is None:
        parser.error("you must supply one of the following %s" \
                     % (', '.join(opt_list)))
0.00576
def send_audio_packet(self, data, *, encode=True): """Sends an audio packet composed of the data. You must be connected to play audio. Parameters ---------- data: bytes The :term:`py:bytes-like object` denoting PCM or Opus voice data. encode: bool Indicates if ``data`` should be encoded into Opus. Raises ------- ClientException You are not connected. OpusError Encoding the data failed. """ self.checked_add('sequence', 1, 65535) if encode: encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME) else: encoded_data = data packet = self._get_voice_packet(encoded_data) try: self.socket.sendto(packet, (self.endpoint_ip, self.voice_port)) except BlockingIOError: log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp) self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295)
0.004562
def _intersect_edge_arrays(self, lines1, lines2): """Return the intercepts of all lines defined in *lines1* as they intersect all lines in *lines2*. Arguments are of shape (..., 2, 2), where axes are: 0: number of lines 1: two points per line 2: x,y pair per point Lines are compared elementwise across the arrays (lines1[i] is compared against lines2[i]). If one of the arrays has N=1, then that line is compared against all lines in the other array. Returns an array of shape (N,) where each value indicates the intercept relative to the defined line segment. A value of 0 indicates intersection at the first endpoint, and a value of 1 indicates intersection at the second endpoint. Values between 1 and 0 are on the segment, whereas values outside 1 and 0 are off of the segment. """ # vector for each line in lines1 l1 = lines1[..., 1, :] - lines1[..., 0, :] # vector for each line in lines2 l2 = lines2[..., 1, :] - lines2[..., 0, :] # vector between first point of each line diff = lines1[..., 0, :] - lines2[..., 0, :] p = l1.copy()[..., ::-1] # vectors perpendicular to l1 p[..., 0] *= -1 f = (l2 * p).sum(axis=-1) # l2 dot p # tempting, but bad idea! #f = np.where(f==0, 1, f) err = np.geterr() np.seterr(divide='ignore', invalid='ignore') try: h = (diff * p).sum(axis=-1) / f # diff dot p / f finally: np.seterr(**err) return h
0.008966
def gain(self, db):
    """gain takes one paramter: gain in dB."""
    self.command.append('gain')
    self.command.append(db)
    return self
0.012658
def like_button_js_tag(context):
    """
    This tag will check to see if they have the FACEBOOK_LIKE_APP_ID setup
    correctly in the django settings, if so then it will pass the data along
    to the intercom_tag template to be displayed.

    If something isn't perfect we will return False, which will then not
    install the javascript since it isn't needed.
    """
    if FACEBOOK_APP_ID is None:
        log.warning("FACEBOOK_APP_ID isn't setup correctly in your settings")

    # make sure FACEBOOK_APP_ID is setup correct and user is authenticated
    if FACEBOOK_APP_ID:
        request = context.get('request', None)
        if request:
            return {"LIKE_BUTTON_IS_VALID": True,
                    "facebook_app_id": FACEBOOK_APP_ID,
                    "channel_base_url": request.get_host()}

    # if it is here, it isn't a valid setup, return False to not show the tag.
    return {"LIKE_BUTTON_IS_VALID": False}
0.001049
def get_object_record(self, pid):
    """Get an object that has already been cached in the object tree. Caching
    happens when the object tree is refreshed.
    """
    try:
        return self._cache['records'][pid]
    except KeyError:
        raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException('Unknown PID')
0.008499
def VariablesValues(self, variables, t):
    """
    Returns the value of given variables at time t.
    Linear interpolation is performed between two time steps.

    :param variables: one variable or a list of variables
    :param t: time of evaluation
    """
    # TODO: put interpolation in variables
    if (t < self.te) | (t > 0):
        i = t//self.ts  # time step
        ti = self.ts*i
        if type(variables) == list:
            values = []
            for variable in variables:
                # interpolation
                values.append(
                    variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts)
            return values
        else:
            # interpolation
            return variables.values[i]*((ti-t)/self.ts+1)+variables.values[i+1]*(t-ti)/self.ts
    else:
        raise ValueError
0.005302
def cancel_order(self, multi=False, **order_identifiers):
    """Cancel one or multiple orders via Websocket.

    :param multi: bool, whether order_settings contains settings for one, or multiples orders
    :param order_identifiers: Identifiers for the order(s) you with to cancel
    :return:
    """
    if multi:
        self._send_auth_command('oc_multi', order_identifiers)
    else:
        self._send_auth_command('oc', order_identifiers)
0.007921
def _remove_io_handler(self, handler):
    """Remove an i/o-handler."""
    if handler in self._unprepared_handlers:
        old_fileno = self._unprepared_handlers[handler]
        del self._unprepared_handlers[handler]
    else:
        old_fileno = handler.fileno()
    if old_fileno is not None:
        del self._handlers[old_fileno]
        self.io_loop.remove_handler(handler.fileno())
0.004684
def from_url(cls, db_url=ALL_SETS_ZIP_URL):
    """Load card data from a URL.

    Uses :func:`requests.get` to fetch card data. Also handles zipfiles.

    :param db_url: URL to fetch.
    :return: A new :class:`~mtgjson.CardDb` instance.
    """
    r = requests.get(db_url)
    r.raise_for_status()

    if r.headers['content-type'] == 'application/json':
        return cls(json.loads(r.text))

    if r.headers['content-type'] == 'application/zip':
        with zipfile.ZipFile(six.BytesIO(r.content), 'r') as zf:
            names = zf.namelist()
            assert len(names) == 1, 'One datafile in ZIP'
            return cls.from_file(io.TextIOWrapper(
                zf.open(names[0]),
                encoding='utf8'))
0.002519
def file_list_hosts( blockchain_id, wallet_keys=None, config_path=CONFIG_PATH ):
    """
    Given a blockchain ID, find out the hosts the blockchain ID owner has registered keys for.
    Return {'status': True, 'hosts': hostnames} on success
    Return {'error': ...} on failure
    """
    config_dir = os.path.dirname(config_path)
    try:
        ret = blockstack_gpg.gpg_list_app_keys( blockchain_id, APP_NAME, wallet_keys=wallet_keys, config_dir=config_dir )
    except Exception, e:
        ret = {'error': traceback.format_exc(e)}

    if 'error' in ret:
        log.error("Failed to list app keys: %s" % ret['error'])
        return {'error': 'Failed to list app keys'}

    hosts = []
    for key_info in ret:
        hostname = key_info['keyName']
        hosts.append(hostname)

    return {'status': True, 'hosts': hosts}
0.009592
def append_track(self, track=None, pianoroll=None, program=0, is_drum=False, name='unknown'): """ Append a multitrack.Track instance to the track list or create a new multitrack.Track object and append it to the track list. Parameters ---------- track : pianoroll.Track A :class:`pypianoroll.Track` instance to be appended to the track list. pianoroll : np.ndarray, shape=(n_time_steps, 128) A pianoroll matrix. The first and second dimension represents time and pitch, respectively. Available datatypes are bool, int and float. Only effective when `track` is None. program: int A program number according to General MIDI specification [1]. Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano). Only effective when `track` is None. is_drum : bool A boolean number that indicates whether it is a percussion track. Defaults to False. Only effective when `track` is None. name : str The name of the track. Defaults to 'unknown'. Only effective when `track` is None. References ---------- [1] https://www.midi.org/specifications/item/gm-level-1-sound-set """ if track is not None: if not isinstance(track, Track): raise TypeError("`track` must be a pypianoroll.Track instance.") track.check_validity() else: track = Track(pianoroll, program, is_drum, name) self.tracks.append(track)
0.003032
def create_source_import(self, vcs, vcs_url, vcs_username=github.GithubObject.NotSet, vcs_password=github.GithubObject.NotSet): """ :calls: `PUT /repos/:owner/:repo/import <https://developer.github.com/v3/migration/source_imports/#start-an-import>`_ :param vcs: string :param vcs_url: string :param vcs_username: string :param vcs_password: string :rtype: :class:`github.SourceImport.SourceImport` """ assert isinstance(vcs, (str, unicode)), vcs assert isinstance(vcs_url, (str, unicode)), vcs_url assert vcs_username is github.GithubObject.NotSet or isinstance(vcs_username, (str, unicode)), vcs_username assert vcs_password is github.GithubObject.NotSet or isinstance(vcs_password, (str, unicode)), vcs_password put_parameters = { "vcs": vcs, "vcs_url": vcs_url } if vcs_username is not github.GithubObject.NotSet: put_parameters["vcs_username"] = vcs_username if vcs_password is not github.GithubObject.NotSet: put_parameters["vcs_password"] = vcs_password import_header = {"Accept": Consts.mediaTypeImportPreview} headers, data = self._requester.requestJsonAndCheck( "PUT", self.url + "/import", headers=import_header, input=put_parameters ) return github.SourceImport.SourceImport(self._requester, headers, data, completed=False)
0.004682
async def sleep(self, duration: float=0.0) -> None:
    '''Simple wrapper around `asyncio.sleep()`.'''
    duration = max(0, duration)
    if duration > 0:
        Log.debug('sleeping task %s for %.1f seconds', self.name, duration)
        await asyncio.sleep(duration)
0.013793
def make_spot_fleet_cluster( security_groupid, subnet_id, keypair_name, iam_instance_profile_arn, spot_fleet_iam_role, target_capacity=20, spot_price=0.4, expires_days=7, allocation_strategy='lowestPrice', instance_types=SPOT_INSTANCE_TYPES, instance_weights=None, instance_ami='ami-04681a1dbd79675a5', instance_user_data=None, instance_ebs_optimized=True, wait_until_up=True, client=None, raiseonfail=False ): """This makes an EC2 spot-fleet cluster. This requires a security group ID attached to a VPC config and subnet, a keypair generated beforehand, and an IAM role ARN for the instance. See: https://docs.aws.amazon.com/cli/latest/userguide/tutorial-ec2-ubuntu.html Use `user_data` to launch tasks on instance launch. Parameters ---------- security_groupid : str The security group ID of the AWS VPC where the instances will be launched. subnet_id : str The subnet ID of the AWS VPC where the instances will be launched. keypair_name : str The name of the keypair to be used to allow SSH access to all instances launched here. This corresponds to an already downloaded AWS keypair PEM file. iam_instance_profile_arn : str The ARN string corresponding to the AWS instance profile that describes the permissions the launched instances have to access other AWS resources. Set this up in AWS IAM. spot_fleet_iam_role : str This is the name of AWS IAM role that allows the Spot Fleet Manager to scale up and down instances based on demand and instances failing, etc. Set this up in IAM. target_capacity : int The number of instances to target in the fleet request. The fleet manager service will attempt to maintain this number over the lifetime of the Spot Fleet Request. spot_price : float The bid price in USD for the instances. This is per hour. Keep this at about half the hourly on-demand price of the desired instances to make sure your instances aren't taken away by AWS when it needs capacity. expires_days : int The number of days this request is active for. All instances launched by this request will live at least this long and will be terminated automatically after. allocation_strategy : {'lowestPrice', 'diversified'} The allocation strategy used by the fleet manager. instance_types : list of str List of the instance type to launch. See the following URL for a list of IDs: https://aws.amazon.com/ec2/pricing/on-demand/ instance_weights : list of float or None If `instance_types` is a list of different instance types, this is the relative weight applied towards launching each instance type. This can be used to launch a mix of instances in a defined ratio among their types. Doing this can make the spot fleet more resilient to AWS taking back the instances if it runs out of capacity. instance_ami : str The Amazon Machine Image ID that describes the OS the instances will use after launch. The default ID is Amazon Linux 2 in the US East region. instance_user_data : str or None This is either the path to a file on disk that contains a shell-script or a string containing a shell-script that will be executed by root right after the instance is launched. Use to automatically set up workers and queues. If None, will not execute anything at instance start up. instance_ebs_optimized : bool If True, will enable EBS optimization to speed up IO. This is usually True for all instances made available in the last couple of years. wait_until_up : bool If True, will not return from this function until the spot fleet request is acknowledged by AWS. 
client : boto3.Client or None If None, this function will instantiate a new `boto3.Client` object to use in its operations. Alternatively, pass in an existing `boto3.Client` instance to re-use it here. raiseonfail : bool If True, will re-raise whatever Exception caused the operation to fail and break out immediately. Returns ------- str or None This is the spot fleet request ID if successful. Otherwise, returns None. """ fleetconfig = copy.deepcopy(SPOT_FLEET_CONFIG) fleetconfig['IamFleetRole'] = spot_fleet_iam_role fleetconfig['AllocationStrategy'] = allocation_strategy fleetconfig['TargetCapacity'] = target_capacity fleetconfig['SpotPrice'] = str(spot_price) fleetconfig['ValidUntil'] = ( datetime.utcnow() + timedelta(days=expires_days) ).strftime( '%Y-%m-%dT%H:%M:%SZ' ) # get the user data from a string or a file # we need to base64 encode it here if (isinstance(instance_user_data, str) and os.path.exists(instance_user_data)): with open(instance_user_data,'rb') as infd: udata = base64.b64encode(infd.read()).decode() elif isinstance(instance_user_data, str): udata = base64.b64encode(instance_user_data.encode()).decode() else: udata = ( '#!/bin/bash\necho "No user data provided. ' 'Launched instance at: %s UTC"' % datetime.utcnow().isoformat() ) udata = base64.b64encode(udata.encode()).decode() for ind, itype in enumerate(instance_types): thisinstance = SPOT_PERINSTANCE_CONFIG.copy() thisinstance['InstanceType'] = itype thisinstance['ImageId'] = instance_ami thisinstance['SubnetId'] = subnet_id thisinstance['KeyName'] = keypair_name thisinstance['IamInstanceProfile']['Arn'] = iam_instance_profile_arn thisinstance['SecurityGroups'][0] = {'GroupId':security_groupid} thisinstance['UserData'] = udata thisinstance['EbsOptimized'] = instance_ebs_optimized # get the instance weights if isinstance(instance_weights, list): thisinstance['WeightedCapacity'] = instance_weights[ind] fleetconfig['LaunchSpecifications'].append(thisinstance) # # launch the fleet # if not client: client = boto3.client('ec2') try: resp = client.request_spot_fleet( SpotFleetRequestConfig=fleetconfig, ) if not resp: LOGERROR('spot fleet request failed.') return None else: spot_fleet_reqid = resp['SpotFleetRequestId'] LOGINFO('spot fleet requested successfully. request ID: %s' % spot_fleet_reqid) if not wait_until_up: return spot_fleet_reqid else: ntries = 10 curr_try = 0 while curr_try < ntries: resp = client.describe_spot_fleet_requests( SpotFleetRequestIds=[ spot_fleet_reqid ] ) curr_state = resp.get('SpotFleetRequestConfigs',[]) if len(curr_state) > 0: curr_state = curr_state[0]['SpotFleetRequestState'] if curr_state == 'active': LOGINFO('spot fleet with reqid: %s is now active' % spot_fleet_reqid) break LOGINFO( 'spot fleet not yet active, waiting 15 seconds. ' 'try %s/%s' % (curr_try, ntries) ) curr_try = curr_try + 1 time.sleep(15.0) return spot_fleet_reqid except ClientError as e: LOGEXCEPTION('could not launch spot fleet') if raiseonfail: raise return None except Exception as e: LOGEXCEPTION('could not launch spot fleet') if raiseonfail: raise return None
0.001575
def clean_fail(func):
    '''
    A decorator to cleanly exit on a failed call to AWS.
    catch a `botocore.exceptions.ClientError` raised from an action.
    This sort of error is raised if you are targeting a region that
    isn't set up (see, `credstash setup`.
    '''
    def func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except botocore.exceptions.ClientError as e:
            print(str(e), file=sys.stderr)
            sys.exit(1)
    return func_wrapper
0.001961
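# Minimal usage sketch for the clean_fail decorator (added example). get_secret
# is a hypothetical stand-in for any call that can raise
# botocore.exceptions.ClientError; the decorator turns that into a message on
# stderr and a clean exit with status 1 instead of a traceback.
import botocore.exceptions

@clean_fail
def get_secret(name):
    raise botocore.exceptions.ClientError(
        {'Error': {'Code': 'AccessDenied', 'Message': 'not allowed'}},
        'GetItem')

# get_secret('db-password')  # prints the error and calls sys.exit(1)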
def simulate(self): """ Section 7 - uwg main section self.N # Total hours in simulation self.ph # per hour self.dayType # 3=Sun, 2=Sat, 1=Weekday self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep # Output of object instance vector self.WeatherData # Nx1 vector of forc instance self.UCMData # Nx1 vector of UCM instance self.UBLData # Nx1 vector of UBL instance self.RSMData # Nx1 vector of RSM instance self.USMData # Nx1 vector of USM instance """ self.N = int(self.simTime.days * 24) # total number of hours in simulation n = 0 # weather time step counter self.ph = self.simTime.dt/3600. # dt (simulation time step) in hours # Data dump variables time = range(self.N) self.WeatherData = [None for x in range(self.N)] self.UCMData = [None for x in range(self.N)] self.UBLData = [None for x in range(self.N)] self.RSMData = [None for x in range(self.N)] self.USMData = [None for x in range(self.N)] print('\nSimulating new temperature and humidity values for {} days from {}/{}.\n'.format( int(self.nDay), int(self.Month), int(self.Day))) self.logger.info("Start simulation") for it in range(1, self.simTime.nt, 1): # for every simulation time-step (i.e 5 min) defined by uwg # Update water temperature (estimated) if self.nSoil < 3: # correction to original matlab code # for BUBBLE/CAPITOUL/Singapore only self.forc.deepTemp = sum(self.forcIP.temp)/float(len(self.forcIP.temp)) self.forc.waterTemp = sum( self.forcIP.temp)/float(len(self.forcIP.temp)) - 10. # for BUBBLE/CAPITOUL/Singapore only else: # soil temperature by depth, by month self.forc.deepTemp = self.Tsoil[self.soilindex1][self.simTime.month-1] self.forc.waterTemp = self.Tsoil[2][self.simTime.month-1] # There's probably a better way to update the weather... self.simTime.UpdateDate() self.logger.info("\n{0} m={1}, d={2}, h={3}, s={4}".format( __name__, self.simTime.month, self.simTime.day, self.simTime.secDay/3600., self.simTime.secDay)) # simulation time increment raised to weather time step self.ceil_time_step = int(math.ceil(it * self.ph))-1 # minus one to be consistent with forcIP list index # Updating forcing instance # horizontal Infrared Radiation Intensity (W m-2) self.forc.infra = self.forcIP.infra[self.ceil_time_step] # wind speed (m s-1) self.forc.wind = max(self.forcIP.wind[self.ceil_time_step], self.geoParam.windMin) self.forc.uDir = self.forcIP.uDir[self.ceil_time_step] # wind direction # specific humidty (kg kg-1) self.forc.hum = self.forcIP.hum[self.ceil_time_step] self.forc.pres = self.forcIP.pres[self.ceil_time_step] # Pressure (Pa) self.forc.temp = self.forcIP.temp[self.ceil_time_step] # air temperature (C) self.forc.rHum = self.forcIP.rHum[self.ceil_time_step] # Relative humidity (%) self.forc.prec = self.forcIP.prec[self.ceil_time_step] # Precipitation (mm h-1) # horizontal solar diffuse radiation (W m-2) self.forc.dif = self.forcIP.dif[self.ceil_time_step] # normal solar direct radiation (W m-2) self.forc.dir = self.forcIP.dir[self.ceil_time_step] # Canyon humidity (absolute) same as rural self.UCM.canHum = copy.copy(self.forc.hum) # Update solar flux self.solar = SolarCalcs(self.UCM, self.BEM, self.simTime, self.RSM, self.forc, self.geoParam, self.rural) self.rural, self.UCM, self.BEM = self.solar.solarcalcs() # Update building & traffic schedule # Assign day type (1 = weekday, 2 = sat, 3 = sun/other) if self.is_near_zero(self.simTime.julian % 7): self.dayType = 3 # Sunday elif self.is_near_zero(self.simTime.julian % 7 - 6.): self.dayType = 2 # Saturday else: self.dayType = 1 # Weekday # Update 
anthropogenic heat load for each hour (building & UCM) self.UCM.sensAnthrop = self.sensAnth * (self.SchTraffic[self.dayType-1][self.simTime.hourDay]) # Update the energy components for building types defined in initialize.uwg for i in range(len(self.BEM)): # Set temperature self.BEM[i].building.coolSetpointDay = self.Sch[i].Cool[self.dayType - 1][self.simTime.hourDay] + 273.15 # add from temperature schedule for cooling self.BEM[i].building.coolSetpointNight = self.BEM[i].building.coolSetpointDay self.BEM[i].building.heatSetpointDay = self.Sch[i].Heat[self.dayType - 1][self.simTime.hourDay] + 273.15 # add from temperature schedule for heating self.BEM[i].building.heatSetpointNight = self.BEM[i].building.heatSetpointDay # Internal Heat Load Schedule (W/m^2 of floor area for Q) self.BEM[i].Elec = self.Sch[i].Qelec * self.Sch[i].Elec[self.dayType - 1][self.simTime.hourDay] # Qelec x elec fraction for day self.BEM[i].Light = self.Sch[i].Qlight * self.Sch[i].Light[self.dayType - 1][self.simTime.hourDay] # Qlight x light fraction for day self.BEM[i].Nocc = self.Sch[i].Nocc * self.Sch[i].Occ[self.dayType - 1][self.simTime.hourDay] # Number of occupants x occ fraction for day # Sensible Q occupant * fraction occupant sensible Q * number of occupants self.BEM[i].Qocc = self.sensOcc * (1 - self.LatFOcc) * self.BEM[i].Nocc # SWH and ventilation schedule self.BEM[i].SWH = self.Sch[i].Vswh * self.Sch[i].SWH[self.dayType - 1][self.simTime.hourDay] # litres per hour x SWH fraction for day # m^3/s/m^2 of floor self.BEM[i].building.vent = self.Sch[i].Vent self.BEM[i].Gas = self.Sch[i].Qgas * self.Sch[i].Gas[self.dayType - 1][self.simTime.hourDay] # Gas Equip Schedule, per m^2 of floor # This is quite messy, should update # Update internal heat and corresponding fractional loads intHeat = self.BEM[i].Light + self.BEM[i].Elec + self.BEM[i].Qocc # W/m2 from light, electricity, occupants self.BEM[i].building.intHeatDay = intHeat self.BEM[i].building.intHeatNight = intHeat # fraction of radiant heat from light and equipment of whole internal heat self.BEM[i].building.intHeatFRad = ( self.RadFLight * self.BEM[i].Light + self.RadFEquip * self.BEM[i].Elec) / intHeat # fraction of latent heat (from occupants) of whole internal heat self.BEM[i].building.intHeatFLat = self.LatFOcc * \ self.sensOcc * self.BEM[i].Nocc/intHeat # Update envelope temperature layers self.BEM[i].T_wallex = self.BEM[i].wall.layerTemp[0] self.BEM[i].T_wallin = self.BEM[i].wall.layerTemp[-1] self.BEM[i].T_roofex = self.BEM[i].roof.layerTemp[0] self.BEM[i].T_roofin = self.BEM[i].roof.layerTemp[-1] # Update rural heat fluxes & update vertical diffusion model (VDM) self.rural.infra = self.forc.infra - self.rural.emissivity * self.SIGMA * \ self.rural.layerTemp[0]**4. # Infrared radiation from rural road self.rural.SurfFlux(self.forc, self.geoParam, self.simTime, self.forc.hum, self.forc.temp, self.forc.wind, 2., 0.) self.RSM.VDM(self.forc, self.rural, self.geoParam, self.simTime) # Calculate urban heat fluxes, update UCM & UBL self.UCM, self.UBL, self.BEM = urbflux( self.UCM, self.UBL, self.BEM, self.forc, self.geoParam, self.simTime, self.RSM) self.UCM.UCModel(self.BEM, self.UBL.ublTemp, self.forc, self.geoParam) self.UBL.UBLModel(self.UCM, self.RSM, self.rural, self.forc, self.geoParam, self.simTime) """ # Experimental code to run diffusion model in the urban area # N.B Commented out in python uwg because computed wind speed in # urban VDM: y = =0.84*ln((2-x/20)/0.51) results in negative log # for building heights >= 40m. 
Uroad = copy.copy(self.UCM.road) Uroad.sens = copy.copy(self.UCM.sensHeat) Uforc = copy.copy(self.forc) Uforc.wind = copy.copy(self.UCM.canWind) Uforc.temp = copy.copy(self.UCM.canTemp) self.USM.VDM(Uforc,Uroad,self.geoParam,self.simTime) """ self.logger.info("dbT = {}".format(self.UCM.canTemp-273.15)) if n > 0: logging.info("dpT = {}".format(self.UCM.Tdp)) logging.info("RH = {}".format(self.UCM.canRHum)) if self.is_near_zero(self.simTime.secDay % self.simTime.timePrint) and n < self.N: self.logger.info("{0} ----sim time step = {1}----\n\n".format(__name__, n)) self.WeatherData[n] = copy.copy(self.forc) _Tdb, _w, self.UCM.canRHum, _h, self.UCM.Tdp, _v = psychrometrics( self.UCM.canTemp, self.UCM.canHum, self.forc.pres) self.UBLData[n] = copy.copy(self.UBL) self.UCMData[n] = copy.copy(self.UCM) self.RSMData[n] = copy.copy(self.RSM) self.logger.info("dbT = {}".format(self.UCMData[n].canTemp-273.15)) self.logger.info("dpT = {}".format(self.UCMData[n].Tdp)) self.logger.info("RH = {}".format(self.UCMData[n].canRHum)) n += 1
0.004801
def chunks(data, chunk_size): """ Yield chunk_size chunks from data.""" for i in xrange(0, len(data), chunk_size): yield data[i:i+chunk_size]
0.006369
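# Usage sketch for chunks() (added example). The xrange call above implies
# Python 2; on Python 3 replace xrange with range.
data = list(range(10))
print(list(chunks(data, 3)))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
# The final chunk is shorter when len(data) is not a multiple of chunk_size.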
def check_for_file(self, share_name, directory_name, file_name, **kwargs): """ Check if a file exists on Azure File Share. :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.exists()` takes. :type kwargs: object :return: True if the file exists, False otherwise. :rtype: bool """ return self.connection.exists(share_name, directory_name, file_name, **kwargs)
0.002782
def UpdateResourcesFromResFile(dstpath, srcpath, types=None, names=None, languages=None): """ Update or add resources from dll/exe file srcpath in dll/exe file dstpath. types = a list of resource types to update (None = all) names = a list of resource names to update (None = all) languages = a list of resource languages to update (None = all) """ res = GetResources(srcpath, types, names, languages) UpdateResourcesFromDict(dstpath, res)
0.007797
def note_addition(self, key, value): """ Updates the change state to reflect the addition of a field. Detects previous changes and deletions of the field and acts accordingly. """ # If we're adding a field we previously deleted, remove the deleted note. if key in self._deleted: # If the key we're adding back has a different value, then it's a change if value != self._deleted[key]: self._previous[key] = self._deleted[key] del self._deleted[key] else: self._added.append(key)
0.008375
def Chunks(l, n, all=False): ''' Returns a generator of consecutive `n`-sized chunks of list `l`. If `all` is `True`, returns **all** `n`-sized chunks in `l` by iterating over the starting point. ''' if all: jarr = range(0, n - 1) else: jarr = [0] for j in jarr: for i in range(j, len(l), n): if i + 2 * n <= len(l): yield l[i:i + n] else: if not all: yield l[i:] break
0.003846
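# Usage sketch for Chunks() with the default all=False (added example). Unlike a
# plain chunker, the trailing remainder is folded into the last yielded chunk
# rather than emitted as a short tail.
print(list(Chunks(list(range(10)), 3)))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]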
def parse_column_names(text): """ Extracts column names from a string containing quoted and comma separated column names. :param text: Line extracted from `COPY` statement containing quoted and comma separated column names. :type text: str :return: Tuple containing just the column names. :rtype: tuple[str] """ return tuple( re.sub(r"^\"(.*)\"$", r"\1", column_name.strip()) for column_name in text.split(",") )
0.002053
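# Usage sketch for parse_column_names() (added example); assumes `re` is
# imported in the module that defines it.
line = '"id", "first name", "created_at"'
print(parse_column_names(line))
# -> ('id', 'first name', 'created_at')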
def load_config_module(): """ If the config.py file exists, import it as a module. If it does not exist, call sys.exit() with a request to run oaepub configure. """ import imp config_path = config_location() try: config = imp.load_source('config', config_path) except IOError: log.critical('Config file not found. oaepub exiting...') sys.exit('Config file not found. Please run \'oaepub configure\'') else: log.debug('Config file loaded from {0}'.format(config_path)) return config
0.001795
def dump(props, output): """Dumps a dict of properties to the specified open stream or file path. :API: public """ def escape(token): return re.sub(r'([=:\s])', r'\\\1', token) def write(out): for k, v in props.items(): out.write('%s=%s\n' % (escape(str(k)), escape(str(v)))) if hasattr(output, 'write') and callable(output.write): write(output) elif isinstance(output, six.string_types): with open(output, 'w+') as out: write(out) else: raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)
0.011532
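# Usage sketch for dump() (added example, Python 3): '=', ':' and whitespace in
# keys or values are backslash-escaped. Assumes `re` and `six` are imported
# alongside dump(); output order follows dict iteration order.
import io

buf = io.StringIO()
dump({'jvm.args': '-Xmx2g -Xms1g', 'build.label': 'release:2021'}, buf)
print(buf.getvalue())
# jvm.args=-Xmx2g\ -Xms1g
# build.label=release\:2021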
def _read_opt_type(self, kind): """Read option type field. Positional arguments: * kind -- int, option kind value Returns: * dict -- extracted IPv4 option Structure of option type field [RFC 791]: Octets Bits Name Descriptions 0 0 ip.opt.type.copy Copied Flag (0/1) 0 1 ip.opt.type.class Option Class (0-3) 0 3 ip.opt.type.number Option Number """ bin_ = bin(kind)[2:].zfill(8) type_ = { 'copy': bool(int(bin_[0], base=2)), 'class': opt_class.get(int(bin_[1:3], base=2)), 'number': int(bin_[3:], base=2), } return type_
0.002491
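# Standalone sketch (added example) of the same RFC 791 bit layout using integer
# bit operations instead of string slicing. kind=131 is the Loose Source Route
# option: copied flag 1, class 0 (control), number 3.
kind = 131                               # 0b10000011
copy_flag = bool((kind >> 7) & 0x01)     # -> True
opt_class = (kind >> 5) & 0x03           # -> 0
opt_number = kind & 0x1f                 # -> 3
print(copy_flag, opt_class, opt_number)  # True 0 3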
def create_ip_range(self, network_view, start_ip, end_ip, network, disable, range_extattrs): """Creates IPRange or fails if already exists.""" return obj.IPRange.create(self.connector, network_view=network_view, start_addr=start_ip, end_addr=end_ip, cidr=network, disable=disable, extattrs=range_extattrs, check_if_exists=False)
0.00495
def rate(self, from_currency, to_currency, date): """Get the exchange rate between the specified currencies""" return (1 / self.backend.get_rate(from_currency, date)) * self.backend.get_rate( to_currency, date )
0.012146
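# Worked sketch (added example) of the cross-rate arithmetic above, using made-up
# backend rates quoted against a common base currency (units of X per 1 base):
usd_per_base = 1.10   # hypothetical backend.get_rate('USD', date)
gbp_per_base = 0.85   # hypothetical backend.get_rate('GBP', date)
usd_to_gbp = (1 / usd_per_base) * gbp_per_base
print(round(usd_to_gbp, 4))  # 0.7727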
def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time(second). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout)
0.008333
def reply_code_tuple(code: int) -> Tuple[int, int, int]: '''Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is the digit. ''' return code // 100, code // 10 % 10, code % 10
0.003906
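# Usage sketch (added example): split an FTP-style reply code into its digits.
print(reply_code_tuple(250))  # (2, 5, 0)
print(reply_code_tuple(550))  # (5, 5, 0)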
def wallet_representative_set(self, wallet, representative): """ Sets the default **representative** for **wallet** .. enable_control required :param wallet: Wallet to set default representative account for :type wallet: str :param representative: Representative account to set for **wallet** :type representative: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.wallet_representative_set( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", ... representative="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000" ... ) True """ wallet = self._process_value(wallet, 'wallet') representative = self._process_value(representative, 'account') payload = {"wallet": wallet, "representative": representative} resp = self.call('wallet_representative_set', payload) return resp['set'] == '1'
0.004004
def remove_dupes(list_with_dupes):
    '''Remove duplicate entries from a list while preserving order

    This function uses Python's standard equivalence testing methods in
    order to determine if two elements of a list are identical. So if in the list
    [a,b,c] the condition a == b is True, then regardless of whether a and b are
    strings, ints, or other, b will be removed from the list: [a, c]

    Parameters
    ----------
    list_with_dupes : list
        A list containing duplicate elements

    Returns
    -------
    out : list
        The list with the duplicate entries removed but the order preserved

    Examples
    --------
    >>> a = [1,3,2,4,2]
    >>> print(remove_dupes(a))
    [1, 3, 2, 4]

    '''
    visited = set()
    visited_add = visited.add

    out = [
        entry for entry in list_with_dupes
        if not (entry in visited or visited_add(entry))]

    return out
0.005574
def validate(self): """ validate: Makes sure content node is valid Args: None Returns: boolean indicating if content node is valid """ assert self.role in ROLES, "Assumption Failed: Role must be one of the following {}".format(ROLES) assert isinstance(self.license, str) or isinstance(self.license, License), "Assumption Failed: License is not a string or license object" self.license.validate() # if self.required_file_format: # files_valid = False # #not any(f for f in self.files if isinstance(f, DownloadFile)) # for f in self.files: # files_valid = files_valid or (f.path.endswith(self.required_file_format) # assert files_valid , "Assumption Failed: Node should have at least one {} file".format(self.required_file_format) return super(ContentNode, self).validate()
0.006557
def get_json_report_object(self, key): """ Retrieve a JSON report object of the report. :param key: The key of the report object :return: The deserialized JSON report object. """ con = ConnectionManager().get_connection(self._connection_alias) return con.get_json(self.json_report_objects[key], append_base_url=False)
0.008021
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary): """Update results_dictionary with a scalar value. Used to update the results_dictionary to be returned by parse_values when encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) Mutates results_dictionary. Args: name: Name of variable in assignment ("s" or "arr"). parse_fn: Function for parsing the actual value. var_type: Type of named variable. m_dict: Dictionary constructed from regex parsing. m_dict['val']: RHS value (scalar) m_dict['index']: List index value (or None) values: Full expression being parsed results_dictionary: The dictionary being updated for return by the parsing function. Raises: ValueError: If the name has already been used. """ try: parsed_value = parse_fn(m_dict['val']) except ValueError: _parse_fail(name, var_type, m_dict['val'], values) # If no index is provided if not m_dict['index']: if name in results_dictionary: _reuse_fail(name, values) results_dictionary[name] = parsed_value else: if name in results_dictionary: # The name has already been used as a scalar, then it # will be in this dictionary and map to a non-dictionary. if not isinstance(results_dictionary.get(name), dict): _reuse_fail(name, values) else: results_dictionary[name] = {} index = int(m_dict['index']) # Make sure the index position hasn't already been assigned a value. if index in results_dictionary[name]: _reuse_fail('{}[{}]'.format(name, index), values) results_dictionary[name][index] = parsed_value
0.007616
def concatenate_aiml(path='aiml-en-us-foundation-alice.v1-9.zip', outfile='aiml-en-us-foundation-alice.v1-9.aiml'): """Strip trailing </aiml> tag and concatenate all valid AIML files found in the ZIP.""" path = find_data_path(path) or path zf = zipfile.ZipFile(path) for name in zf.namelist(): if not name.lower().endswith('.aiml'): continue with zf.open(name) as fin: happyending = '#!*@!!BAD' for i, line in enumerate(fin): try: line = line.decode('utf-8').strip() except UnicodeDecodeError: line = line.decode('ISO-8859-1').strip() if line.lower().startswith('</aiml>') or line.lower().endswith('</aiml>'): happyending = (i, line) break else: pass if happyending != (i, line): print('Invalid AIML format: {}\nLast line (line number {}) was: {}\nexpected "</aiml>"'.format( name, i, line))
0.00466
def readAxes(self): """ Read the axes element. """ for axisElement in self.root.findall(".axes/axis"): axis = {} axis['name'] = name = axisElement.attrib.get("name") axis['tag'] = axisElement.attrib.get("tag") axis['minimum'] = float(axisElement.attrib.get("minimum")) axis['maximum'] = float(axisElement.attrib.get("maximum")) axis['default'] = float(axisElement.attrib.get("default")) # we're not using the map for anything. axis['map'] = [] for warpPoint in axisElement.findall(".map"): inputValue = float(warpPoint.attrib.get("input")) outputValue = float(warpPoint.attrib.get("output")) axis['map'].append((inputValue, outputValue)) # there are labelnames in the element # but we don't need them for building the fonts. self.axes[name] = axis self.axesOrder.append(axis['name'])
0.001978
def add_logging_handler(handler, format_='file'): """ mostly for util_logging internals """ global __UTOOL_ROOT_LOGGER__ if __UTOOL_ROOT_LOGGER__ is None: builtins.print('[WARNING] logger not started, cannot add handler') return # create formatter and add it to the handlers #logformat = '%Y-%m-%d %H:%M:%S' #logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' timeformat = '%H:%M:%S' if format_ == 'file': logformat = '[%(asctime)s]%(message)s' elif format_ == 'stdout': logformat = '%(message)s' else: raise AssertionError('unknown logging format_: %r' % format_) # Create formatter for handlers formatter = logging.Formatter(logformat, timeformat) handler.setLevel(logging.DEBUG) handler.setFormatter(formatter) __UTOOL_ROOT_LOGGER__.addHandler(handler)
0.003413
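# Usage sketch for add_logging_handler() (added example). Assumes the module's
# root logger (__UTOOL_ROOT_LOGGER__) has already been started; otherwise the
# call only prints a warning and returns.
import logging
import sys

stdout_handler = logging.StreamHandler(sys.stdout)
add_logging_handler(stdout_handler, format_='stdout')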
async def get_user_data(self): """Get Tautulli userdata.""" userdata = {} sessions = self.session_data.get('sessions', {}) try: async with async_timeout.timeout(8, loop=self._loop): for username in self.tautulli_users: userdata[username] = {} userdata[username]['Activity'] = None for session in sessions: if session['username'].lower() == username.lower(): userdata[username]['Activity'] = session['state'] for key in session: if key != 'Username': userdata[username][key] = session[key] break self.tautulli_user_data = userdata except (asyncio.TimeoutError, aiohttp.ClientError, KeyError): msg = "Can not load data from Tautulli." logger(msg, 40)
0.002022
def rel_path_from_chan_path(chan_path, channeldir, windows=False): """ Convert `chan_path` as obtained from a metadata provider into a `rel_path` suitable for accessing the file from the current working directory, e.g., >>> rel_path_from_chan_path('Open Stax/Math', 'content/open_stax_zip/Open Stax') 'content/open_stax_zip/Open Stax/Math' """ if windows: chan_path_list = chan_path.split('\\') else: chan_path_list = chan_path.split('/') chan_path_list.pop(0) # remove the channel root dir rel_path = os.path.join(channeldir, *chan_path_list) return rel_path
0.003226
def makeBiDirectional(d):
    """
    Helper for generating tagNameConverter.
    Makes a dict that maps from key to value and back.
    """
    dTmp = d.copy()
    for k in d:
        dTmp[d[k]] = k
    return dTmp
0.004739
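# Usage sketch (added example): the returned dict maps keys to values and values
# back to keys, so lookups work in either direction.
tagNameConverter = makeBiDirectional({'noun': 'NN', 'verb': 'VB'})
print(tagNameConverter['NN'])    # 'noun'
print(tagNameConverter['verb'])  # 'VB'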
def addTransitions(self, state, transitions): """ Create a new L{TransitionTable} with all the same transitions as this L{TransitionTable} plus a number of new transitions. @param state: The state for which the new transitions are defined. @param transitions: A L{dict} mapping inputs to output, nextState pairs. Each item from this L{dict} will define a new transition in C{state}. @return: The newly created L{TransitionTable}. """ table = self._copy() state = table.table.setdefault(state, {}) for (input, (output, nextState)) in transitions.items(): state[input] = Transition(output, nextState) return table
0.00271
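# Hedged usage sketch for addTransitions() (added example). `table` stands for an
# existing TransitionTable instance; its constructor is not shown above, so this
# only illustrates the call shape: inputs map to (output, nextState) pairs.
newTable = table.addTransitions('disconnected', {
    'connect': (['attempt-connection'], 'connecting'),
})
# `table` is unchanged; `newTable` additionally knows the 'disconnected' state.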
def _update_column_info(self): """ Used for validation during parsing, and additional book-keeping. For internal use only. """ del self.columnnames[:] del self.columntypes[:] del self.columnpytypes[:] for child in self.getElementsByTagName(ligolw.Column.tagName): if self.validcolumns is not None: try: if self.validcolumns[child.Name] != child.Type: raise ligolw.ElementError("invalid type '%s' for Column '%s' in Table '%s', expected type '%s'" % (child.Type, child.getAttribute("Name"), self.getAttribute("Name"), self.validcolumns[child.Name])) except KeyError: raise ligolw.ElementError("invalid Column '%s' for Table '%s'" % (child.getAttribute("Name"), self.getAttribute("Name"))) if child.Name in self.columnnames: raise ligolw.ElementError("duplicate Column '%s' in Table '%s'" % (child.getAttribute("Name"), self.getAttribute("Name"))) self.columnnames.append(child.Name) self.columntypes.append(child.Type) try: self.columnpytypes.append(ligolwtypes.ToPyType[child.Type]) except KeyError: raise ligolw.ElementError("unrecognized Type '%s' for Column '%s' in Table '%s'" % (child.Type, child.getAttribute("Name"), self.getAttribute("Name")))
0.022913
def install_python_module(name):
    """ Installs a python module using pip. """

    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=False, capture=True):
        run('pip --quiet install %s' % name)
0.004149
def _Fierz_to_EOS_V(Fsbuu,Fsbdd,Fsbcc,Fsbss,Fsbbb,parameters): p = parameters V = ckmutil.ckm.ckm_tree(p["Vus"], p["Vub"], p["Vcb"], p["delta"]) Vtb = V[2,2] Vts = V[2,1] """From Fierz to the EOS basis for b -> s transitions. The arguments are dictionaries of the corresponding Fierz bases """ dic = { 'b->s::c1' : -Fsbbb['Fsbbb1']/3 + 2*Fsbcc['Fsbcc1'] - 2 * Fsbdd['Fsbdd1'] / 3 + Fsbdd['Fsbdd2']/3 - Fsbss['Fsbss1'] / 3 - 2 * Fsbuu['Fsbuu1'] / 3 + Fsbuu['Fsbuu2'] / 3, 'b->s::c2' : -2 * Fsbbb['Fsbbb1'] / 9 + Fsbcc['Fsbcc1'] / 3 + Fsbcc['Fsbcc2'] + Fsbdd['Fsbdd1'] / 18 - 5 * Fsbdd['Fsbdd2'] / 18 - 2 * Fsbss['Fsbss1'] / 9 + Fsbuu['Fsbuu1'] / 18 - 5 * Fsbuu['Fsbuu2'] / 18, 'b->s::c3' : -2 * Fsbbb['Fsbbb1'] / 27 + 4 * Fsbbb['Fsbbb3'] / 15 + 4 * Fsbbb['Fsbbb4'] / 45 + 4 * Fsbcc['Fsbcc3'] / 15 + 4 * Fsbcc['Fsbcc4'] / 45 - 5 * Fsbdd['Fsbdd1'] / 54 + Fsbdd['Fsbdd2'] / 54 + 4 * Fsbdd['Fsbdd3'] / 15 + 4 * Fsbdd['Fsbdd4'] / 45 - 2 * Fsbss['Fsbss1'] / 27 + 4 * Fsbss['Fsbss3'] / 15 + 4 * Fsbss['Fsbss4'] / 45 - 5 * Fsbuu['Fsbuu1'] / 54 + Fsbuu['Fsbuu2'] / 54 + 4 * Fsbuu['Fsbuu3'] / 15 + 4 * Fsbuu['Fsbuu4'] / 45, 'b->s::c4' : -Fsbbb['Fsbbb1'] / 9 + 8 * Fsbbb['Fsbbb4'] / 15 + 8 * Fsbcc['Fsbcc4'] / 15 + Fsbdd['Fsbdd1'] / 9 - 2 * Fsbdd['Fsbdd2'] / 9 + 8 * Fsbdd['Fsbdd4'] / 15 - Fsbss['Fsbss1'] / 9 + 8 * Fsbss['Fsbss4'] / 15 + Fsbuu['Fsbuu1'] / 9 - 2 * Fsbuu['Fsbuu2'] / 9 + 8 * Fsbuu['Fsbuu4'] / 15, 'b->s::c5' : Fsbbb['Fsbbb1'] / 54 - Fsbbb['Fsbbb3'] / 60 - Fsbbb['Fsbbb4'] / 180 - Fsbcc['Fsbcc3'] / 60 - Fsbcc['Fsbcc4'] / 180 + 5 * Fsbdd['Fsbdd1'] / 216 - Fsbdd['Fsbdd2'] / 216 - Fsbdd['Fsbdd3'] / 60 - Fsbdd['Fsbdd4'] / 180 + Fsbss['Fsbss1'] / 54 - Fsbss['Fsbss3'] / 60 - Fsbss['Fsbss4'] / 180 + 5 * Fsbuu['Fsbuu1'] / 216 - Fsbuu['Fsbuu2'] / 216 - Fsbuu['Fsbuu3'] / 60 - Fsbuu['Fsbuu4'] / 180, 'b->s::c6' : Fsbbb['Fsbbb1'] / 36 - Fsbbb['Fsbbb4'] / 30 - Fsbcc['Fsbcc4'] / 30 - Fsbdd['Fsbdd1'] / 36 + Fsbdd['Fsbdd2'] / 18 - Fsbdd['Fsbdd4'] / 30 + Fsbss['Fsbss1'] / 36 - Fsbss['Fsbss4'] / 30 - Fsbuu['Fsbuu1'] / 36 + Fsbuu['Fsbuu2'] / 18 - Fsbuu['Fsbuu4'] / 30 } prefactor = sqrt(2)/p['GF']/Vtb/Vts.conj()/4 return {k: prefactor * v for k,v in dic.items()}
0.007829
def raster(self, path, size, bandtype=gdal.GDT_Byte): """Returns a new Raster instance. gdal.Driver.Create() does not support all formats. Arguments: path -- file object or path as str size -- two or three-tuple of (xsize, ysize, bandcount) bandtype -- GDAL pixel data type """ path = getattr(path, 'name', path) try: is_multiband = len(size) > 2 nx, ny, nbands = size if is_multiband else size + (1,) except (TypeError, ValueError) as exc: exc.args = ('Size must be 2 or 3-item sequence',) raise if nx < 1 or ny < 1: raise ValueError('Invalid raster size %s' % (size,)) # Do not write to a non-empty file. if not self._is_empty(path): raise IOError('%s already exists, open with Raster()' % path) ds = self.Create(path, nx, ny, nbands, bandtype) if not ds: raise ValueError( 'Could not create %s using %s' % (path, str(self))) return Raster(ds)
0.00186
def _install(archive_filename, install_args=()): """Install Setuptools.""" with archive_context(archive_filename): # installing log.warn('Installing Setuptools') if not _python_cmd('setup.py', 'install', *install_args): log.warn('Something went wrong during the installation.') log.warn('See the error message above.') # exitcode will be 2 return 2
0.002336
def _export_dx(self, filename, type=None, typequote='"', **kwargs): """Export the density grid to an OpenDX file. The file format is the simplest regular grid array and it is also understood by VMD's and Chimera's DX reader; PyMOL requires the dx `type` to be set to "double". For the file format see http://opendx.sdsc.edu/docs/html/pages/usrgu068.htm#HDREDF """ root, ext = os.path.splitext(filename) filename = root + '.dx' comments = [ 'OpenDX density file written by gridDataFormats.Grid.export()', 'File format: http://opendx.sdsc.edu/docs/html/pages/usrgu068.htm#HDREDF', 'Data are embedded in the header and tied to the grid positions.', 'Data is written in C array order: In grid[x,y,z] the axis z is fastest', 'varying, then y, then finally x, i.e. z is the innermost loop.' ] # write metadata in comments section if self.metadata: comments.append('Meta data stored with the python Grid object:') for k in self.metadata: comments.append(' ' + str(k) + ' = ' + str(self.metadata[k])) comments.append( '(Note: the VMD dx-reader chokes on comments below this line)') components = dict( positions=OpenDX.gridpositions(1, self.grid.shape, self.origin, self.delta), connections=OpenDX.gridconnections(2, self.grid.shape), data=OpenDX.array(3, self.grid, type=type, typequote=typequote), ) dx = OpenDX.field('density', components=components, comments=comments) dx.write(filename)
0.00233
def search_reference_sets( self, accession=None, md5checksum=None, assembly_id=None): """ Returns an iterator over the ReferenceSets fulfilling the specified conditions. :param str accession: If not null, return the reference sets for which the `accession` matches this string (case-sensitive, exact match). :param str md5checksum: If not null, return the reference sets for which the `md5checksum` matches this string (case-sensitive, exact match). See :class:`ga4gh.protocol.ReferenceSet::md5checksum` for details. :param str assembly_id: If not null, return the reference sets for which the `assembly_id` matches this string (case-sensitive, exact match). :return: An iterator over the :class:`ga4gh.protocol.ReferenceSet` objects defined by the query parameters. """ request = protocol.SearchReferenceSetsRequest() request.accession = pb.string(accession) request.md5checksum = pb.string(md5checksum) request.assembly_id = pb.string(assembly_id) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "referencesets", protocol.SearchReferenceSetsResponse)
0.001526
def make_cache(self, backend=None):
    """Make a cache to reduce run time. Some backends may implement it.

    This is a temporary API; it may be changed or deprecated."""
    if backend is None:
        if self._default_backend is None:
            backend = DEFAULT_BACKEND_NAME
        else:
            backend = self._default_backend
    return self.__get_backend(backend).make_cache(self.ops, self.n_qubits)
0.006652
def send_key(self, key): """ Send a key to the Horizon box. """ cmd = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, key) self.con.send(cmd) cmd = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, key) self.con.send(cmd)
0.008065
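# Standalone sketch (added example) of the wire format built by send_key(): six
# single bytes followed by a big-endian unsigned short carrying the key code.
# The key value below is a made-up placeholder, not a real Horizon key code.
import struct

key = 0x01
press = struct.pack(">BBBBBBH", 4, 1, 0, 0, 0, 0, key)
release = struct.pack(">BBBBBBH", 4, 0, 0, 0, 0, 0, key)
print(len(press), press.hex())      # 8 0401000000000001
print(len(release), release.hex())  # 8 0400000000000001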
def get_vpc_id(self):
    """Gets the VPC ID for this EC2 instance

    :return: String VPC ID or None
    """
    log = logging.getLogger(self.cls_logger + '.get_vpc_id')

    # Exit if not running on AWS
    if not self.is_aws:
        log.info('This machine is not running in AWS, exiting...')
        return

    if self.instance_id is None:
        log.error('Unable to get the Instance ID for this machine')
        return
    log.info('Found Instance ID: {i}'.format(i=self.instance_id))

    log.info('Querying AWS to get the VPC ID...')
    try:
        response = self.client.describe_instances(
            DryRun=False,
            InstanceIds=[self.instance_id])
    except ClientError as ex:
        log.error('Unable to query AWS to get info for instance {i}\n{e}'.format(
            i=self.instance_id, e=ex))
        return

    # Get the VPC ID from the response
    try:
        vpc_id = response['Reservations'][0]['Instances'][0]['VpcId']
    except KeyError as ex:
        log.error('Unable to get VPC ID from response: {r}\n{e}'.format(r=response, e=ex))
        return
    log.info('Found VPC ID: {v}'.format(v=vpc_id))
    return vpc_id
0.003089