def push_channel(self, content, channel, content_url=None):
    '''Push a notification to a Pushed channel.

    Param:
        content -> content of Pushed notification message
        channel -> string identifying a Pushed channel
        content_url (optional) -> enrich message with URL

    Returns Shipment ID as string
    '''
    parameters = {
        'app_key': self.app_key,
        'app_secret': self.app_secret,
        'target_alias': channel
    }
    return self._push(content, 'channel', parameters, content_url)
def singleton(cls):
    """
    See <Singleton> design pattern for detail:
        http://www.oodesign.com/singleton-pattern.html
    Python <Singleton> reference:
        http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python
    Recommend use Singleton as a metaclass

    Usage:
        @singleton
        class MyClass(object):
            pass
    """
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return get_instance
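A minimal usage sketch of the decorator above; the class name and attribute are illustrative, not from the original source:

# Hypothetical example class; constructor arguments are only used on the
# first instantiation, later calls return the cached instance.
@singleton
class AppConfig(object):
    def __init__(self, debug=False):
        self.debug = debug

a = AppConfig(debug=True)
b = AppConfig()
assert a is b            # both names point to the single cached instance
assert b.debug is True   # the first call's arguments won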
def remove_ancestors_of(self, node):
    """Remove all of the ancestor operation nodes of node."""
    if isinstance(node, int):
        warnings.warn('Calling remove_ancestors_of() with a node id is deprecated,'
                      ' use a DAGNode instead',
                      DeprecationWarning, 2)
        node = self._id_to_node[node]
    anc = nx.ancestors(self._multi_graph, node)
    # TODO: probably better to do all at once using
    # multi_graph.remove_nodes_from; same for related functions ...
    for anc_node in anc:
        if anc_node.type == "op":
            self.remove_op_node(anc_node)
def _LegacyCheckHashesWithFileStore(self): """Check all queued up hashes for existence in file store (legacy). Hashes which do not exist in the file store will be downloaded. This function flushes the entire queue (self.state.pending_hashes) in order to minimize the round trips to the file store. If a file was found in the file store it is copied from there into the client's VFS namespace. Otherwise, we request the client to hash every block in the file, and add it to the file tracking queue (self.state.pending_files). """ if not self.state.pending_hashes: return # This map represents all the hashes in the pending urns. file_hashes = {} # Store a mapping of hash to tracker. Keys are hashdigest objects, # values are arrays of tracker dicts. hash_to_tracker = {} for index, tracker in iteritems(self.state.pending_hashes): # We might not have gotten this hash yet if tracker.get("hash_obj") is None: continue hash_obj = tracker["hash_obj"] digest = hash_obj.sha256 file_hashes[index] = hash_obj hash_to_tracker.setdefault(digest, []).append(tracker) # First we get all the files which are present in the file store. files_in_filestore = {} # TODO(amoser): This object never changes, could this be a class attribute? filestore_obj = aff4.FACTORY.Open( legacy_filestore.FileStore.PATH, legacy_filestore.FileStore, mode="r", token=self.token) for file_store_urn, hash_obj in filestore_obj.CheckHashes( itervalues(file_hashes), external=self.state.use_external_stores): # Since checkhashes only returns one digest per unique hash we need to # find any other files pending download with the same hash. for tracker in hash_to_tracker[hash_obj.sha256]: self.state.files_skipped += 1 file_hashes.pop(tracker["index"]) files_in_filestore[file_store_urn] = hash_obj # Remove this tracker from the pending_hashes store since we no longer # need to process it. self.state.pending_hashes.pop(tracker["index"]) # Now that the check is done, reset our counter self.state.files_hashed_since_check = 0 # Now copy all existing files to the client aff4 space. for filestore_file_urn, hash_obj in iteritems(files_in_filestore): for file_tracker in hash_to_tracker.get(hash_obj.sha256, []): stat_entry = file_tracker["stat_entry"] # Copy the existing file from the filestore to the client namespace. target_urn = stat_entry.pathspec.AFF4Path(self.client_urn) aff4.FACTORY.Copy( filestore_file_urn, target_urn, update_timestamps=True) with aff4.FACTORY.Open( target_urn, mode="rw", token=self.token) as new_fd: new_fd.Set(new_fd.Schema.STAT, stat_entry) # Due to potential filestore corruption, the existing files # can have 0 size. if new_fd.size == 0: new_fd.size = (file_tracker["bytes_read"] or stat_entry.st_size) if data_store.RelationalDBEnabled(): path_info = rdf_objects.PathInfo.FromStatEntry(stat_entry) path_info.hash_entry = hash_obj data_store.REL_DB.WritePathInfos(self.client_id, [path_info]) # Add this file to the filestore index. filestore_obj.AddURNToIndex(str(hash_obj.sha256), target_urn) # Report this hit to the flow's caller. self._ReceiveFetchedFile(file_tracker) # Now we iterate over all the files which are not in the store and arrange # for them to be copied. for index in file_hashes: # Move the tracker from the pending hashes store to the pending files # store - it will now be downloaded. file_tracker = self.state.pending_hashes.pop(index) self.state.pending_files[index] = file_tracker # If we already know how big the file is we use that, otherwise fall back # to the size reported by stat. 
if file_tracker["bytes_read"] > 0: file_tracker["size_to_download"] = file_tracker["bytes_read"] else: file_tracker["size_to_download"] = file_tracker["stat_entry"].st_size # We do not have the file here yet - we need to retrieve it. expected_number_of_hashes = ( file_tracker["size_to_download"] // self.CHUNK_SIZE + 1) # We just hash ALL the chunks in the file now. NOTE: This maximizes client # VFS cache hit rate and is far more efficient than launching multiple # GetFile flows. self.state.files_to_fetch += 1 for i in range(expected_number_of_hashes): if i == expected_number_of_hashes - 1: # The last chunk is short. length = file_tracker["size_to_download"] % self.CHUNK_SIZE else: length = self.CHUNK_SIZE self.CallClient( server_stubs.HashBuffer, pathspec=file_tracker["stat_entry"].pathspec, offset=i * self.CHUNK_SIZE, length=length, next_state="CheckHash", request_data=dict(index=index)) if self.state.files_hashed % 100 == 0: self.Log("Hashed %d files, skipped %s already stored.", self.state.files_hashed, self.state.files_skipped)
def lookup(self, query=''):
    """looks up all contacts where name or address match query"""
    res = []
    query = re.compile('.*%s.*' % re.escape(query), self.reflags)
    for name, email in self.get_contacts():
        if query.match(name) or query.match(email):
            res.append((name, email))
    return res
def _get_image_workaround_seek(self, idx):
    """Same as __getitem__ but seek through the video beforehand

    This is a workaround for an all-zero image returned by `imageio`.
    """
    warnings.warn("imageio workaround used!")
    cap = self.video_handle
    mult = 50
    for ii in range(idx // mult):
        cap.get_data(ii * mult)
    final = cap.get_data(idx)
    return final
def convert_units(self, desired, guess=False):
    """
    Convert the units of the mesh into a specified unit.

    Parameters
    ----------
    desired : string
        Units to convert to (eg 'inches')
    guess : boolean
        If self.units are not defined should we
        guess the current units of the document and then convert?
    """
    units._convert_units(self, desired, guess)
    return self
def mine(self): # pragma: no cover """ Search for domain or URL related to the original URL or domain. :return: The mined domains or URL. :rtype: dict """ if PyFunceble.CONFIGURATION["mining"]: # The mining is activated. try: # We get the history. history = PyFunceble.requests.get( self.to_get, timeout=PyFunceble.CONFIGURATION["seconds_before_http_timeout"], headers=self.headers, ).history # We initiate a dictionnary which will save the # list of mined links. mined = {self.to_get_bare: []} for element in history: # We loop through the history. # We update the element. element = element.url if PyFunceble.INTERN["to_test_type"] == "url": # We are testing a full url. # We get the element to append. to_append = Check().is_url_valid(element, return_base=False) elif PyFunceble.INTERN["to_test_type"] == "domain": # We are testing a domain. # We get the element to append. to_append = Check().is_url_valid(element, return_base=True) else: raise Exception("Unknown tested.") if to_append: # There is something to append. if to_append.endswith(":80"): # The port is present. # We get rid of it. to_append = to_append[:-3] if to_append != self.to_get_bare: # The element to append is different as # the element we are globally testing. # We append the element to append to the # list of mined links. mined[self.to_get_bare].append(to_append) if mined[self.to_get_bare]: # There is something in the list of mined links. # We return the whole element. return mined # There is nothing in the list of mined links. # We return None. return None except ( PyFunceble.requests.ConnectionError, PyFunceble.requests.exceptions.Timeout, PyFunceble.requests.exceptions.InvalidURL, PyFunceble.socket.timeout, urllib3_exceptions.InvalidHeader, UnicodeDecodeError, # The probability that this happend in production is minimal. ): # Something went wrong. # We return None. return None return None
def _write_comparison_plot_table(spid, models, options, core_results, fit_results): """ Notes ----- Only applies to analysis using functions from empirical in which models are also given. """ # TODO: Clean up sorting, may not work if SAR x out of order, e.g. is_curve = 'x' in core_results[0][1] df = core_results[spid][1] df.rename(columns={'y': 'empirical'}, inplace=True) # If distribution, need to sort values so will match sorted rank in fits if not is_curve: x = np.arange(len(df)) + 1 df = df.sort(columns='empirical') df.insert(0, 'x', x[::-1]) # Add residual column for each model for model in models: fit_result = fit_results[spid][model] df[model] = fit_result[1] df[model + "_residual"] = df[model] - df['empirical'] # If curve, sort now for plotting purposes if is_curve: df = df.sort(columns='x') # Set up file paths f_path = _get_file_path(spid, options, 'data_models.csv') p_path = _get_file_path(spid, options, 'data_models.pdf') # Save table df.to_csv(f_path, index=False, float_format='%.4f') # Table # Save plot fig, (ax1, ax2) = plt.subplots(1, 2) ax1.scatter(df['x'], df['empirical'], color='k') ax1.plot(df['x'], df[models]) ax1.legend(models + ['empirical'], loc='best') ax1.set_xlabel('x') ax1.set_ylabel('value') ax2.hlines(0, np.min(df['x']), np.max(df['x'])) ax2.plot(df['x'], df[[x + '_residual' for x in models]]) ax2.legend(models + ['empirical'], loc='best') ax2.set_xlabel('x') ax2.set_ylabel('residual') ax2.set_xlim(ax1.get_xlim()) ax2.set_ylim(min(ax2.get_ylim()[0], -1), max(ax2.get_ylim()[1], 1)) if options.get('log_y', None): ax1.set_yscale('log') ax2.set_yscale('symlog', linthreshy=1) if options.get('log_x', None): ax1.set_xscale('log') ax2.set_xscale('log') if not options.get('log_x', None) and not options.get('log_y', None): ax1.set_ylim(bottom=0) ax1.set_xlim(left=0) ax1 = _pad_plot_frame(ax1) ax2 = _pad_plot_frame(ax2) with warnings.catch_warnings(): warnings.simplefilter("ignore") fig.tight_layout() fig.savefig(p_path) plt.close('all')
def _pload(offset, size): """ Generic parameter loading. Emmits output code for setting IX at the right location. size = Number of bytes to load: 1 => 8 bit value 2 => 16 bit value / string 4 => 32 bit value / f16 value 5 => 40 bit value """ output = [] indirect = offset[0] == '*' if indirect: offset = offset[1:] I = int(offset) if I >= 0: # If it is a parameter, round up to even bytes I += 4 + (size % 2 if not indirect else 0) # Return Address + "push IX" ix_changed = (indirect or size < 5) and (abs(I) + size) > 127 # Offset > 127 bytes. Need to change IX if ix_changed: # more than 1 byte output.append('push ix') output.append('ld de, %i' % I) output.append('add ix, de') I = 0 elif size == 5: # For floating point numbers we always use DE as IX offset output.append('push ix') output.append('pop hl') output.append('ld de, %i' % I) output.append('add hl, de') I = 0 if indirect: output.append('ld h, (ix%+i)' % (I + 1)) output.append('ld l, (ix%+i)' % I) if size == 1: output.append('ld a, (hl)') elif size == 2: output.append('ld c, (hl)') output.append('inc hl') output.append('ld h, (hl)') output.append('ld l, c') elif size == 4: output.append('call __ILOAD32') REQUIRES.add('iload32.asm') else: # Floating point output.append('call __ILOADF') REQUIRES.add('iloadf.asm') else: if size == 1: output.append('ld a, (ix%+i)' % I) else: if size <= 4: # 16/32bit integer, low part output.append('ld l, (ix%+i)' % I) output.append('ld h, (ix%+i)' % (I + 1)) if size > 2: # 32 bit integer, high part output.append('ld e, (ix%+i)' % (I + 2)) output.append('ld d, (ix%+i)' % (I + 3)) else: # Floating point output.append('call __PLOADF') REQUIRES.add('ploadf.asm') if ix_changed: output.append('pop ix') return output
def iterfd(fd):
    '''
    Generator which unpacks a file object of msgpacked content.

    Args:
        fd: File object to consume data from.

    Notes:
        String objects are decoded using utf8 encoding.  In order to handle
        potentially malformed input, ``unicode_errors='surrogatepass'`` is set
        to allow decoding bad input strings.

    Yields:
        Objects from a msgpack stream.
    '''
    unpk = msgpack.Unpacker(fd, **unpacker_kwargs)
    for mesg in unpk:
        yield mesg
def toggle(self, rows):
    'Toggle selection of given `rows`.'
    for r in Progress(rows, 'toggling', total=len(self.rows)):
        if not self.unselectRow(r):
            self.selectRow(r)
def get_index(self, field_name, catalog):
    """Returns the index of the catalog for the given field_name, if any
    """
    index = catalog.Indexes.get(field_name, None)
    if not index and field_name == "Title":
        # Legacy
        return self.get_index("sortable_title", catalog)
    return index
def dirsplit(path):
    r"""
    Args:
        path (str):

    Returns:
        list: components of the path

    CommandLine:
        python -m utool.util_path --exec-dirsplit

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> paths = []
        >>> paths.append('E:/window file/foo')
        >>> paths.append('/normal/foo')
        >>> paths.append('~/relative/path')
        >>> results = [dirsplit(path) for path in paths]
        >>> import re
        >>> results2 = [re.split('\\/', path) for path in paths]
        >>> print(results2)
        >>> result = ut.repr2(results)
        >>> print(result)
    """
    # return path.split(os.sep)
    parts = []
    remain = path
    part = True
    # while True:
    while part != '' and remain != '':
        remain, part = split(remain)
        parts.append(part)
    parts = [p for p in parts if p != '']
    if remain != '':
        parts.append(remain)
    parts = parts[::-1]
    return parts
def get_token_func():
    """
    This function makes a call to AAD to fetch an OAuth token
    :return: the OAuth token and the interval to wait before refreshing it
    """
    print("{}: token updater was triggered".format(datetime.datetime.now()))

    # in this example, the OAuth token is obtained using the ADAL library
    # however, the user can use any preferred method
    context = adal.AuthenticationContext(
        str.format("https://login.microsoftonline.com/{}", settings.ACTIVE_DIRECTORY_TENANT_ID),
        api_version=None, validate_authority=True)

    oauth_token = context.acquire_token_with_client_credentials(
        "https://storage.azure.com",
        settings.ACTIVE_DIRECTORY_APPLICATION_ID,
        settings.ACTIVE_DIRECTORY_APPLICATION_SECRET)

    # return the token itself and the interval to wait before this function should be called again
    # generally oauth_token['expiresIn'] - 180 is a good interval to give, as it tells the caller to
    # refresh the token 3 minutes before it expires, so here we are assuming that the token expiration
    # is at least longer than 3 minutes, the user should adjust it according to their AAD policy
    return oauth_token['accessToken'], oauth_token['expiresIn'] - 180
def list_all_customers(cls, **kwargs):
    """List Customers

    Return a list of Customers
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.list_all_customers(async=True)
    >>> result = thread.get()

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[Customer]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._list_all_customers_with_http_info(**kwargs)
    else:
        (data) = cls._list_all_customers_with_http_info(**kwargs)
        return data
def libvlc_media_get_mrl(p_md):
    '''Get the media resource locator (mrl) from a media descriptor object.
    @param p_md: a media descriptor object.
    @return: string with mrl of media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_mrl', None) or \
        _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
                   ctypes.c_void_p, Media)
    return f(p_md)
def update_url(self, url=None, regex=None):
    """ Accepts a fully-qualified url, or regex.
        Returns True if successful, False if not successful.
    """
    if not url and not regex:
        raise ValueError("Neither a url or regex was provided to update_url.")

    headers = {
        'X-Prerender-Token': self.token,
        'Content-Type': 'application/json',
    }
    data = {
        'prerenderToken': settings.PRERENDER_TOKEN,
    }
    if url:
        data["url"] = url
    if regex:
        data["regex"] = regex

    r = self.session.post(self.RECACHE_URL, headers=headers, data=data)
    return r.status_code < 500
def _get_component_from_result(self, result, lookup):
    """
    Helper function to get a particular address component from a Google result.

    Since the address components in results are an array of objects containing a types array,
    we have to search for a particular component rather than being able to look it up directly.

    Returns the first match, so this should be used for unique component types (e.g.
    'locality'), not for categories (e.g. 'political') that can describe multiple components.

    :arg dict result: A results dict with an 'address_components' key, as returned by the
        Google geocoder.
    :arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
        the desired address component value.

    :returns: address component or empty string
    """
    for component in result['address_components']:
        if lookup['type'] in component['types']:
            return component.get(lookup['key'], '')
    return ''
def update_machine_state(state_path):
    """Update the machine state using the provided state declaration."""
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        salt_grains_path)
    subprocess.check_call([
        'salt-call',
        '--local',
        'state.template',
        state_path,
    ])
def friendly_load(parser, token):
    """
    Tries to load a custom template tag set. Non existing tag libraries
    are ignored.

    This means that, if used in conjunction with ``if_has_tag``, you can try to
    load the comments template tag library to enable comments even if the
    comments framework is not installed.

    For example::

        {% load friendly_loader %}
        {% friendly_load comments webdesign %}

        {% if_has_tag render_comment_list %}
            {% render_comment_list for obj %}
        {% else %}
            {% if_has_tag lorem %}
                {% lorem %}
            {% endif_has_tag %}
        {% endif_has_tag %}
    """
    bits = token.contents.split()
    if len(bits) >= 4 and bits[-2] == "from":
        # from syntax is used; load individual tags from the library
        name = bits[-1]
        try:
            lib = find_library(parser, name)
            subset = load_from_library(lib, name, bits[1:-2])
            parser.add_library(subset)
        except TemplateSyntaxError:
            pass
    else:
        # one or more libraries are specified; load and add them to the parser
        for name in bits[1:]:
            try:
                lib = find_library(parser, name)
                parser.add_library(lib)
            except TemplateSyntaxError:
                pass
    return LoadNode()
def _update_pwm(self):
    """Update the pwm values of the driver regarding the current state."""
    if self._is_on:
        values = self._get_pwm_values()
    else:
        values = [0] * len(self._driver.pins)
    self._driver.set_pwm(values)
def fill_package(app_name, build_dir=None, install_dir=None):
    """
    Creates the theme package (.zip) from templates
    and optionally assets installed in the ``build_dir``.
    """
    zip_path = os.path.join(install_dir, '%s.zip' % app_name)
    with zipfile.ZipFile(zip_path, 'w') as zip_file:
        fill_package_zip(zip_file, os.path.dirname(build_dir), prefix=app_name)
    return zip_path
def _path_to_id(path):
    """
    Name of the root directory is used as ``<packageid>`` in ``info.xml``.

    This function makes sure, that :func:`os.path.basename` doesn't return
    blank string in case that there is `/` at the end of the `path`.

    Args:
        path (str): Path to the root directory.

    Returns:
        str: Basename of the `path`.
    """
    if path.endswith("/"):
        path = path[:-1]
    return os.path.basename(path)
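A quick illustration of the trailing-slash handling above (the paths are made up):

_path_to_id("/tmp/my_package")    # -> 'my_package'
_path_to_id("/tmp/my_package/")   # -> 'my_package' (os.path.basename alone would return '')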
def as_dtype(type_value):
    """Converts the given `type_value` to a `DType`.

    Args:
      type_value: A value that can be converted to a `tf.DType` object. This may
        currently be a `tf.DType` object, a [`DataType`
        enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
        a string type name, or a `numpy.dtype`.

    Returns:
      A `DType` corresponding to `type_value`.

    Raises:
      TypeError: If `type_value` cannot be converted to a `DType`.
    """
    if isinstance(type_value, DType):
        return type_value

    try:
        return _INTERN_TABLE[type_value]
    except KeyError:
        pass

    try:
        return _STRING_TO_TF[type_value]
    except KeyError:
        pass

    try:
        return _PYTHON_TO_TF[type_value]
    except KeyError:
        pass

    if isinstance(type_value, np.dtype):
        # The numpy dtype for strings is variable length. We can not compare
        # dtype with a single constant (np.string does not exist) to decide
        # dtype is a "string" type. We need to compare the dtype.type to be
        # sure it's a string type.
        if type_value.type == np.string_ or type_value.type == np.unicode_:
            return string

    if isinstance(type_value, (type, np.dtype)):
        for key, val in _NP_TO_TF:
            try:
                if key == type_value:
                    return val
            except TypeError as e:
                raise TypeError(
                    "Cannot convert {} to a dtype. {}".format(type_value, e))

    raise TypeError("Cannot convert value %r to a TensorFlow DType." % type_value)
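For context, the public wrapper `tf.as_dtype` accepts the same kinds of values; a small usage sketch, assuming TensorFlow and NumPy are installed:

import numpy as np
import tensorflow as tf

tf.as_dtype('float32')   # -> tf.float32 (string type name)
tf.as_dtype(np.int64)    # -> tf.int64   (numpy dtype)
tf.as_dtype(tf.bool)     # -> tf.bool    (already a DType, returned unchanged)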
def _make_concept(self, entity):
    """Return Concept from a Hume entity."""
    # Use the canonical name as the name of the Concept by default
    name = self._sanitize(entity['canonicalName'])
    # But if there is a trigger head text, we prefer that since
    # it almost always results in a cleaner name
    # This is removed for now since the head word seems to be too
    # minimal for some concepts, e.g. it gives us only "security"
    # for "food security".
    """
    trigger = entity.get('trigger')
    if trigger is not None:
        head_text = trigger.get('head text')
        if head_text is not None:
            name = head_text
    """
    # Save raw text and Hume scored groundings as db_refs
    db_refs = _get_grounding(entity)
    concept = Concept(name, db_refs=db_refs)
    metadata = {arg['type']: arg['value']['@id']
                for arg in entity['arguments']}
    return concept, metadata
def populate(self, compound_dict=None, x=1, y=1, z=1): """Expand lattice and create compound from lattice. populate will expand lattice based on user input. The user must also pass in a dictionary that contains the keys that exist in the basis_dict. The corresponding Compound will be the full lattice returned to the user. If no dictionary is passed to the user, Dummy Compounds will be used. Parameters ---------- x : int, optional, default=1 How many iterations in the x direction. y : int, optional, default=1 How many iterations in the y direction. z : int, optional, default=1 How many iterations in the z direction. compound_dict : dictionary, optional, default=None Link between basis_dict and Compounds. Exceptions Raised ----------------- ValueError : incorrect x,y, or z values. TypeError : incorrect type for basis vector Call Restrictions ----------------- Called after constructor by user. """ error_dict = {0: 'X', 1: 'Y', 2: 'Z'} try: x = int(x) y = int(y) z = int(z) except (ValueError, TypeError): raise ValueError('Cannot convert replication amounts into ' 'integers. x= {}, y= {}, z= {} needs to ' 'be an int.'.format(x, y, z)) for replication_amount in x, y, z: if replication_amount is None: raise ValueError('Attempt to replicate None times. ' 'None is not an acceptable replication ' 'amount, 1 is the default.') for replication_amount, index in zip([x, y, z], range(3)): if replication_amount < 1: raise ValueError('Incorrect populate value: {} : {} is < 1. ' .format(error_dict[index], replication_amount)) if ((isinstance(compound_dict, dict)) or (compound_dict is None)): pass else: raise TypeError('Compound dictionary is not of type dict. ' '{} was passed.'.format(type(compound_dict))) cell = defaultdict(list) [a, b, c] = self.lattice_spacing transform_mat = self.lattice_vectors # unit vectors transform_mat = np.asarray(transform_mat, dtype=np.float64) transform_mat = np.reshape(transform_mat, newshape=(3,3)) norms = np.linalg.norm(transform_mat, axis=1) # normalized vectors for change of basis unit_vecs = np.divide(transform_mat.transpose(), norms) for key, locations in self.lattice_points.items(): for coords in locations: for replication in it.product(range(x), range(y), range(z)): temp_location = list() new_coords = np.asarray(coords, dtype=np.float64) new_coords = np.reshape(new_coords, (1, 3), order='C') new_coords[0][0] = new_coords[0][0] + replication[0] new_coords[0][1] = new_coords[0][1] + replication[1] new_coords[0][2] = new_coords[0][2] + replication[2] # change of basis to cartesian new_coords = np.dot(unit_vecs, new_coords.transpose()) new_coords[0] = new_coords[0] * a new_coords[1] = new_coords[1] * b new_coords[2] = new_coords[2] * c new_coords = np.reshape(new_coords, (1, 3), order='C') tuple_of_coords = tuple(new_coords.flatten()) cell[key].append(tuple_of_coords) ret_lattice = mb.Compound() if compound_dict is None: for key_id, all_pos in cell.items(): particle = mb.Compound(name=key_id, pos=[0, 0, 0]) for pos in all_pos: particle_to_add = mb.clone(particle) particle_to_add.translate_to(list(pos)) ret_lattice.add(particle_to_add) else: for key_id, all_pos in cell.items(): if isinstance(compound_dict[key_id], mb.Compound): compound_to_move = compound_dict[key_id] for pos in all_pos: tmp_comp = mb.clone(compound_to_move) tmp_comp.translate_to(list(pos)) ret_lattice.add(tmp_comp) else: err_type = type(compound_dict.get(key_id)) raise TypeError('Invalid type in provided Compound ' 'dictionary. For key {}, type: {} was ' 'provided, not mbuild.Compound.' 
.format(key_id, err_type)) # set periodicity ret_lattice.periodicity = np.asarray([a * x, b * y, c * z], dtype=np.float64) warn('Periodicity of non-rectangular lattices are not valid with ' 'default boxes. Only rectangular lattices are valid ' 'at this time.') # if coordinates are below a certain threshold, set to 0 tolerance = 1e-12 ret_lattice.xyz_with_ports[ret_lattice.xyz_with_ports <= tolerance] = 0. return ret_lattice
def projR(gamma, p):
    """return the KL projection on the row constraints """
    return np.multiply(gamma.T, p / np.maximum(np.sum(gamma, axis=1), 1e-10)).T
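A small numeric check of the row projection above (values are illustrative): each row of `gamma` is rescaled so that its sum matches the corresponding entry of `p`.

import numpy as np

gamma = np.array([[0.2, 0.3],
                  [0.1, 0.4]])
p = np.array([1.0, 2.0])          # target row sums

proj = projR(gamma, p)            # rows scaled by p / row_sums = [2.0, 4.0]
print(proj)                       # [[0.4 0.6]
                                  #  [0.4 1.6]]
print(proj.sum(axis=1))           # [1. 2.]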
def _normalize_histogram2d(self, counts, type):
    """Normalize the values of the counts for a 2D histogram.

    This normalizes the values of a numpy array to the range 0-255.

    :param counts: a NumPy array which is to be rescaled.
    :param type: either 'bw' or 'reverse_bw'.
    """
    counts = (255 * (counts - np.nanmin(counts)) /
              (np.nanmax(counts) - np.nanmin(counts)))
    if type == 'reverse_bw':
        counts = 255 - counts
    return counts.astype(np.uint8)
def _buildElementTree(self):
    """Turn object into an ElementTree
    """
    t_elt = ctree.Element(self.name)
    # Excluding name from list of items
    for k, v in [(key, value) for key, value in self.__dict__.items()
                 if key != 'name']:
        if v and v != 'false':
            t_elt.set(k if k != 'like' else 'as', str(v).lower())
    self._etree = t_elt
    return t_elt
def _tarboton_slopes_directions(data, dX, dY, facets, ang_adj): """ Calculate the slopes and directions based on the 8 sections from Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf """ shp = np.array(data.shape) - 1 direction = np.full(data.shape, FLAT_ID_INT, 'float64') mag = np.full(data.shape, FLAT_ID_INT, 'float64') slc0 = [slice(1, -1), slice(1, -1)] for ind in xrange(8): e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(1 + e1[0], shp[0] + e1[0]), slice(1 + e1[1], shp[1] + e1[1])] slc2 = [slice(1 + e2[0], shp[0] + e2[0]), slice(1 + e2[1], shp[1] + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp) mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # %%Now do the edges # if the edge is lower than the interior, we need to copy the value # from the interior (as an approximation) ids1 = (direction[:, 1] > np.pi / 2) \ & (direction[:, 1] < 3 * np.pi / 2) direction[ids1, 0] = direction[ids1, 1] mag[ids1, 0] = mag[ids1, 1] ids1 = (direction[:, -2] < np.pi / 2) \ | (direction[:, -2] > 3 * np.pi / 2) direction[ids1, -1] = direction[ids1, -2] mag[ids1, -1] = mag[ids1, -2] ids1 = (direction[1, :] > 0) & (direction[1, :] < np.pi) direction[0, ids1] = direction[1, ids1] mag[0, ids1] = mag[1, ids1] ids1 = (direction[-2, :] > np.pi) & (direction[-2, :] < 2 * np.pi) direction[-1, ids1] = direction[-2, ids1] mag[-1, ids1] = mag[-2, ids1] # Now update the edges in case they are higher than the interior (i.e. # look at the downstream angle) # left edge slc0 = [slice(1, -1), slice(0, 1)] for ind in [0, 1, 6, 7]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(1 + e1[0], shp[0] + e1[0]), slice(e1[1], 1 + e1[1])] slc2 = [slice(1 + e2[0], shp[0] + e2[0]), slice(e2[1], 1 + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp) mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # right edge slc0 = [slice(1, -1), slice(-1, None)] for ind in [2, 3, 4, 5]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(1 + e1[0], shp[0] + e1[0]), slice(shp[1] + e1[1], shp[1] + 1 + e1[1])] slc2 = [slice(1 + e2[0], shp[0] + e2[0]), slice(shp[1] + e2[1], shp[1] + 1 + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp) mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # top edge slc0 = [slice(0, 1), slice(1, -1)] for ind in [4, 5, 6, 7]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(e1[0], 1 + e1[0]), slice(1 + e1[1], shp[1] + e1[1])] slc2 = [slice(e2[0], 1 + e2[0]), slice(1 + e2[1], shp[1] + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'top') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # bottom edge slc0 = [slice(-1, None), slice(1, -1)] for ind in [0, 1, 2, 3]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]), slice(1 + e1[1], shp[1] + e1[1])] slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]), slice(1 + e2[1], shp[1] + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # top-left corner slc0 = [slice(0, 1), slice(0, 1)] for ind in [6, 7]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(e1[0], 1 + e1[0]), slice(e1[1], 1 + e1[1])] slc2 = [slice(e2[0], 1 + e2[0]), slice(e2[1], 1 + e2[1])] d1, d2, theta = 
_get_d1_d2(dX, dY, ind, e1, e2, shp, 'top') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # top-right corner slc0 = [slice(0, 1), slice(-1, None)] for ind in [4, 5]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(e1[0], 1 + e1[0]), slice(shp[1] + e1[1], shp[1] + 1 + e1[1])] slc2 = [slice(e2[0], 1 + e2[0]), slice(shp[1] + e2[1], shp[1] + 1 + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'top') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # bottom-left corner slc0 = [slice(-1, None), slice(0, 1)] for ind in [0, 1]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]), slice(e1[1], 1 + e1[1])] slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]), slice(e2[1], 1 + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) # bottom-right corner slc0 = [slice(-1, None), slice(-1, None)] for ind in [3, 4]: e1 = facets[ind][1] e2 = facets[ind][2] ang = ang_adj[ind] slc1 = [slice(shp[0] + e1[0], shp[0] + 1 + e1[0]), slice(shp[1] + e1[1], shp[1] + 1 + e1[1])] slc2 = [slice(shp[0] + e2[0], shp[0] + 1 + e2[0]), slice(shp[1] + e2[1], shp[1] + 1 + e2[1])] d1, d2, theta = _get_d1_d2(dX, dY, ind, e1, e2, shp, 'bot') mag, direction = _calc_direction(data, mag, direction, ang, d1, d2, theta, slc0, slc1, slc2) mag[mag > 0] = np.sqrt(mag[mag > 0]) return mag, direction
def launch_tor(config, reactor,
               tor_binary=None,
               progress_updates=None,
               connection_creator=None,
               timeout=None,
               kill_on_stderr=True,
               stdout=None, stderr=None):
    """
    Deprecated; use launch() instead.
    See also controller.py
    """
    from .controller import launch
    # XXX FIXME are we dealing with options in the config "properly"
    # as far as translating semantics from the old launch_tor to
    # launch()? DataDirectory, User, ControlPort, ...?
    tor = yield launch(
        reactor,
        stdout=stdout,
        stderr=stderr,
        progress_updates=progress_updates,
        tor_binary=tor_binary,
        connection_creator=connection_creator,
        timeout=timeout,
        kill_on_stderr=kill_on_stderr,
        _tor_config=config,
    )
    defer.returnValue(tor.process)
def pupv_to_vRvz(pu, pv, u, v, delta=1., oblate=False):
    """
    NAME:
       pupv_to_vRvz
    PURPOSE:
       calculate cylindrical vR and vz from momenta in prolate or oblate
       confocal u and v coordinates for a given focal length delta
    INPUT:
       pu - u momentum
       pv - v momentum
       u - u coordinate
       v - v coordinate
       delta= focus
       oblate= (False) if True, compute oblate confocal coordinates instead of prolate
    OUTPUT:
       (vR,vz)
    HISTORY:
       2017-12-04 - Written - Bovy (UofT)
    """
    if oblate:
        denom = delta * (sc.sinh(u)**2. + sc.cos(v)**2.)
        vR = (pu * sc.sinh(u) * sc.sin(v) + pv * sc.cosh(u) * sc.cos(v)) / denom
        vz = (pu * sc.cosh(u) * sc.cos(v) - pv * sc.sinh(u) * sc.sin(v)) / denom
    else:
        denom = delta * (sc.sinh(u)**2. + sc.sin(v)**2.)
        vR = (pu * sc.cosh(u) * sc.sin(v) + pv * sc.sinh(u) * sc.cos(v)) / denom
        vz = (pu * sc.sinh(u) * sc.cos(v) - pv * sc.cosh(u) * sc.sin(v)) / denom
    return (vR, vz)
def upload_to_s3(self, key, filename):
    """
    Set the content type and gzip headers if applicable
    and upload the item to S3
    """
    extra_args = {'ACL': self.acl}
    # determine the mimetype of the file
    guess = mimetypes.guess_type(filename)
    content_type = guess[0]
    encoding = guess[1]

    if content_type:
        extra_args['ContentType'] = content_type

    # add the gzip headers, if necessary
    if (self.gzip and content_type in self.gzip_content_types) or encoding == 'gzip':
        extra_args['ContentEncoding'] = 'gzip'

    # add the cache-control headers if necessary
    if content_type in self.cache_control:
        extra_args['CacheControl'] = ''.join((
            'max-age=',
            str(self.cache_control[content_type])
        ))

    # access and write the contents from the file
    if not self.dry_run:
        logger.debug("Uploading %s" % filename)
        if self.verbosity > 0:
            self.stdout.write("Uploading %s" % filename)
        s3_obj = self.s3_resource.Object(self.aws_bucket_name, key)
        s3_obj.upload_file(filename, ExtraArgs=extra_args)

    # Update counts
    self.uploaded_files += 1
    self.uploaded_file_list.append(filename)
def get_cutoff(value: float, cutoff: Optional[float] = None) -> int:
    """Assign if a value is greater than or less than a cutoff."""
    cutoff = cutoff if cutoff is not None else 0

    if value > cutoff:
        return 1

    if value < (-1 * cutoff):
        return -1

    return 0
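A few illustrative calls of the function above:

get_cutoff(0.5, cutoff=0.3)    # ->  1  (above the cutoff)
get_cutoff(-0.5, cutoff=0.3)   # -> -1  (below the negative cutoff)
get_cutoff(0.1, cutoff=0.3)    # ->  0  (inside the band)
get_cutoff(-0.2)               # -> -1  (default cutoff is 0)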
def _get_comments(session, group_or_user_id, wall_id):
    """
    https://vk.com/dev/wall.getComments
    """
    return session.fetch_items("wall.getComments", Comment.from_json, count=100,
                               owner_id=group_or_user_id, post_id=wall_id, need_likes=1)
def leaves(self):
    """
    Returns a :class:`QuerySet` of all leaf nodes (nodes with no children).

    :return: A :class:`QuerySet` of all leaf nodes (nodes with no children).
    """
    # We need to read the _cte_node_children attribute, so ensure it exists.
    self._ensure_parameters()
    return self.exclude(
        **{"%s__id__in" % self.model._cte_node_children: self.all()}
    )
def union(self, *iterables):
    """
    Return a new SortedSet with elements from the set and all *iterables*.
    """
    return self.__class__(chain(iter(self), *iterables), key=self._key)
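An illustrative usage sketch, assuming the SortedSet class this method belongs to accepts an iterable in its constructor (the element values are made up):

s = SortedSet([3, 1, 5])
t = s.union([2, 5], (7,))   # the original set is left untouched
print(list(t))              # [1, 2, 3, 5, 7]
print(list(s))              # [1, 3, 5]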
def _register(self, name):
    """
    @Api private
    Add new :py:class:`TemplateHook` into the registry

    :param str name: Hook name
    :return: Instance of :py:class:`TemplateHook`
    :rtype: :py:class:`TemplateHook`
    """
    templatehook = TemplateHook()
    self._registry[name] = templatehook
    return templatehook
def make_shell_logfile_data_url(host, shell_port, instance_id, offset, length):
    """
    Make the url for log-file data in heron-shell
    from the info stored in stmgr.
    """
    return "http://%s:%d/filedata/log-files/%s.log.0?offset=%s&length=%s" % \
        (host, shell_port, instance_id, offset, length)
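With illustrative argument values, the helper above produces a URL of this shape:

make_shell_logfile_data_url("host1", 8080, "container_1_word_2", 0, 1024)
# -> "http://host1:8080/filedata/log-files/container_1_word_2.log.0?offset=0&length=1024"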
def jinja_fragment_extension(tag, endtag=None, name=None, tag_only=False,
                             allow_args=True, callblock_args=None):
    """Decorator to easily create a jinja extension which acts as a fragment.
    """
    if endtag is None:
        endtag = "end" + tag

    def decorator(f):
        def parse(self, parser):
            lineno = parser.stream.next().lineno
            args = []
            kwargs = []
            if allow_args:
                args, kwargs = parse_block_signature(parser)

            call = self.call_method("support_method", args, kwargs, lineno=lineno)
            if tag_only:
                return nodes.Output([call], lineno=lineno)

            call_args = []
            if callblock_args is not None:
                for arg in callblock_args:
                    call_args.append(nodes.Name(arg, 'param', lineno=lineno))

            body = parser.parse_statements(['name:' + endtag], drop_needle=True)
            return nodes.CallBlock(call, call_args, [], body, lineno=lineno)

        def support_method(self, *args, **kwargs):
            return f(*args, **kwargs)

        attrs = {"tags": set([tag]),
                 "parse": parse,
                 "support_method": support_method}
        return type(name or f.__name__, (Extension,), attrs)

    return decorator
def align_epi_anat(anatomy,epi_dsets,skull_strip_anat=True): ''' aligns epis to anatomy using ``align_epi_anat.py`` script :epi_dsets: can be either a string or list of strings of the epi child datasets :skull_strip_anat: if ``True``, ``anatomy`` will be skull-stripped using the default method The default output suffix is "_al" ''' if isinstance(epi_dsets,basestring): epi_dsets = [epi_dsets] if len(epi_dsets)==0: nl.notify('Warning: no epi alignment datasets given for anatomy %s!' % anatomy,level=nl.level.warning) return if all(os.path.exists(nl.suffix(x,'_al')) for x in epi_dsets): return anatomy_use = anatomy if skull_strip_anat: nl.skull_strip(anatomy,'_ns') anatomy_use = nl.suffix(anatomy,'_ns') inputs = [anatomy_use] + epi_dsets dset_products = lambda dset: [nl.suffix(dset,'_al'), nl.prefix(dset)+'_al_mat.aff12.1D', nl.prefix(dset)+'_tsh_vr_motion.1D'] products = nl.flatten([dset_products(dset) for dset in epi_dsets]) with nl.run_in_tmp(inputs,products): if nl.is_nifti(anatomy_use): anatomy_use = nl.afni_copy(anatomy_use) epi_dsets_use = [] for dset in epi_dsets: if nl.is_nifti(dset): epi_dsets_use.append(nl.afni_copy(dset)) else: epi_dsets_use.append(dset) cmd = ["align_epi_anat.py", "-epi2anat", "-anat_has_skull", "no", "-epi_strip", "3dAutomask","-anat", anatomy_use, "-epi_base", "5", "-epi", epi_dsets_use[0]] if len(epi_dsets_use)>1: cmd += ['-child_epi'] + epi_dsets_use[1:] out = nl.run(cmd) for dset in epi_dsets: if nl.is_nifti(dset): dset_nifti = nl.nifti_copy(nl.prefix(dset)+'_al+orig') if dset_nifti and os.path.exists(dset_nifti) and dset_nifti.endswith('.nii') and dset.endswith('.gz'): nl.run(['gzip',dset_nifti])
def colors(palette): """Example endpoint return a list of colors by palette This is using docstring for specifications --- tags: - colors parameters: - name: palette in: path type: string enum: ['all', 'rgb', 'cmyk'] required: true default: all description: Which palette to filter? operationId: get_colors consumes: - application/json produces: - application/json security: colors_auth: - 'write:colors' - 'read:colors' schemes: ['http', 'https'] deprecated: false externalDocs: description: Project repository url: http://github.com/rochacbruno/flasgger definitions: Palette: type: object properties: palette_name: type: array items: $ref: '#/definitions/Color' Color: type: string responses: 200: description: A list of colors (may be filtered by palette) schema: $ref: '#/definitions/Palette' examples: rgb: ['red', 'green', 'blue'] """ all_colors = { 'cmyk': ['cian', 'magenta', 'yellow', 'black'], 'rgb': ['red', 'green', 'blue'] } if palette == 'all': result = all_colors else: result = {palette: all_colors.get(palette)} return jsonify(result)
def mkCuttingStock(s):
    """mkCuttingStock: convert a bin packing instance into cutting stock format"""
    w, q = [], []   # list of different widths (sizes) of items, their quantities
    for item in sorted(s):
        if w == [] or item != w[-1]:
            w.append(item)
            q.append(1)
        else:
            q[-1] += 1
    return w, q
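A small worked example of the conversion above (item sizes are made up):

sizes = [7, 3, 7, 5, 3, 7]   # bin packing instance: one entry per item
w, q = mkCuttingStock(sizes)
print(w)                     # [3, 5, 7]  distinct widths
print(q)                     # [2, 1, 3]  how many items of each width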
def add_listener(self, on_message=None):
    """
    Subscribes to this topic. When someone publishes a message on this topic,
    on_message() function is called if provided.

    :param on_message: (Function), function to be called when a message is published.
    :return: (str), a registration id which is used as a key to remove the listener.
    """
    request = topic_add_message_listener_codec.encode_request(self.name, False)

    def handle(item, publish_time, uuid):
        member = self._client.cluster.get_member_by_uuid(uuid)
        item_event = TopicMessage(self.name, item, publish_time, member, self._to_object)
        on_message(item_event)

    return self._start_listening(request,
                                 lambda m: topic_add_message_listener_codec.handle(m, handle),
                                 lambda r: topic_add_message_listener_codec.decode_response(r)['response'],
                                 self.partition_key)
def build_query(self, case_id, query=None, variant_ids=None, category='snv'): """Build a mongo query These are the different query options: { 'genetic_models': list, 'chrom': str, 'thousand_genomes_frequency': float, 'exac_frequency': float, 'clingen_ngi': int, 'cadd_score': float, 'cadd_inclusive": boolean, 'genetic_models': list(str), 'hgnc_symbols': list, 'region_annotations': list, 'functional_annotations': list, 'clinsig': list, 'clinsig_confident_always_returned': boolean, 'variant_type': str(('research', 'clinical')), 'chrom': str, 'start': int, 'end': int, 'svtype': list, 'size': int, 'size_shorter': boolean, 'gene_panels': list(str), 'mvl_tag": boolean, 'decipher": boolean, } Arguments: case_id(str) query(dict): a dictionary of query filters specified by the users variant_ids(list(str)): A list of md5 variant ids Returns: mongo_query : A dictionary in the mongo query format """ query = query or {} mongo_query = {} gene_query = None ##### Base query params # set up the fundamental query params: case_id, category, type and # restrict to list of variants (if var list is provided) for criterion in FUNDAMENTAL_CRITERIA: if criterion == 'case_id': LOG.debug("Building a mongo query for %s" % case_id) mongo_query['case_id'] = case_id elif criterion == 'variant_ids' and variant_ids: LOG.debug("Adding variant_ids %s to query" % ', '.join(variant_ids)) mongo_query['variant_id'] = {'$in': variant_ids} elif criterion == 'category': LOG.debug("Querying category %s" % category) mongo_query['category'] = category elif criterion == 'variant_type': mongo_query['variant_type'] = query.get('variant_type', 'clinical') LOG.debug("Set variant type to %s", mongo_query['variant_type']) # Requests to filter based on gene panels, hgnc_symbols or # coordinate ranges must always be honored. They are always added to # query as top level, implicit '$and'. When both hgnc_symbols and a # panel is used, addition of this is delayed until after the rest of # the query content is clear. elif criterion in ['hgnc_symbols', 'gene_panels'] and gene_query is None: gene_query = self.gene_filter(query, mongo_query) elif criterion == 'chrom' and query.get('chrom'): # filter by coordinates self.coordinate_filter(query, mongo_query) elif criterion == 'variant_ids' and variant_ids: LOG.debug("Adding variant_ids %s to query" % ', '.join(variant_ids)) mongo_query['variant_id'] = {'$in': variant_ids} ##### end of fundamental query params ##### start of the custom query params # there is only 'clinsig' criterion among the primary terms right now primary_terms = False # gnomad_frequency, local_obs, clingen_ngi, swegen, spidex_human, cadd_score, genetic_models, mvl_tag # functional_annotations, region_annotations, size, svtype, decipher, depth, alt_count, control_frequency secondary_terms = False # check if any of the primary criteria was specified in the query for term in PRIMARY_CRITERIA: if query.get(term): primary_terms = True # check if any of the secondary criteria was specified in the query: for term in SECONDARY_CRITERIA: if query.get(term): secondary_terms = True if primary_terms is True: clinsign_filter = self.clinsig_query(query, mongo_query) # Secondary, excluding filter criteria will hide variants in general, # but can be overridden by an including, major filter criteria # such as a Pathogenic ClinSig. if secondary_terms is True: secondary_filter = self.secondary_query(query, mongo_query) # If there are no primary criteria given, all secondary criteria are added as a # top level '$and' to the query. 
if primary_terms is False: if gene_query: mongo_query['$and'] = [ {'$or': gene_query}, {'$and': secondary_filter}] else: mongo_query['$and'] = secondary_filter # If there is only one primary criterion given without any secondary, it will also be # added as a top level '$and'. # Otherwise, primary criteria are added as a high level '$or' and all secondary criteria # are joined together with them as a single lower level '$and'. if primary_terms is True: # clinsig is specified # Given a request to always return confident clinical variants, # add the clnsig query as a major criteria, but only # trust clnsig entries with trusted revstat levels. if query.get('clinsig_confident_always_returned') == True: if gene_query: mongo_query['$and'] = [ {'$or': gene_query}, { '$or': [ {'$and': secondary_filter}, clinsign_filter ] } ] else: mongo_query['$or'] = [ {'$and': secondary_filter}, clinsign_filter ] else: # clisig terms are provided but no need for trusted revstat levels secondary_filter.append(clinsign_filter) if gene_query: mongo_query['$and'] = [ {'$or': gene_query}, {'$and': secondary_filter}] else: mongo_query['$and'] = secondary_filter elif primary_terms is True: # clisig is provided without secondary terms query # use implicit and mongo_query['clnsig'] = clinsign_filter['clnsig'] if gene_query: mongo_query['$and'] = [{ '$or': gene_query }] elif gene_query: # no primary or secondary filters provided mongo_query['$and'] = [{ '$or': gene_query }] LOG.info("mongo query: %s", mongo_query) return mongo_query
def warn_quirks(message, recommend, pattern, index):
    """Warn quirks."""
    import traceback
    import bs4  # noqa: F401

    # Acquire source code line context
    paths = (MODULE, sys.modules['bs4'].__path__[0])
    tb = traceback.extract_stack()
    previous = None
    filename = None
    lineno = None
    for entry in tb:
        if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)):
            break
        previous = entry
    if previous:
        filename = previous.filename if PY35 else previous[0]
        lineno = previous.lineno if PY35 else previous[1]

    # Format pattern to show line and column position
    context, line = get_pattern_context(pattern, index)[0:2]

    # Display warning
    warnings.warn_explicit(
        "\nCSS selector pattern:\n" +
        " {}\n".format(message) +
        " This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" +
        " In order to confrom to the CSS spec, {}\n".format(recommend) +
        " It is strongly recommended the selector be altered to conform to the CSS spec " +
        "as an exception will be raised for this case in the future.\n" +
        "pattern line {}:\n{}".format(line, context),
        QuirksWarning,
        filename,
        lineno
    )
def vsan_datastore_configured(name, datastore_name): ''' Configures the cluster's VSAN datastore WARNING: The VSAN datastore is created automatically after the first ESXi host is added to the cluster; the state assumes that the datastore exists and errors if it doesn't. ''' cluster_name, datacenter_name = \ __salt__['esxcluster.get_details']()['cluster'], \ __salt__['esxcluster.get_details']()['datacenter'] display_name = '{0}/{1}'.format(datacenter_name, cluster_name) log.info('Running vsan_datastore_configured for \'%s\'', display_name) ret = {'name': name, 'changes': {}, 'result': None, 'comment': 'Default'} comments = [] changes = {} changes_required = False try: si = __salt__['vsphere.get_service_instance_via_proxy']() # Checking if we need to rename the vsan datastore vsan_ds = _get_vsan_datastore(si, cluster_name) if vsan_ds['name'] == datastore_name: comments.append('vSAN datastore is correctly named \'{0}\'. ' 'Nothing to be done.'.format(vsan_ds['name'])) log.info(comments[-1]) else: # vsan_ds needs to be updated changes_required = True if __opts__['test']: comments.append('State {0} will rename the vSAN datastore to ' '\'{1}\'.'.format(name, datastore_name)) log.info(comments[-1]) else: log.trace('Renaming vSAN datastore \'%s\' to \'%s\'', vsan_ds['name'], datastore_name) __salt__['vsphere.rename_datastore']( datastore_name=vsan_ds['name'], new_datastore_name=datastore_name, service_instance=si) comments.append('Renamed vSAN datastore to \'{0}\'.' ''.format(datastore_name)) changes = {'vsan_datastore': {'new': {'name': datastore_name}, 'old': {'name': vsan_ds['name']}}} log.info(comments[-1]) __salt__['vsphere.disconnect'](si) ret.update({'result': True if (not changes_required) else None if __opts__['test'] else True, 'comment': '\n'.join(comments), 'changes': changes}) return ret except salt.exceptions.CommandExecutionError as exc: log.exception('Encountered error') if si: __salt__['vsphere.disconnect'](si) ret.update({ 'result': False, 'comment': exc.strerror}) return ret
Configures the cluster's VSAN datastore WARNING: The VSAN datastore is created automatically after the first ESXi host is added to the cluster; the state assumes that the datastore exists and errors if it doesn't.
def TBH(cpu, dest): """ Table Branch Halfword causes a PC-relative forward branch using a table of single halfword offsets. A base register provides a pointer to the table, and a second register supplies an index into the table. The branch length is twice the value of the halfword returned from the table. :param ARMv7Operand dest: see below; register """ # Capstone merges the two registers values into one operand, so we need to extract them back # Specifies the base register. This contains the address of the table of branch lengths. This # register is allowed to be the PC. If it is, the table immediately follows this instruction. base_addr = dest.get_mem_base_addr() if dest.mem.base in ('PC', 'R15'): base_addr = cpu.PC # Specifies the index register. This contains an integer pointing to a halfword within the table. # The offset within the table is twice the value of the index. offset = cpu.read_int(base_addr + dest.get_mem_offset(), 16) offset = Operators.ZEXTEND(offset, cpu.address_bit_size) cpu.PC += (offset << 1)
Table Branch Halfword causes a PC-relative forward branch using a table of single halfword offsets. A base register provides a pointer to the table, and a second register supplies an index into the table. The branch length is twice the value of the halfword returned from the table. :param ARMv7Operand dest: see below; register
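The table lookup and doubling of the halfword offset can be checked outside of Manticore with plain Python; the table bytes, base address and PC below are made-up values.

# Stand-alone illustration of TBH semantics (all values are assumptions).
table = bytes.fromhex('03000a001200')   # three halfword offsets: 3, 10, 18
base, index, pc = 0x1000, 1, 0x2000

entry = int.from_bytes(table[2 * index:2 * index + 2], 'little')  # halfword at base + 2*index
pc += entry << 1                                                  # branch length is twice the entry
print(hex(pc))  # 0x2014 (offset 10 means 20 bytes forward)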
def hashleftjoin(left, right, key=None, lkey=None, rkey=None, missing=None, cache=True, lprefix=None, rprefix=None): """Alternative implementation of :func:`petl.transform.joins.leftjoin`, where the join is executed by constructing an in-memory lookup for the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. By default data from right hand table is cached to improve performance (only available when `key` is given). Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments. """ lkey, rkey = keys_from_args(left, right, key, lkey, rkey) return HashLeftJoinView(left, right, lkey, rkey, missing=missing, cache=cache, lprefix=lprefix, rprefix=rprefix)
Alternative implementation of :func:`petl.transform.joins.leftjoin`, where the join is executed by constructing an in-memory lookup for the right hand table, then iterating over rows from the left hand table. May be faster and/or more resource efficient where the right table is small and the left table is large. By default data from right hand table is cached to improve performance (only available when `key` is given). Left and right tables with different key fields can be handled via the `lkey` and `rkey` arguments.
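The same idea can be sketched without petl: build a dict-based lookup for the (small) right table once, then stream the (large) left table against it. The field names and the handling of `missing` are assumptions, not petl's exact output layout.

from collections import defaultdict

def hash_left_join(left_rows, right_rows, lkey, rkey, missing=None):
    # build the lookup once for the (small) right table
    lookup = defaultdict(list)
    for row in right_rows:
        lookup[row[rkey]].append(row)
    # stream the (large) left table against it
    for lrow in left_rows:
        matches = lookup.get(lrow[lkey])
        if matches:
            for rrow in matches:
                yield {**lrow, **rrow}
        else:
            yield {**lrow, 'right_value': missing}

left = [{'id': 1, 'colour': 'blue'}, {'id': 2, 'colour': 'red'}]
right = [{'id': 1, 'shape': 'circle'}]
print(list(hash_left_join(left, right, 'id', 'id')))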
def write_kwargs_to_attrs(cls, attrs, **kwargs): """Writes the given keywords to the given ``attrs``. If any keyword argument points to a dict, the keyword will point to a list of the dict's keys. Each key is then written to the attrs with its corresponding value. Parameters ---------- attrs : an HDF attrs The ``attrs`` of an hdf file or a group in an hdf file. \**kwargs : The keywords to write. """ for arg, val in kwargs.items(): if val is None: val = str(None) if isinstance(val, dict): attrs[arg] = val.keys() # just call self again with the dict as kwargs cls.write_kwargs_to_attrs(attrs, **val) else: attrs[arg] = val
Writes the given keywords to the given ``attrs``. If any keyword argument points to a dict, the keyword will point to a list of the dict's keys. Each key is then written to the attrs with its corresponding value. Parameters ---------- attrs : an HDF attrs The ``attrs`` of an hdf file or a group in an hdf file. \**kwargs : The keywords to write.
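With a plain dict standing in for the HDF5 ``attrs`` object, the recursion can be seen flattening a nested dict while recording its keys under the parent name; this is only a sketch, not PyCBC's actual I/O.

def write_kwargs_to_attrs(attrs, **kwargs):
    for arg, val in kwargs.items():
        if val is None:
            val = str(None)
        if isinstance(val, dict):
            attrs[arg] = list(val.keys())        # record the dict's keys under the parent name
            write_kwargs_to_attrs(attrs, **val)  # then write each key/value pair
        else:
            attrs[arg] = val

attrs = {}
write_kwargs_to_attrs(attrs, sampler='emcee', opts={'nwalkers': 200, 'ntemps': None})
print(attrs)
# {'sampler': 'emcee', 'opts': ['nwalkers', 'ntemps'], 'nwalkers': 200, 'ntemps': 'None'}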
def search(self, title=None, libtype=None, **kwargs):
    """ Searching within a library section is much more powerful. It seems certain
        attributes on the media objects can be targeted to filter this search down
        a bit, but I haven't found the documentation for it.

        Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items
        such as actor=<id> seem to work, but require you to already know the id of the actor.
        TLDR: This is untested but seems to work. Use library section search when you can.
    """
    args = {}
    if title:
        args['title'] = title
    if libtype:
        args['type'] = utils.searchType(libtype)
    for attr, value in kwargs.items():
        args[attr] = value
    key = '/library/all%s' % utils.joinArgs(args)
    return self.fetchItems(key)
Searching within a library section is much more powerful. It seems certain
attributes on the media objects can be targeted to filter this search down
a bit, but I haven't found the documentation for it.

Example: "studio=Comedy%20Central" or "year=1999" "title=Kung Fu" all work. Other items
such as actor=<id> seem to work, but require you to already know the id of the actor.
TLDR: This is untested but seems to work. Use library section search when you can.
def create_linear(num_finite_buckets, width, offset): """Creates a new instance of distribution with linear buckets. Args: num_finite_buckets (int): initializes number of finite buckets width (float): initializes the width of each bucket offset (float): initializes the offset Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance """ if num_finite_buckets <= 0: raise ValueError(_BAD_NUM_FINITE_BUCKETS) if width <= 0.0: raise ValueError(_BAD_FLOAT_ARG % (u'width', 0.0)) return sc_messages.Distribution( bucketCounts=[0] * (num_finite_buckets + 2), linearBuckets=sc_messages.LinearBuckets( numFiniteBuckets=num_finite_buckets, width=width, offset=offset))
Creates a new instance of distribution with linear buckets. Args: num_finite_buckets (int): initializes number of finite buckets width (float): initializes the width of each bucket offset (float): initializes the offset Return: :class:`endpoints_management.gen.servicecontrol_v1_messages.Distribution` Raises: ValueError: if the args are invalid for creating an instance
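The arithmetic behind the linear buckets is easy to check by hand: finite bucket ``i`` (1-based) covers ``[offset + (i-1)*width, offset + i*width)``, and the two extra counters hold underflow and overflow, hence ``num_finite_buckets + 2`` counts.

num_finite_buckets, width, offset = 4, 2.5, 10.0

boundaries = [offset + i * width for i in range(num_finite_buckets + 1)]
print(boundaries)               # [10.0, 12.5, 15.0, 17.5, 20.0]
print(num_finite_buckets + 2)   # 6 counters: underflow, 4 finite buckets, overflow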
def uhstack(arrs): """Stack arrays in sequence horizontally while preserving units This is a wrapper around np.hstack that preserves units. Examples -------- >>> from unyt import km >>> a = [1, 2, 3]*km >>> b = [2, 3, 4]*km >>> print(uhstack([a, b])) [1 2 3 2 3 4] km >>> a = [[1],[2],[3]]*km >>> b = [[2],[3],[4]]*km >>> print(uhstack([a, b])) [[1 2] [2 3] [3 4]] km """ v = np.hstack(arrs) v = _validate_numpy_wrapper_units(v, arrs) return v
Stack arrays in sequence horizontally while preserving units This is a wrapper around np.hstack that preserves units. Examples -------- >>> from unyt import km >>> a = [1, 2, 3]*km >>> b = [2, 3, 4]*km >>> print(uhstack([a, b])) [1 2 3 2 3 4] km >>> a = [[1],[2],[3]]*km >>> b = [[2],[3],[4]]*km >>> print(uhstack([a, b])) [[1 2] [2 3] [3 4]] km
def observable(operator, rho, unfolding, complex=False): r"""Return an observable ammount. INPUT: - ``operator`` - An square matrix representing a hermitian operator \ in thesame basis as the density matrix. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> Ne = 2 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> rho = unfolding(rho) >>> sx = np.array([[0, 1], [1, 0]]) >>> print(observable(sx, rho, unfolding)) 2.0 """ if len(rho.shape) == 2: return np.array([observable(operator, i, unfolding) for i in rho]) Ne = unfolding.Ne Mu = unfolding.Mu obs = 0 if unfolding.normalized: rho11 = 1 - sum([rho[Mu(1, i, i)] for i in range(1, Ne)]) for i in range(Ne): for k in range(Ne): if unfolding.real: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: if k < i: u, v = (i, k) else: u, v = (k, i) obs += operator[i, k]*rho[Mu(1, u, v)] if k != i: if k < i: obs += 1j*operator[i, k]*rho[Mu(-1, u, v)] else: obs += -1j*operator[i, k]*rho[Mu(-1, u, v)] else: if k == 0 and i == 0: obs += operator[i, k]*rho11 else: obs += operator[i, k]*rho[Mu(0, k, i)] if not complex: obs = np.real(obs) return obs
r"""Return an observable ammount. INPUT: - ``operator`` - An square matrix representing a hermitian operator \ in thesame basis as the density matrix. - ``rho`` - A density matrix in unfolded format, or a list of such \ density matrices. - ``unfolding`` - A mapping from matrix element indices to unfolded \ indices. >>> Ne = 2 >>> unfolding = Unfolding(Ne, True, True, True) >>> rho = np.array([[0.6, 1+2j], [1-2j, 0.4]]) >>> rho = unfolding(rho) >>> sx = np.array([[0, 1], [1, 0]]) >>> print(observable(sx, rho, unfolding)) 2.0
def intersection(self, other): """ Returns a new tree of all intervals common to both self and other. """ ivs = set() shorter, longer = sorted([self, other], key=len) for iv in shorter: if iv in longer: ivs.add(iv) return IntervalTree(ivs)
Returns a new tree of all intervals common to both self and other.
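The same shorter-set-versus-longer-set trick works with plain hashable tuples standing in for ``Interval`` objects; this is a sketch, not intervaltree's API.

def interval_intersection(tree_a, tree_b):
    # tree_a / tree_b are sets of (begin, end) tuples standing in for IntervalTrees
    shorter, longer = sorted([tree_a, tree_b], key=len)
    return {iv for iv in shorter if iv in longer}

a = {(1, 5), (4, 9), (10, 12)}
b = {(4, 9), (10, 12), (20, 25)}
print(sorted(interval_intersection(a, b)))  # [(4, 9), (10, 12)]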
def _consolidate_repo_sources(sources): ''' Consolidate APT sources. ''' if not isinstance(sources, sourceslist.SourcesList): raise TypeError( '\'{0}\' not a \'{1}\''.format( type(sources), sourceslist.SourcesList ) ) consolidated = {} delete_files = set() base_file = sourceslist.SourceEntry('').file repos = [s for s in sources.list if not s.invalid] for repo in repos: repo.uri = repo.uri.rstrip('/') # future lint: disable=blacklisted-function key = str((getattr(repo, 'architectures', []), repo.disabled, repo.type, repo.uri, repo.dist)) # future lint: enable=blacklisted-function if key in consolidated: combined = consolidated[key] combined_comps = set(repo.comps).union(set(combined.comps)) consolidated[key].comps = list(combined_comps) else: consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line)) if repo.file != base_file: delete_files.add(repo.file) sources.list = list(consolidated.values()) sources.save() for file_ in delete_files: try: os.remove(file_) except OSError: pass return sources
Consolidate APT sources.
def init(self): "Initialize the message-digest and set all fields to zero." self.length = 0L self.input = [] # Load magic initialization constants. self.A = 0x67452301L self.B = 0xefcdab89L self.C = 0x98badcfeL self.D = 0x10325476L
Initialize the message-digest and set all fields to zero.
def add_states(self, *states): ''' Add @states. ''' for state in states: self.states[state] = EventManagerPlus(self)
Add @states.
def load_data(filespec, idx=None, logger=None, **kwargs): """Load data from a file. This call is used to load a data item from a filespec (path or URL) Parameters ---------- filespec : str The path of the file to load (can be a URL). idx : int or string (optional) The index or name of the data unit in the file (e.g. an HDU name) logger : python logger (optional) A logger to record progress opening the item All other keyword parameters are passed to the opener chosen for the file type. Returns ------- data_obj : a data object for a ginga viewer """ global loader_registry info = iohelper.get_fileinfo(filespec) filepath = info.filepath if idx is None: idx = info.numhdu # Assume type to be a FITS image unless the MIME association says # it is something different. try: typ, subtyp = iohelper.guess_filetype(filepath) except Exception as e: if logger is not None: logger.warning("error determining file type: %s; " "assuming 'image/fits'" % (str(e))) # Can't determine file type: assume and attempt FITS typ, subtyp = 'image', 'fits' if logger is not None: logger.debug("assuming file type: %s/%s'" % (typ, subtyp)) try: loader_info = loader_registry['%s/%s' % (typ, subtyp)] data_loader = loader_info.loader except KeyError: # for now, assume that this is an unrecognized FITS file data_loader = load_fits data_obj = data_loader(filepath, idx=idx, logger=logger, **kwargs) return data_obj
Load data from a file. This call is used to load a data item from a filespec (path or URL) Parameters ---------- filespec : str The path of the file to load (can be a URL). idx : int or string (optional) The index or name of the data unit in the file (e.g. an HDU name) logger : python logger (optional) A logger to record progress opening the item All other keyword parameters are passed to the opener chosen for the file type. Returns ------- data_obj : a data object for a ginga viewer
def run_work(self): """Run attacks and defenses""" if os.path.exists(LOCAL_EVAL_ROOT_DIR): sudo_remove_dirtree(LOCAL_EVAL_ROOT_DIR) self.run_attacks() self.run_defenses()
Run attacks and defenses
def move_items(self, from_group, to_group): """Take all elements from the from_group and add it to the to_group.""" if from_group not in self.keys() or len(self.groups[from_group]) == 0: return self.groups.setdefault(to_group, list()).extend(self.groups.get (from_group, list())) if from_group in self.groups: del self.groups[from_group]
Take all elements from the from_group and add it to the to_group.
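The merge can be illustrated with a plain dict of lists; the group names are arbitrary.

def move_items(groups, from_group, to_group):
    if from_group not in groups or not groups[from_group]:
        return
    groups.setdefault(to_group, []).extend(groups[from_group])
    del groups[from_group]

groups = {'pending': [1, 2], 'done': [3]}
move_items(groups, 'pending', 'done')
print(groups)  # {'done': [3, 1, 2]}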
def _attr_sort_func(model, iter1, iter2, attribute): """Internal helper """ attr1 = getattr(model[iter1][0], attribute, None) attr2 = getattr(model[iter2][0], attribute, None) return cmp(attr1, attr2)
Internal helper
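``cmp`` only exists in Python 2; under Python 3 the same None-tolerant ordering can be expressed with a sort key, sketched here without GTK model wrappers (missing attributes sort first, as None does under Python 2).

class Row:
    def __init__(self, name=None):
        if name is not None:
            self.name = name

rows = [Row('beta'), Row(), Row('alpha')]
# getattr with a default mirrors the None-tolerant comparison
rows.sort(key=lambda r: (getattr(r, 'name', None) is not None,
                         getattr(r, 'name', None) or ''))
print([getattr(r, 'name', None) for r in rows])  # [None, 'alpha', 'beta']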
def _find_usage_security_groups(self): """find usage for security groups""" vpc_count = 0 paginator = self.conn.get_paginator('describe_db_security_groups') for page in paginator.paginate(): for group in page['DBSecurityGroups']: if 'VpcId' in group and group['VpcId'] is not None: vpc_count += 1 self.limits['Max auths per security group']._add_current_usage( len(group["EC2SecurityGroups"]) + len(group["IPRanges"]), aws_type='AWS::RDS::DBSecurityGroup', resource_id=group['DBSecurityGroupName'] ) self.limits['VPC Security Groups']._add_current_usage( vpc_count, aws_type='AWS::RDS::DBSecurityGroup', )
find usage for security groups
def diagnose_cluster( self, project_id, region, cluster_name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets cluster diagnostic information. After the operation completes, the Operation.response field contains ``DiagnoseClusterOutputLocation``. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.diagnose_cluster(project_id, region, cluster_name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "diagnose_cluster" not in self._inner_api_calls: self._inner_api_calls[ "diagnose_cluster" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.diagnose_cluster, default_retry=self._method_configs["DiagnoseCluster"].retry, default_timeout=self._method_configs["DiagnoseCluster"].timeout, client_info=self._client_info, ) request = clusters_pb2.DiagnoseClusterRequest( project_id=project_id, region=region, cluster_name=cluster_name ) operation = self._inner_api_calls["diagnose_cluster"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, empty_pb2.Empty, metadata_type=clusters_pb2.DiagnoseClusterResults, )
Gets cluster diagnostic information. After the operation completes, the Operation.response field contains ``DiagnoseClusterOutputLocation``. Example: >>> from google.cloud import dataproc_v1beta2 >>> >>> client = dataproc_v1beta2.ClusterControllerClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # TODO: Initialize `region`: >>> region = '' >>> >>> # TODO: Initialize `cluster_name`: >>> cluster_name = '' >>> >>> response = client.diagnose_cluster(project_id, region, cluster_name) >>> >>> def callback(operation_future): ... # Handle result. ... result = operation_future.result() >>> >>> response.add_done_callback(callback) >>> >>> # Handle metadata. >>> metadata = response.metadata() Args: project_id (str): Required. The ID of the Google Cloud Platform project that the cluster belongs to. region (str): Required. The Cloud Dataproc region in which to handle the request. cluster_name (str): Required. The cluster name. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def fetch_access_token(self):
    """
    Fetch the access token

    For details, see
    http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档

    :return: the returned JSON data package
    """
    return self._fetch_access_token(
        url='https://api.weixin.qq.com/cgi-bin/token',
        params={
            'grant_type': 'client_credential',
            'appid': self.appid,
            'secret': self.secret
        }
    )
Fetch the access token

For details, see
http://mp.weixin.qq.com/wiki/index.php?title=通用接口文档

:return: the returned JSON data package
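Outside the client wrapper, the same request is a plain GET against the token endpooint shown in the code above; the appid/secret values are placeholders and the error handling is only sketched.

import requests

def fetch_access_token(appid, secret):
    resp = requests.get(
        'https://api.weixin.qq.com/cgi-bin/token',
        params={'grant_type': 'client_credential', 'appid': appid, 'secret': secret},
    )
    # on success the JSON contains access_token/expires_in, on failure errcode/errmsg
    return resp.json()

# token = fetch_access_token('your-appid', 'your-secret')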
def removi(item, inset): """ Remove an item from an integer set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removi_c.html :param item: Item to be removed. :type item: int :param inset: Set to be updated. :type inset: spiceypy.utils.support_types.SpiceCell """ assert isinstance(inset, stypes.SpiceCell) assert inset.dtype == 2 item = ctypes.c_int(item) libspice.removi_c(item, ctypes.byref(inset))
Remove an item from an integer set. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removi_c.html :param item: Item to be removed. :type item: int :param inset: Set to be updated. :type inset: spiceypy.utils.support_types.SpiceCell
def _get_representative(self, obj): """Finds and returns the root of the set containing `obj`.""" if obj not in self._parents: self._parents[obj] = obj self._weights[obj] = 1 self._prev_next[obj] = [obj, obj] self._min_values[obj] = obj return obj path = [obj] root = self._parents[obj] while root != path[-1]: path.append(root) root = self._parents[root] # compress the path and return for ancestor in path: self._parents[ancestor] = root return root
Finds and returns the root of the set containing `obj`.
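The find-with-path-compression core, stripped of the weight/min-value/linked-list bookkeeping kept by the class above; a self-contained sketch.

def find(parents, obj):
    # initialise unseen objects as their own root
    if obj not in parents:
        parents[obj] = obj
        return obj
    # walk up to the root
    path = [obj]
    root = parents[obj]
    while root != path[-1]:
        path.append(root)
        root = parents[root]
    # path compression: point every visited node directly at the root
    for ancestor in path:
        parents[ancestor] = root
    return root

parents = {}
for a, b in [('a', 'b'), ('b', 'c')]:
    parents[find(parents, a)] = find(parents, b)   # naive union by root
print(find(parents, 'a') == find(parents, 'c'))    # True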
def set_finished(self):
    """This stores the number of items that have been pushed, and
    transitions the current component to the FINISHED state (which
    precedes the STOPPED state). The FINISHED state isn't really
    necessary unless methods/hooks are overridden to depend on it, but
    the count must be stored at one point so that the next component
    knows how many items to expect. This is done by default after the
    loop breaks, but can be manually called sooner, if desired.
    """
    component_name = self.get_component_name()

    self.log(
        logging.INFO,
        "Component [%s] is being marked as finished.",
        component_name)

    existing_state = self.__get_state(component_name)

    assert existing_state == fss.constants.PCS_RUNNING, \
        "Can not change to 'finished' state from unsupported " \
        "state: (" + str(existing_state) + ")"

    self.__set_data('count', self.__push_count)
    self.__set_state(fss.constants.PCS_FINISHED)
This stores the number of items that have been pushed, and transitions
the current component to the FINISHED state (which precedes the STOPPED
state). The FINISHED state isn't really necessary unless methods/hooks
are overridden to depend on it, but the count must be stored at one point
so that the next component knows how many items to expect. This is done
by default after the loop breaks, but can be manually called sooner, if
desired.
def parse_keys_and_ranges(i_str, keyfunc, rangefunc): '''Parse the :class:`from_kvlayer` input string. This accepts two formats. In the textual format, it accepts any number of stream IDs in timestamp-docid format, separated by ``,`` or ``;``, and processes those as individual stream IDs. In the binary format, it accepts 20-byte key blobs (16 bytes md5 hash, 4 bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` loads scans keys `a` through `f` and loads singly key `x`. `keyfunc` and `rangefunc` are run as generators and their yields are yielded from this function. ''' while i_str: m = _STREAM_ID_RE.match(i_str) if m: # old style text stream_id for retval in keyfunc(stream_id_to_kvlayer_key(m.group())): yield retval i_str = i_str[m.end():] while i_str and ((i_str[0] == ',') or (i_str[0] == ';')): i_str = i_str[1:] continue if len(i_str) == SI_KEY_LENGTH: # one key, get it. key = parse_si_key(i_str) for retval in keyfunc(key): yield retval return keya = i_str[:SI_KEY_LENGTH] splitc = i_str[SI_KEY_LENGTH] if splitc == '<': # range keyb = i_str[SI_KEY_LENGTH+1:SI_KEY_LENGTH+1+SI_KEY_LENGTH] i_str = i_str[SI_KEY_LENGTH+1+SI_KEY_LENGTH:] keya = parse_si_key(keya) keyb = parse_si_key(keyb) for retval in rangefunc(keya, keyb): yield retval elif splitc == ';': # keya is single key to load keya = parse_si_key(keya) for retval in keyfunc(keya): yield retval i_str = i_str[SI_KEY_LENGTH+1+1:] else: logger.error('bogus key splitter %s, %r', splitc, i_str) return
Parse the :class:`from_kvlayer` input string.

This accepts two formats.  In the textual format, it accepts any
number of stream IDs in timestamp-docid format, separated by ``,``
or ``;``, and processes those as individual stream IDs.  In the
binary format, it accepts 20-byte key blobs (16 bytes md5 hash,
4 bytes timestamp) split by ``;`` or ``<``; e.g., ``a<f;x`` scans
keys `a` through `f` and loads the single key `x`.

`keyfunc` and `rangefunc` are run as generators and their yields
are yielded from this function.
def list_versions(self, layer_id): """ Filterable list of versions of a layer, always ordered newest to oldest. If the version’s source supports revisions, you can get a specific revision using ``.filter(data__source__revision=value)``. Specific values depend on the source type. Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators respectively. """ target_url = self.client.get_url('VERSION', 'GET', 'multi', {'layer_id': layer_id}) return base.Query(self, target_url, valid_filter_attributes=('data',), valid_sort_attributes=())
Filterable list of versions of a layer, always ordered newest to oldest. If the version’s source supports revisions, you can get a specific revision using ``.filter(data__source__revision=value)``. Specific values depend on the source type. Use ``data__source_revision__lt`` or ``data__source_revision__gte`` to filter using ``<`` or ``>=`` operators respectively.
def sigma_clipping(date, mag, err, threshold=3, iteration=1): """ Remove any fluctuated data points by magnitudes. Parameters ---------- date : array_like An array of dates. mag : array_like An array of magnitudes. err : array_like An array of magnitude errors. threshold : float, optional Threshold for sigma-clipping. iteration : int, optional The number of iteration. Returns ------- date : array_like Sigma-clipped dates. mag : array_like Sigma-clipped magnitudes. err : array_like Sigma-clipped magnitude errors. """ # Check length. if (len(date) != len(mag)) \ or (len(date) != len(err)) \ or (len(mag) != len(err)): raise RuntimeError('The length of date, mag, and err must be same.') # By magnitudes for i in range(int(iteration)): mean = np.median(mag) std = np.std(mag) index = (mag >= mean - threshold*std) & (mag <= mean + threshold*std) date = date[index] mag = mag[index] err = err[index] return date, mag, err
Remove outlying (strongly fluctuating) data points based on their magnitudes.

Parameters
----------
date : array_like
    An array of dates.
mag : array_like
    An array of magnitudes.
err : array_like
    An array of magnitude errors.
threshold : float, optional
    Threshold for sigma-clipping.
iteration : int, optional
    The number of iterations.

Returns
-------
date : array_like
    Sigma-clipped dates.
mag : array_like
    Sigma-clipped magnitudes.
err : array_like
    Sigma-clipped magnitude errors.
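A quick synthetic-data check of the clipping rule: points more than ``threshold`` standard deviations from the median magnitude are dropped on each iteration.

import numpy as np

rng = np.random.default_rng(0)
date = np.arange(100.0)
mag = rng.normal(15.0, 0.05, 100)
mag[10] = 17.0                      # inject one obvious outlier
err = np.full(100, 0.01)

for _ in range(1):                  # one clipping iteration
    centre, std = np.median(mag), np.std(mag)
    keep = (mag >= centre - 3 * std) & (mag <= centre + 3 * std)
    date, mag, err = date[keep], mag[keep], err[keep]

print(len(mag))  # 99, the outlier at index 10 was removed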
def delete(self, id): """ Delete a resource by bson id :raises: 404 Not Found :raises: 400 Bad request :raises: 500 Server Error """ try: response = yield self.client.delete(id) if response.get("n") > 0: self.write({"message": "Deleted %s object: %s" % (self.object_name, id) }) return self.raise_error(404, "Resource not found") except InvalidId as ex: self.raise_error(400, message="Your ID is malformed: %s" % id) except: self.raise_error() self.finish()
Delete a resource by bson id :raises: 404 Not Found :raises: 400 Bad request :raises: 500 Server Error
def get_sequence_rule_mdata(): """Return default mdata map for SequenceRule""" return { 'next_assessment_part': { 'element_label': { 'text': 'next assessment part', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, 'cumulative': { 'element_label': { 'text': 'cumulative', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter either true or false.', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_boolean_values': [None], 'syntax': 'BOOLEAN', }, 'assessment_part': { 'element_label': { 'text': 'assessment part', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'accepts an osid.id.Id object', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_id_values': [''], 'syntax': 'ID', 'id_set': [], }, 'minimum_score': { 'element_label': { 'text': 'minimum score', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter a cardinal value', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_cardinal_values': [None], 'syntax': 'CARDINAL', 'minimum_cardinal': None, 'maximum_cardinal': None, 'cardinal_set': [] }, 'maximum_score': { 'element_label': { 'text': 'maximum score', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'instructions': { 'text': 'enter a cardinal value', 'languageTypeId': str(DEFAULT_LANGUAGE_TYPE), 'scriptTypeId': str(DEFAULT_SCRIPT_TYPE), 'formatTypeId': str(DEFAULT_FORMAT_TYPE), }, 'required': False, 'read_only': False, 'linked': False, 'array': False, 'default_cardinal_values': [None], 'syntax': 'CARDINAL', 'minimum_cardinal': None, 'maximum_cardinal': None, 'cardinal_set': [] }, }
Return default mdata map for SequenceRule
def down_capture(returns, factor_returns, **kwargs): """ Compute the capture ratio for periods when the benchmark return is negative Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- down_capture : float Note ---- See http://www.investopedia.com/terms/d/down-market-capture-ratio.asp for more information. """ return down(returns, factor_returns, function=capture, **kwargs)
Compute the capture ratio for periods when the benchmark return is negative Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- down_capture : float Note ---- See http://www.investopedia.com/terms/d/down-market-capture-ratio.asp for more information.
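A simplified, non-annualised sketch of what a down-market capture ratio measures: restrict both series to periods where the benchmark return is negative and compare cumulative returns. empyrical's actual implementation annualises via ``capture``, so treat the numbers below as illustrative only.

import numpy as np

def cum_return(returns):
    return np.prod(1 + returns) - 1

returns = np.array([0.01, -0.02, 0.005, -0.01])
factor_returns = np.array([0.02, -0.03, 0.01, -0.02])

down = factor_returns < 0
ratio = cum_return(returns[down]) / cum_return(factor_returns[down])
print(round(ratio, 3))  # 0.603: the strategy suffers roughly 60% of the benchmark's losses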
def _setup_subpix(self,nside=2**16): """ Subpixels for random position generation. """ # Only setup once... if hasattr(self,'subpix'): return # Simulate over full ROI self.roi_radius = self.config['coords']['roi_radius'] # Setup background spatial stuff logger.info("Setup subpixels...") self.nside_pixel = self.config['coords']['nside_pixel'] self.nside_subpixel = self.nside_pixel * 2**4 # Could be config parameter epsilon = np.degrees(hp.max_pixrad(self.nside_pixel)) # Pad roi radius to cover edge healpix subpix = ugali.utils.healpix.query_disc(self.nside_subpixel,self.roi.vec,self.roi_radius+epsilon) superpix = ugali.utils.healpix.superpixel(subpix,self.nside_subpixel,self.nside_pixel) self.subpix = subpix[np.in1d(superpix,self.roi.pixels)]
Subpixels for random position generation.
def insert_from_segmentlistdict(self, seglists, name, version = None, comment = None, valid=None): """ Insert the segments from the segmentlistdict object seglists as a new list of "active" segments into this LigolwSegments object. The dictionary's keys are assumed to provide the instrument name for each segment list. A new entry will be created in the segment_definer table for the segment lists, and the dictionary's keys, the name, and comment will be used to populate the entry's metadata. """ for instrument, segments in seglists.items(): if valid is None: curr_valid = () else: curr_valid = valid[instrument] self.add(LigolwSegmentList(active = segments, instruments = set([instrument]), name = name, version = version, comment = comment, valid = curr_valid))
Insert the segments from the segmentlistdict object seglists as a new list of "active" segments into this LigolwSegments object. The dictionary's keys are assumed to provide the instrument name for each segment list. A new entry will be created in the segment_definer table for the segment lists, and the dictionary's keys, the name, and comment will be used to populate the entry's metadata.
def assign(self, attrs): """Merge new attributes """ for k, v in attrs.items(): setattr(self, k, v)
Merge new attributes
def discretize_wd_style(N, q, F, d, Phi): """ TODO: add documentation New implementation. I'll make this work first, then document. """ DEBUG = False Ts = [] potential = 'BinaryRoche' r0 = libphoebe.roche_pole(q, F, d, Phi) # The following is a hack that needs to go! pot_name = potential dpdx = globals()['d%sdx'%(pot_name)] dpdy = globals()['d%sdy'%(pot_name)] dpdz = globals()['d%sdz'%(pot_name)] if DEBUG: import matplotlib.pyplot as plt from matplotlib.path import Path import matplotlib.patches as patches fig = plt.figure() ax1 = fig.add_subplot(131) ax2 = fig.add_subplot(132) ax3 = fig.add_subplot(133) ax1.set_xlim(-0.3, 0.3) # -1.6 1.6 ax1.set_ylim(-0.3, 0.3) ax2.set_xlim(-0.3, 0.3) ax2.set_ylim(-0.3, 0.3) ax3.set_xlim(-0.3, 0.3) ax3.set_ylim(-0.3, 0.3) ax1.set_xlabel('x') ax1.set_ylabel('y') ax2.set_xlabel('x') ax2.set_ylabel('z') ax3.set_xlabel('y') ax3.set_ylabel('z') # Rectangle centers: theta = np.array([np.pi/2*(k-0.5)/N for k in range(1, N+2)]) phi = np.array([[np.pi*(l-0.5)/Mk for l in range(1, Mk+1)] for Mk in np.array(1 + 1.3*N*np.sin(theta), dtype=int)]) for t in range(len(theta)-1): dtheta = theta[t+1]-theta[t] for i in range(len(phi[t])): dphi = phi[t][1]-phi[t][0] # Project the vertex onto the potential; this will be our center point: rc = np.array((r0*sin(theta[t])*cos(phi[t][i]), r0*sin(theta[t])*sin(phi[t][i]), r0*cos(theta[t]))) vc = project_onto_potential(rc, potential, d, q, F, Phi).r # Next we need to find the tangential plane, which we'll get by finding the normal, # which is the negative of the gradient: nc = np.array((-dpdx(vc, d, q, F), -dpdy(vc, d, q, F), -dpdz(vc, d, q, F))) # Then we need to find the intercontext of +/-dtheta/dphi-deflected # radius vectors with the tangential plane. We do that by solving # # d = [(p0 - l0) \dot n] / (l \dot n), # # where p0 and l0 are reference points on the plane and on the line, # respectively, n is the normal vector, and l is the line direction # vector. For convenience l0 can be set to 0, and p0 is just vc. d # then measures the distance from the origin along l. l1 = np.array((sin(theta[t]-dtheta/2)*cos(phi[t][i]-dphi/2), sin(theta[t]-dtheta/2)*sin(phi[t][i]-dphi/2), cos(theta[t]-dtheta/2))) l2 = np.array((sin(theta[t]-dtheta/2)*cos(phi[t][i]+dphi/2), sin(theta[t]-dtheta/2)*sin(phi[t][i]+dphi/2), cos(theta[t]-dtheta/2))) l3 = np.array((sin(theta[t]+dtheta/2)*cos(phi[t][i]+dphi/2), sin(theta[t]+dtheta/2)*sin(phi[t][i]+dphi/2), cos(theta[t]+dtheta/2))) l4 = np.array((sin(theta[t]+dtheta/2)*cos(phi[t][i]-dphi/2), sin(theta[t]+dtheta/2)*sin(phi[t][i]-dphi/2), cos(theta[t]+dtheta/2))) r1 = np.dot(vc, nc) / np.dot(l1, nc) * l1 r2 = np.dot(vc, nc) / np.dot(l2, nc) * l2 r3 = np.dot(vc, nc) / np.dot(l3, nc) * l3 r4 = np.dot(vc, nc) / np.dot(l4, nc) * l4 # This sorts out the vertices, now we need to fudge the surface # area. WD does not take curvature of the equipotential at vc # into account, so the surface area computed from these vertex- # delimited surfaces will generally be different from what WD # computes. Thus, we compute the surface area the same way WD # does it and assign it to each element even though that isn't # quite its area: # # dsigma = || r^2 sin(theta)/cos(gamma) dtheta dphi ||, # # where gamma is the angle between l and n. 
cosgamma = np.dot(vc, nc)/np.sqrt(np.dot(vc, vc))/np.sqrt(np.dot(nc, nc)) dsigma = np.abs(np.dot(vc, vc)*np.sin(theta[t])/cosgamma*dtheta*dphi) # Temporary addition: triangle areas: ###################### side1 = sqrt((r1[0]-r2[0])**2 + (r1[1]-r2[1])**2 + (r1[2]-r2[2])**2) side2 = sqrt((r1[0]-r3[0])**2 + (r1[1]-r3[1])**2 + (r1[2]-r3[2])**2) side3 = sqrt((r2[0]-r3[0])**2 + (r2[1]-r3[1])**2 + (r2[2]-r3[2])**2) s = 0.5*(side1 + side2 + side3) dsigma_t_sq = s*(s-side1)*(s-side2)*(s-side3) dsigma_t = sqrt(dsigma_t_sq) if dsigma_t_sq > 0 else 0.0 ############################################################ if DEBUG: fc = 'orange' verts = [(r1[0], r1[1]), (r2[0], r2[1]), (r3[0], r3[1]), (r4[0], r4[1]), (r1[0], r1[1])] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY] path = Path(verts, codes) patch = patches.PathPatch(path, facecolor=fc, lw=2) ax1.add_patch(patch) verts = [(r1[0], r1[2]), (r2[0], r2[2]), (r3[0], r3[2]), (r4[0], r4[2]), (r1[0], r1[2])] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY] path = Path(verts, codes) patch = patches.PathPatch(path, facecolor=fc, lw=2) ax2.add_patch(patch) verts = [(r1[1], r1[2]), (r2[1], r2[2]), (r3[1], r3[2]), (r4[1], r4[2]), (r1[1], r1[2])] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY] path = Path(verts, codes) patch = patches.PathPatch(path, facecolor=fc, lw=2) ax3.add_patch(patch) # Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r1[0], r1[1], r1[2], r2[0], r2[1], r2[2], r3[0], r3[1], r3[2], nc[0], nc[1], nc[2]))) # Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r3[0], r3[1], r3[2], r4[0], r4[1], r4[2], r1[0], r1[1], r1[2], nc[0], nc[1], nc[2]))) # # Instead of recomputing all quantities, just reflect over the y- and z-directions. # Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r1[0], -r1[1], r1[2], r2[0], -r2[1], r2[2], r3[0], -r3[1], r3[2], nc[0], -nc[1], nc[2]))) # Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r3[0], -r3[1], r3[2], r4[0], -r4[1], r4[2], r1[0], -r1[1], r1[2], nc[0], -nc[1], nc[2]))) # Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r1[0], r1[1], -r1[2], r2[0], r2[1], -r2[2], r3[0], r3[1], -r3[2], nc[0], nc[1], -nc[2]))) # Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r3[0], r3[1], -r3[2], r4[0], r4[1], -r4[2], r1[0], r1[1], -r1[2], nc[0], nc[1], -nc[2]))) # Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r1[0], -r1[1], -r1[2], r2[0], -r2[1], -r2[2], r3[0], -r3[1], -r3[2], nc[0], -nc[1], -nc[2]))) # Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r3[0], -r3[1], -r3[2], r4[0], -r4[1], -r4[2], r1[0], -r1[1], -r1[2], nc[0], -nc[1], -nc[2]))) # FOR TESTING - report theta/phi for each triangle # uncomment the above original version eventually Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r1[0], r1[1], r1[2], r2[0], r2[1], r2[2], r3[0], r3[1], r3[2], nc[0], nc[1], nc[2], theta[t], phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], vc[1], vc[2], dsigma/2, r3[0], r3[1], r3[2], r4[0], r4[1], r4[2], r1[0], r1[1], r1[2], nc[0], nc[1], nc[2], theta[t], phi[t][0], dsigma_t))) # Instead of recomputing all quantities, just reflect over the y- and z-directions. 
Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r1[0], -r1[1], r1[2], r2[0], -r2[1], r2[2], r3[0], -r3[1], r3[2], nc[0], -nc[1], nc[2], theta[t], -phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], -vc[1], vc[2], dsigma/2, r3[0], -r3[1], r3[2], r4[0], -r4[1], r4[2], r1[0], -r1[1], r1[2], nc[0], -nc[1], nc[2], theta[t], -phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r1[0], r1[1], -r1[2], r2[0], r2[1], -r2[2], r3[0], r3[1], -r3[2], nc[0], nc[1], -nc[2], np.pi-theta[t], phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], vc[1], -vc[2], dsigma/2, r3[0], r3[1], -r3[2], r4[0], r4[1], -r4[2], r1[0], r1[1], -r1[2], nc[0], nc[1], -nc[2], np.pi-theta[t], phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r1[0], -r1[1], -r1[2], r2[0], -r2[1], -r2[2], r3[0], -r3[1], -r3[2], nc[0], -nc[1], -nc[2], np.pi-theta[t], -phi[t][0], dsigma_t))) Ts.append(np.array((vc[0], -vc[1], -vc[2], dsigma/2, r3[0], -r3[1], -r3[2], r4[0], -r4[1], -r4[2], r1[0], -r1[1], -r1[2], nc[0], -nc[1], -nc[2], np.pi-theta[t], -phi[t][0], dsigma_t))) if DEBUG: plt.show() # Assemble a mesh table: table = np.array(Ts) return table
TODO: add documentation New implementation. I'll make this work first, then document.
def on_service_departure(self, svc_ref): """ Called when a service has been unregistered from the framework :param svc_ref: A service reference """ with self._lock: if svc_ref is self.reference: # Forget about the service self._value.unset_service() # Clear the reference self.reference = None # Look for a replacement self._pending_ref = self._context.get_service_reference( self.requirement.specification, self.requirement.filter ) if self._pending_ref is None: # No replacement found yet, wait a little self.__still_valid = True self.__timer_args = (self._value, svc_ref) self.__timer = threading.Timer( self.__timeout, self.__unbind_call, (False,) ) self.__timer.start() else: # Notify iPOPO immediately self._ipopo_instance.unbind(self, self._value, svc_ref) return True return None
Called when a service has been unregistered from the framework :param svc_ref: A service reference
def run_friedman_smooth(x, y, span): """Run the FORTRAN smoother.""" N = len(x) weight = numpy.ones(N) results = numpy.zeros(N) residuals = numpy.zeros(N) mace.smooth(x, y, weight, span, 1, 1e-7, results, residuals) return results, residuals
Run the FORTRAN smoother.
def forwards(self, orm): "Write your forwards methods here." db_table = orm['samples.CohortVariant']._meta.db_table db.execute('TRUNCATE TABLE "{0}"'.format(db_table)) db.execute('ALTER SEQUENCE "{0}_id_seq" RESTART 1'.format(db_table)) cohorts = list(orm['samples.Cohort'].objects.all()) for c in cohorts: # Calculate frequencies for all variants associated with all # samples in this cohort db.execute(''' INSERT INTO cohort_variant (cohort_id, variant_id, af) ( SELECT c.id, r.variant_id, COUNT(r.id) / c."count"::float FROM sample_result r INNER JOIN sample s ON (r.sample_id = s.id) INNER JOIN cohort_sample cs ON (cs.sample_id = s.id) INNER JOIN cohort c ON (cs.cohort_id = c.id) WHERE c.id = %s GROUP BY c.id, r.variant_id, c."count" ) ''', [c.pk])
Write your forwards methods here.
def ObsBandpass(obstring, graphtable=None, comptable=None, component_dict={}): """Generate a bandpass object from observation mode. If the bandpass consists of multiple throughput files (e.g., "acs,hrc,f555w"), then `ObsModeBandpass` is returned. Otherwise, if it consists of a single throughput file (e.g., "johnson,v"), then `~pysynphot.spectrum.TabularSpectralElement` is returned. See :ref:`pysynphot-obsmode-bandpass` and :ref:`pysynphot-appendixb` for more details. Parameters ---------- obstring : str Observation mode. graphtable, comptable, component_dict See `~pysynphot.observationmode.ObservationMode`. Returns ------- bp : `~pysynphot.spectrum.TabularSpectralElement` or `ObsModeBandpass` Examples -------- >>> bp1 = S.ObsBandpass('acs,hrc,f555w') >>> bp2 = S.ObsBandpass('johnson,v') """ ##Temporarily create an Obsmode to determine whether an ##ObsModeBandpass or a TabularSpectralElement will be returned. ob=ObservationMode(obstring,graphtable=graphtable, comptable=comptable,component_dict=component_dict) if len(ob) > 1: return ObsModeBandpass(ob) else: return TabularSpectralElement(ob.components[0].throughput_name)
Generate a bandpass object from observation mode. If the bandpass consists of multiple throughput files (e.g., "acs,hrc,f555w"), then `ObsModeBandpass` is returned. Otherwise, if it consists of a single throughput file (e.g., "johnson,v"), then `~pysynphot.spectrum.TabularSpectralElement` is returned. See :ref:`pysynphot-obsmode-bandpass` and :ref:`pysynphot-appendixb` for more details. Parameters ---------- obstring : str Observation mode. graphtable, comptable, component_dict See `~pysynphot.observationmode.ObservationMode`. Returns ------- bp : `~pysynphot.spectrum.TabularSpectralElement` or `ObsModeBandpass` Examples -------- >>> bp1 = S.ObsBandpass('acs,hrc,f555w') >>> bp2 = S.ObsBandpass('johnson,v')
def getLayout(kind=None,theme=None,title='',xTitle='',yTitle='',zTitle='',barmode='',bargap=None,bargroupgap=None, margin=None, dimensions=None, width=None, height=None, annotations=None,is3d=False,**kwargs): """ Generates a plotly Layout Parameters: ----------- theme : string Layout Theme solar pearl white title : string Chart Title xTitle : string X Axis Title yTitle : string Y Axis Title zTitle : string Z Axis Title Applicable only for 3d charts barmode : string Mode when displaying bars group stack overlay bargap : float Sets the gap between bars [0,1) Applicabe for bar and histogram plots bargroupgap : float Set the gap between groups [0,1) Applicabe for bar and histogram plots gridcolor : string grid color zerolinecolor : string zero line color margin : dict or tuple Dictionary (l,r,b,t) or Tuple containing the left, right, bottom and top margins dimensions : tuple Dimensions of figure annotations : dict or list Dictionary of annotations {x_point : text} or List of Plotly Annotations is3d : bool Indicates if the layout is for a 3D chart Other Kwargs ============ Shapes hline : int, list or dict Draws a horizontal line at the indicated y position(s) Extra parameters can be passed in the form of a dictionary (see shapes) vline : int, list or dict Draws a vertical line at the indicated x position(s) Extra parameters can be passed in the form of a dictionary (see shapes) hspan : (y0,y1) Draws a horizontal rectangle at the indicated (y0,y1) positions. Extra parameters can be passed in the form of a dictionary (see shapes) vspan : (x0,x1) Draws a vertical rectangle at the indicated (x0,x1) positions. Extra parameters can be passed in the form of a dictionary (see shapes) shapes : dict or list(dict) List of dictionaries with the specifications of a given shape. See help(cufflinks.tools.get_shape) for more information Axis Ranges xrange : [lower_bound,upper_bound] Sets the range for the x axis yrange : [lower_bound,upper_bound] Sets the range for the y axis zrange : [lower_bound,upper_bound] Sets the range for the z axis Explicit Layout Updates layout_update : dict The layout will be modified with all the explicit values stated in the dictionary Range Selector rangeselector : dict Defines a rangeselector object see help(cf.tools.get_range_selector) for more information Example: {'steps':['1y','2 months','5 weeks','ytd','2mtd'], 'axis':'xaxis', 'bgcolor' : ('blue',.3), 'x': 0.2 , 'y' : 0.9} Range Slider rangeslider : bool or dict Defines if a rangeslider is displayed If bool: True : Makes it visible if dict: Rangeslider object Example: {'bgcolor':('blue',.3),'autorange':True} Annotations fontcolor : str Text color for annotations fontsize : int Text size for annotations textangle : int Textt angle See https://plot.ly/python/reference/#layout-annotations for a complete list of valid parameters. 
""" for key in list(kwargs.keys()): if key not in __LAYOUT_KWARGS: raise Exception("Invalid keyword : '{0}'".format(key)) if not theme: theme = auth.get_config_file()['theme'] theme_data = getTheme(theme) layout=theme_data['layout'] layout['xaxis'].update({'title':xTitle}) layout['yaxis'].update({'title':yTitle}) fontfamily=kwargs.pop('fontfamily',None) if fontfamily: deep_update(layout,{'font':{'family':fontfamily}}) if barmode: layout.update({'barmode':barmode}) if bargroupgap: layout.update({'bargroupgap':bargroupgap}) if bargap: layout.update(bargap=bargap) if title: layout.update({'title':title}) if annotations: layout.update({'annotations':annotations}) def update_axis(layout,axis='xy',**vals): for _x in axis: for k,v in list(vals.items()): if v==None: vals.pop(k) for k in layout: if '{0}{1}'.format(_x,'axis') in k: layout[k].update(**vals) return layout axis_kwargs=check_kwargs(kwargs,__LAYOUT_AXIS,{},True) xaxis_kwargs=kwargs_from_keyword(kwargs,{},'xaxis',True) yaxis_kwargs=kwargs_from_keyword(kwargs,{},'yaxis',True) for _x,_vals in (('xy',axis_kwargs),('x',xaxis_kwargs),('y',yaxis_kwargs)): layout=update_axis(layout,_x,**_vals) if margin: if isinstance(margin,dict): margin=margin else: margin=dict(list(zip(('l','r','b','t'),margin))) layout.update(margin=margin) if dimensions: layout.update(width=dimensions[0]) layout.update(height=dimensions[1]) if height: layout.update(height=height) if width: layout.update(width=width) if is3d: if '3d' in theme_data: layout=deep_update(layout,theme_data['3d']) zaxis=layout['xaxis'].copy() zaxis.update(title=zTitle) scene=dict(xaxis=layout['xaxis'].copy(),yaxis=layout['yaxis'].copy(),zaxis=zaxis) layout.update(scene=scene) del layout['xaxis'] del layout['yaxis'] ## Axis Range for r in ['x','y','z']: if '{0}range'.format(r) in kwargs: if is3d: layout['scene']['{0}axis'.format(r)].update(range=kwargs['{0}range'.format(r)]) else: layout['{0}axis'.format(r)].update(range=kwargs['{0}range'.format(r)]) # Need to update this for an add_axis approach. 
if kind in ('candlestick','ohlc','candle'): layout['yaxis2']=layout['yaxis'].copy() layout['yaxis'].update(showticklabels=False) ## Kwargs if 'legend' in kwargs: if type(kwargs['legend'])==bool: layout['showlegend']=kwargs['legend'] elif type(kwargs['legend'])==str: if kwargs['legend']=='top': layout['legend'].update(orientation='h',yanchor='bottom',x=.3,y=.95) elif kwargs['legend']=='bottom': layout['legend'].update(orientation='h',yanchor='bottom',x=.3,y=-0.5) layout['showlegend']=True else: layout['legend']=kwargs['legend'] layout['showlegend']=True if 'showlegend' in kwargs: layout['showlegend']=kwargs['showlegend'] # Logarithmic Axis for _ in ['x','y','z']: if 'log{0}'.format(_) in kwargs: if is3d: if kwargs['log{0}'.format(_)]: layout['scene']['{0}axis'.format(_)]['type']='log' else: if kwargs['log{0}'.format(_)]: layout['{0}axis'.format(_)]['type']='log' # Shapes if any(k in kwargs for k in ['vline','hline','shapes','hspan','vspan']): shapes=[] def get_shapes(xline): orientation=xline[0] xline=kwargs[xline] if isinstance(xline,list): for x_i in xline: if isinstance(x_i,dict): x_i['kind']='line' shapes.append(get_shape(**x_i)) else: if orientation=='h': shapes.append(get_shape(kind='line',y=x_i)) else: shapes.append(get_shape(kind='line',x=x_i)) elif isinstance(xline,dict): shapes.append(get_shape(**xline)) else: if orientation=='h': shapes.append(get_shape(kind='line',y=xline)) else: shapes.append(get_shape(kind='line',x=xline)) def get_span(xspan): orientation=xspan[0] xspan=kwargs[xspan] if isinstance(xspan,list): for x_i in xspan: if isinstance(x_i,dict): x_i['kind']='rect' shapes.append(get_shape(**x_i)) else: v0,v1=x_i if orientation=='h': shapes.append(get_shape(kind='rect',y0=v0,y1=v1,fill=True,opacity=.5)) else: shapes.append(get_shape(kind='rect',x0=v0,x1=v1,fill=True,opacity=.5)) elif isinstance(xspan,dict): xspan['kind']='rect' shapes.append(get_shape(**xspan)) elif isinstance(xspan,tuple): v0,v1=xspan if orientation=='h': shapes.append(get_shape(kind='rect',y0=v0,y1=v1,fill=True,opacity=.5)) else: shapes.append(get_shape(kind='rect',x0=v0,x1=v1,fill=True,opacity=.5)) else: raise Exception('Invalid value for {0}span: {1}'.format(orientation,xspan)) if 'hline' in kwargs: get_shapes('hline') if 'vline' in kwargs: get_shapes('vline') if 'hspan' in kwargs: get_span('hspan') if 'vspan' in kwargs: get_span('vspan') if 'shapes' in kwargs: shapes_=kwargs['shapes'] if isinstance(shapes_,list): for i in shapes_: shp=i if 'type' in i else get_shape(**i) shapes.append(shp) elif isinstance(shapes_,dict): shp=shapes_ if 'type' in shapes_ else get_shape(**shapes_) shapes.append(shp) else: raise Exception("Shapes need to be either a dict or list of dicts") layout['shapes']=shapes # Maps if kind in ('choropleth','scattergeo'): kw=check_kwargs(kwargs,__GEO_KWARGS) defaults={'projection':{'type':'equirectangular'},'showframe':False,'showcoastlines':False} for k,v in list(defaults.items()): if k not in kw: kw[k]=v kw_=kwargs_from_keyword(kw,{},'projection') deep_update(kw,kw_) layout['geo']=kw del layout['xaxis'] del layout['yaxis'] if not margin: layout['margin']={'autoexpand':True} # Range Selector if 'rangeselector' in kwargs: rs=kwargs['rangeselector'] if 'axis' in rs: axis=rs['axis'] del rs['axis'] else: axis='xaxis' layout[axis]['rangeselector']=get_range_selector(**rs) # Range Slider if 'rangeslider' in kwargs: if type(kwargs['rangeslider'])==bool: if kwargs['rangeslider']: layout['xaxis']['rangeslider']=dict(visible=kwargs['rangeslider']) else: 
layout['xaxis']['rangeslider']=dict(visible=False) # layout['yaxis1'].update(domain=(0,0)) else: layout['xaxis']['rangeslider']=kwargs['rangeslider'] else: if kind in ('ohlc','candle','candlestick'): layout['xaxis']['rangeslider']=dict(visible=False) # layout['yaxis1'].update(domain=(0,0)) # Explicit Updates if 'layout_update' in kwargs: layout=deep_update(layout,kwargs['layout_update']) return layout
Generates a plotly Layout Parameters: ----------- theme : string Layout Theme solar pearl white title : string Chart Title xTitle : string X Axis Title yTitle : string Y Axis Title zTitle : string Z Axis Title Applicable only for 3d charts barmode : string Mode when displaying bars group stack overlay bargap : float Sets the gap between bars [0,1) Applicabe for bar and histogram plots bargroupgap : float Set the gap between groups [0,1) Applicabe for bar and histogram plots gridcolor : string grid color zerolinecolor : string zero line color margin : dict or tuple Dictionary (l,r,b,t) or Tuple containing the left, right, bottom and top margins dimensions : tuple Dimensions of figure annotations : dict or list Dictionary of annotations {x_point : text} or List of Plotly Annotations is3d : bool Indicates if the layout is for a 3D chart Other Kwargs ============ Shapes hline : int, list or dict Draws a horizontal line at the indicated y position(s) Extra parameters can be passed in the form of a dictionary (see shapes) vline : int, list or dict Draws a vertical line at the indicated x position(s) Extra parameters can be passed in the form of a dictionary (see shapes) hspan : (y0,y1) Draws a horizontal rectangle at the indicated (y0,y1) positions. Extra parameters can be passed in the form of a dictionary (see shapes) vspan : (x0,x1) Draws a vertical rectangle at the indicated (x0,x1) positions. Extra parameters can be passed in the form of a dictionary (see shapes) shapes : dict or list(dict) List of dictionaries with the specifications of a given shape. See help(cufflinks.tools.get_shape) for more information Axis Ranges xrange : [lower_bound,upper_bound] Sets the range for the x axis yrange : [lower_bound,upper_bound] Sets the range for the y axis zrange : [lower_bound,upper_bound] Sets the range for the z axis Explicit Layout Updates layout_update : dict The layout will be modified with all the explicit values stated in the dictionary Range Selector rangeselector : dict Defines a rangeselector object see help(cf.tools.get_range_selector) for more information Example: {'steps':['1y','2 months','5 weeks','ytd','2mtd'], 'axis':'xaxis', 'bgcolor' : ('blue',.3), 'x': 0.2 , 'y' : 0.9} Range Slider rangeslider : bool or dict Defines if a rangeslider is displayed If bool: True : Makes it visible if dict: Rangeslider object Example: {'bgcolor':('blue',.3),'autorange':True} Annotations fontcolor : str Text color for annotations fontsize : int Text size for annotations textangle : int Textt angle See https://plot.ly/python/reference/#layout-annotations for a complete list of valid parameters.
def memoized(maxsize=1024):
    """
    Memoization decorator for immutable classes and pure functions.
    """
    cache = SimpleCache(maxsize=maxsize)

    def decorator(obj):
        @wraps(obj)
        def new_callable(*a, **kw):
            def create_new():
                return obj(*a, **kw)
            key = (a, tuple(kw.items()))
            return cache.get(key, create_new)
        return new_callable
    return decorator
Memoization decorator for immutable classes and pure functions.
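A minimal usage sketch of the decorator above: results are cached per distinct argument tuple, so repeated calls with the same arguments are served from the cache instead of being recomputed.

@memoized(maxsize=256)
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))   # each distinct n is computed once, then cached
print(fib(30))   # cache hit, no recursion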
def build_map_type_validator(item_validator): """Return a function which validates that the value is a mapping of items. The function should return pairs of items that will be passed to the `dict` constructor. """ def validate_mapping(value): return dict(item_validator(item) for item in validate_list(value)) return validate_mapping
Return a function which validates that the value is a mapping of items. The function should return pairs of items that will be passed to the `dict` constructor.
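A small usage sketch; it assumes validate_list from the surrounding module simply checks that the value is a list. The host/port item validator below is a hypothetical example, not part of the original code.

def validate_host_port(item):
    # Item validator: returns a (key, value) pair for the dict constructor.
    host, port = item.split(':')
    return host, int(port)

validate_host_map = build_map_type_validator(validate_host_port)
print(validate_host_map(['db:5432', 'cache:6379']))   # {'db': 5432, 'cache': 6379}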
def calc_remotedemand_v1(self):
    """Estimate the discharge demand of a cross section far downstream.

    Required control parameter:
      |RemoteDischargeMinimum|

    Required derived parameters:
      |dam_derived.TOY|

    Required flux sequence:
      |NaturalRemoteDischarge|

    Calculated flux sequence:
      |RemoteDemand|

    Basic equation:
      :math:`RemoteDemand =
      max(RemoteDischargeMinimum - NaturalRemoteDischarge, 0)`

    Examples:

        Low water elevation is often restricted to specific months of the
        year.  Sometimes the pursued lowest discharge value varies over the
        year to allow for a low flow variability that is in some agreement
        with the natural flow regime.  The HydPy-Dam model supports such
        variations.  Hence we define a short simulation time period first.
        This enables us to show how the related parameter values can be
        defined and how the calculation of the `remote` water demand
        throughout the year actually works:

        >>> from hydpy import pub
        >>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'

        Prepare the dam model:

        >>> from hydpy.models.dam import *
        >>> parameterstep()

        Assume the required discharge at a gauge downstream is 2 m³/s in the
        hydrological summer half-year (April to October).  In the winter
        months (November to March), there is no such requirement:

        >>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
        ...                        _04_1_12=2.0, _10_31_12=2.0)
        >>> derived.toy.update()

        Prepare a test function that calculates the remote discharge demand
        based on the parameter values defined above and for natural remote
        discharge values ranging between 0 and 3 m³/s:

        >>> from hydpy import UnitTest
        >>> test = UnitTest(model, model.calc_remotedemand_v1, last_example=4,
        ...                 parseqs=(fluxes.naturalremotedischarge,
        ...                          fluxes.remotedemand))
        >>> test.nexts.naturalremotedischarge = range(4)

        On April 1, the required discharge is 2 m³/s:

        >>> model.idx_sim = pub.timegrids.init['2001.04.01']
        >>> test()
        | ex. | naturalremotedischarge | remotedemand |
        -----------------------------------------------
        |   1 |                    0.0 |          2.0 |
        |   2 |                    1.0 |          1.0 |
        |   3 |                    2.0 |          0.0 |
        |   4 |                    3.0 |          0.0 |

        On March 31, the required discharge is 0 m³/s:

        >>> model.idx_sim = pub.timegrids.init['2001.03.31']
        >>> test()
        | ex. | naturalremotedischarge | remotedemand |
        -----------------------------------------------
        |   1 |                    0.0 |          0.0 |
        |   2 |                    1.0 |          0.0 |
        |   3 |                    2.0 |          0.0 |
        |   4 |                    3.0 |          0.0 |
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    flu.remotedemand = max(con.remotedischargeminimum[der.toy[self.idx_sim]] -
                           flu.naturalremotedischarge, 0.)
Estimate the discharge demand of a cross section far downstream.

Required control parameter:
  |RemoteDischargeMinimum|

Required derived parameters:
  |dam_derived.TOY|

Required flux sequence:
  |NaturalRemoteDischarge|

Calculated flux sequence:
  |RemoteDemand|

Basic equation:
  :math:`RemoteDemand =
  max(RemoteDischargeMinimum - NaturalRemoteDischarge, 0)`

Examples:

    Low water elevation is often restricted to specific months of the year.
    Sometimes the pursued lowest discharge value varies over the year to
    allow for a low flow variability that is in some agreement with the
    natural flow regime.  The HydPy-Dam model supports such variations.
    Hence we define a short simulation time period first.  This enables us
    to show how the related parameter values can be defined and how the
    calculation of the `remote` water demand throughout the year actually
    works:

    >>> from hydpy import pub
    >>> pub.timegrids = '2001.03.30', '2001.04.03', '1d'

    Prepare the dam model:

    >>> from hydpy.models.dam import *
    >>> parameterstep()

    Assume the required discharge at a gauge downstream is 2 m³/s in the
    hydrological summer half-year (April to October).  In the winter months
    (November to March), there is no such requirement:

    >>> remotedischargeminimum(_11_1_12=0.0, _03_31_12=0.0,
    ...                        _04_1_12=2.0, _10_31_12=2.0)
    >>> derived.toy.update()

    Prepare a test function that calculates the remote discharge demand
    based on the parameter values defined above and for natural remote
    discharge values ranging between 0 and 3 m³/s:

    >>> from hydpy import UnitTest
    >>> test = UnitTest(model, model.calc_remotedemand_v1, last_example=4,
    ...                 parseqs=(fluxes.naturalremotedischarge,
    ...                          fluxes.remotedemand))
    >>> test.nexts.naturalremotedischarge = range(4)

    On April 1, the required discharge is 2 m³/s:

    >>> model.idx_sim = pub.timegrids.init['2001.04.01']
    >>> test()
    | ex. | naturalremotedischarge | remotedemand |
    -----------------------------------------------
    |   1 |                    0.0 |          2.0 |
    |   2 |                    1.0 |          1.0 |
    |   3 |                    2.0 |          0.0 |
    |   4 |                    3.0 |          0.0 |

    On March 31, the required discharge is 0 m³/s:

    >>> model.idx_sim = pub.timegrids.init['2001.03.31']
    >>> test()
    | ex. | naturalremotedischarge | remotedemand |
    -----------------------------------------------
    |   1 |                    0.0 |          0.0 |
    |   2 |                    1.0 |          0.0 |
    |   3 |                    2.0 |          0.0 |
    |   4 |                    3.0 |          0.0 |
def create_frames(until=None): """Create frames available in the JPL files Args: until (str): Name of the body you want to create the frame of, and all frames in between. If ``None`` all the frames available in the .bsp files will be created Example: .. code-block:: python # All frames between Earth and Mars are created (Earth, EarthBarycenter, # SolarSystemBarycenter, MarsBarycenter and Mars) create_frames(until='Mars') # All frames between Earth and Phobos are created (Earth, EarthBarycenter, # SolarSystemBarycenter, MarsBarycenter and Phobos) create_frames(until='Phobos') # All frames available in the .bsp files are created create_frames() """ now = Date.now() if until: get_orbit(until, now) else: for body in list_bodies(): get_orbit(body.name, now)
Create frames available in the JPL files Args: until (str): Name of the body you want to create the frame of, and all frames in between. If ``None`` all the frames available in the .bsp files will be created Example: .. code-block:: python # All frames between Earth and Mars are created (Earth, EarthBarycenter, # SolarSystemBarycenter, MarsBarycenter and Mars) create_frames(until='Mars') # All frames between Earth and Phobos are created (Earth, EarthBarycenter, # SolarSystemBarycenter, MarsBarycenter and Phobos) create_frames(until='Phobos') # All frames available in the .bsp files are created create_frames()
def edit_item(self): """Edit item""" index = self.currentIndex() if not index.isValid(): return # TODO: Remove hard coded "Value" column number (3 here) self.edit(index.child(index.row(), 3))
Edit item
def _find_cont_fitfunc(fluxes, ivars, contmask, deg, ffunc, n_proc=1):
    """ Fit a continuum to the continuum pixels in a segment of spectra

    Functional form can be either sinusoid or chebyshev, with specified degree

    Parameters
    ----------
    fluxes: numpy ndarray of shape (nstars, npixels)
        training set or test set pixel intensities

    ivars: numpy ndarray of shape (nstars, npixels)
        inverse variances, parallel to fluxes

    contmask: numpy ndarray of length (npixels)
        boolean pixel mask, True indicates that pixel is continuum

    deg: int
        degree of fitting function

    ffunc: str
        type of fitting function, chebyshev or sinusoid

    n_proc: int
        number of processes to use (1 runs serially, >1 uses multiprocessing)

    Returns
    -------
    cont: numpy ndarray of shape (nstars, npixels)
        the continuum, parallel to fluxes
    """
    nstars = fluxes.shape[0]
    npixels = fluxes.shape[1]

    cont = np.zeros(fluxes.shape)

    if n_proc == 1:
        for jj in range(nstars):
            flux = fluxes[jj,:]
            ivar = ivars[jj,:]
            pix = np.arange(0, npixels)
            y = flux[contmask]
            x = pix[contmask]
            yivar = ivar[contmask]
            yivar[yivar == 0] = SMALL**2
            if ffunc=="sinusoid":
                p0 = np.ones(deg*2) # one for cos, one for sin
                L = max(x)-min(x)
                pcont_func = _partial_func(_sinusoid, L=L, y=flux)
                popt, pcov = opt.curve_fit(pcont_func, x, y, p0=p0,
                                           sigma=1./np.sqrt(yivar))
            elif ffunc=="chebyshev":
                fit = np.polynomial.chebyshev.Chebyshev.fit(x=x, y=y, w=yivar, deg=deg)
            for element in pix:
                if ffunc=="sinusoid":
                    cont[jj,element] = _sinusoid(element, popt, L=L, y=flux)
                elif ffunc=="chebyshev":
                    cont[jj,element] = fit(element)
    else:
        # start mp.Pool
        pool = mp.Pool(processes=n_proc)
        mp_results = []
        for i in xrange(nstars):
            mp_results.append(pool.apply_async(
                _find_cont_fitfunc,
                (fluxes[i, :].reshape((1, -1)),
                 ivars[i, :].reshape((1, -1)),
                 contmask[:]),
                {'deg': deg, 'ffunc': ffunc}))

        # close mp.Pool
        pool.close()
        pool.join()

        cont = np.array([mp_results[i].get().flatten() for i in xrange(nstars)])

    return cont
Fit a continuum to the continuum pixels in a segment of spectra

Functional form can be either sinusoid or chebyshev, with specified degree

Parameters
----------
fluxes: numpy ndarray of shape (nstars, npixels)
    training set or test set pixel intensities

ivars: numpy ndarray of shape (nstars, npixels)
    inverse variances, parallel to fluxes

contmask: numpy ndarray of length (npixels)
    boolean pixel mask, True indicates that pixel is continuum

deg: int
    degree of fitting function

ffunc: str
    type of fitting function, chebyshev or sinusoid

n_proc: int
    number of processes to use (1 runs serially, >1 uses multiprocessing)

Returns
-------
cont: numpy ndarray of shape (nstars, npixels)
    the continuum, parallel to fluxes
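An illustrative sketch of the Chebyshev branch on a single synthetic spectrum. It uses plain NumPy rather than the library's internal helpers, so it is a conceptual example of the fitting step, not the actual call path; all array values are made up.

import numpy as np

npix = 500
pix = np.arange(npix)
flux = 1.0 + 0.001 * pix + np.random.normal(0, 0.01, npix)
flux[200:210] -= 0.3                       # fake absorption feature
contmask = np.ones(npix, dtype=bool)
contmask[195:215] = False                  # exclude the feature from the continuum fit
ivar = np.full(npix, 1.0e4)                # uniform inverse variance

fit = np.polynomial.chebyshev.Chebyshev.fit(
    x=pix[contmask], y=flux[contmask], w=ivar[contmask], deg=3)
cont = fit(pix)                            # evaluated continuum, parallel to flux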
def _polar(self): """The "hidden" polar axis used for azimuth labels.""" # This will be called inside LambertAxes.__init__ as well as every # time the axis is cleared, so we need the try/except to avoid having # multiple hidden axes when `cla` is _manually_ called. try: return self._hidden_polar_axes except AttributeError: fig = self.get_figure() self._hidden_polar_axes = fig.add_axes(self.get_position(True), frameon=False, projection='polar') self._hidden_polar_axes.format_coord = self._polar_format_coord return self._hidden_polar_axes
The "hidden" polar axis used for azimuth labels.
def covar_plotter3d_plotly(embedding, rieman_metric, inspect_points_idx, colors,
                           **kwargs):
    """3 Dimensional Covariance plotter using plotly backend."""
    def rgb2hex(rgb):
        return '#%02x%02x%02x' % tuple(rgb)
    return [
        plt_data for idx in inspect_points_idx
        for plt_data in plot_ellipse_plotly(
            rieman_metric[idx], embedding[idx],
            color=rgb2hex(colors[idx]), **kwargs)
    ]
3 Dimensional Covariance plotter using plotly backend.
def make_input_from_plain_string(sentence_id: SentenceId, string: str) -> TranslatorInput: """ Returns a TranslatorInput object from a plain string. :param sentence_id: Sentence id. :param string: An input string. :return: A TranslatorInput. """ return TranslatorInput(sentence_id, tokens=list(data_io.get_tokens(string)), factors=None)
Returns a TranslatorInput object from a plain string. :param sentence_id: Sentence id. :param string: An input string. :return: A TranslatorInput.
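A hedged usage sketch; it assumes the sockeye inference module that defines TranslatorInput and data_io.get_tokens, which splits the string on whitespace.

trans_input = make_input_from_plain_string(sentence_id=0, string="hello world !")
print(trans_input.tokens)   # ['hello', 'world', '!']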
def delete(self, name):
    '''
    Delete time series by name across all intervals. Returns the number of
    records deleted.
    '''
    conn = self._client.connect()
    # Execute the delete and report how many rows were removed, as the
    # docstring promises.
    result = conn.execute(
        self._table.delete().where(self._table.c.name==name)
    )
    return result.rowcount
Delete time series by name across all intervals. Returns the number of records deleted.
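A minimal SQLAlchemy sketch showing where a deleted-row count comes from; the in-memory engine, table, and column names are illustrative, not the library's actual schema.

from sqlalchemy import create_engine, MetaData, Table, Column, String, Integer

engine = create_engine('sqlite://')
meta = MetaData()
series = Table('series', meta, Column('name', String), Column('value', Integer))
meta.create_all(engine)

with engine.connect() as conn:
    conn.execute(series.insert(), [{'name': 'cpu', 'value': 1},
                                   {'name': 'cpu', 'value': 2}])
    result = conn.execute(series.delete().where(series.c.name == 'cpu'))
    print(result.rowcount)   # 2 rows deleted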
def max_profit_optimized(prices): """ input: [7, 1, 5, 3, 6, 4] diff : [X, -6, 4, -2, 3, -2] :type prices: List[int] :rtype: int """ cur_max, max_so_far = 0, 0 for i in range(1, len(prices)): cur_max = max(0, cur_max + prices[i] - prices[i-1]) max_so_far = max(max_so_far, cur_max) return max_so_far
input: [7, 1, 5, 3, 6, 4] diff : [X, -6, 4, -2, 3, -2] :type prices: List[int] :rtype: int
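A quick worked example of the function above: with [7, 1, 5, 3, 6, 4] the best single trade buys at 1 and sells at 6, so the answer is 5; monotonically falling prices yield 0.

print(max_profit_optimized([7, 1, 5, 3, 6, 4]))   # 5  (buy at 1, sell at 6)
print(max_profit_optimized([7, 6, 4, 3, 1]))      # 0  (no profitable trade)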
def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
    '''
    Finds a view function, class-based view, or template view.
    Raises ViewDoesNotExist if not found.
    '''
    dmp = apps.get_app_config('django_mako_plus')

    # I'm calling find_spec first here because I don't want import_module in
    # a try/except -- there are lots of reasons that importing can fail, and I just want to
    # know whether the file actually exists.  find_spec raises AttributeError if not found.
    try:
        spec = find_spec(module_name)
    except ValueError:
        spec = None
    if spec is None:
        # no view module, so create a view function that directly renders the template
        try:
            return create_view_for_template(fallback_app, fallback_template)
        except TemplateDoesNotExist as e:
            raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))

    # load the module and function
    try:
        module = import_module(module_name)
        func = getattr(module, function_name)
        func.view_type = 'function'
    except ImportError as e:
        raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
    except AttributeError as e:
        raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))

    # if class-based view, call as_view() to get a view function to it
    if inspect.isclass(func) and issubclass(func, View):
        func = func.as_view()
        func.view_type = 'class'

    # if regular view function, check the decorator
    elif verify_decorator and not view_function.is_decorated(func):
        raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))

    # attach a converter to the view function
    if dmp.options['PARAMETER_CONVERTER'] is not None:
        try:
            converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
            setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
        except ImportError as e:
            raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))

    # return the function/class
    return func
Finds a view function, class-based view, or template view. Raises ViewDoesNotExist if not found.
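A hedged usage sketch of the resolver above; the module path, function name, fallback values, and request object are illustrative placeholders, not values taken from the original code.

view = find_view_function('homepage.views.index', 'process_request',
                          fallback_app='homepage', fallback_template='index.html')
response = view(request)   # 'request' is assumed to be supplied by the router calling this helper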
def get_timedelta_str(timedelta, exclude_zeros=False):
    """
    get_timedelta_str

    Returns:
        str: timedelta_str, formatted time string

    References:
        http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_time import *  # NOQA
        >>> timedelta = get_unix_timedelta(10)
        >>> timedelta_str = get_timedelta_str(timedelta)
        >>> result = (timedelta_str)
        >>> print(result)
        10 seconds
    """
    if timedelta == datetime.timedelta(0):
        return '0 seconds'
    days = timedelta.days
    hours, rem = divmod(timedelta.seconds, 3600)
    minutes, seconds = divmod(rem, 60)
    fmtstr_list = []
    fmtdict = {}

    def append_cases(unit, fmtlbl):
        if not exclude_zeros or unit != 0:
            if unit == 1:
                fmtstr_list.append('{%s} %s' % (fmtlbl, fmtlbl))
            else:
                fmtstr_list.append('{%s} %ss' % (fmtlbl, fmtlbl))
            fmtdict[fmtlbl] = unit

    if abs(days) > 0:
        append_cases(days, 'day')
    if len(fmtstr_list) > 0 or abs(hours) > 0:
        append_cases(hours, 'hour')
    if len(fmtstr_list) > 0 or abs(minutes) > 0:
        append_cases(minutes, 'minute')
    if len(fmtstr_list) > 0 or abs(seconds) > 0:
        append_cases(seconds, 'second')
    fmtstr = ' '.join(fmtstr_list)
    timedelta_str = fmtstr.format(**fmtdict)
    return timedelta_str
get_timedelta_str

Returns:
    str: timedelta_str, formatted time string

References:
    http://stackoverflow.com/questions/8906926/formatting-python-timedelta-objects

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_time import *  # NOQA
    >>> timedelta = get_unix_timedelta(10)
    >>> timedelta_str = get_timedelta_str(timedelta)
    >>> result = (timedelta_str)
    >>> print(result)
    10 seconds
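Two quick usage examples of the formatter above; the expected strings follow from the pluralization and exclude_zeros rules in the code.

import datetime

print(get_timedelta_str(datetime.timedelta(days=2, hours=3, seconds=5)))
# -> '2 days 3 hours 0 minutes 5 seconds'
print(get_timedelta_str(datetime.timedelta(hours=1, minutes=30), exclude_zeros=True))
# -> '1 hour 30 minutes'  (the trailing zero seconds are dropped)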
def from_response(cls, response, attrs): """ Create an index from returned Dynamo data """ proj = response['Projection'] index = cls(proj['ProjectionType'], response['IndexName'], attrs[response['KeySchema'][1]['AttributeName']], proj.get('NonKeyAttributes')) index.response = response return index
Create an index from returned Dynamo data
def patch_clean_fields(model): """ Patch clean_fields method to handle different form types submission. """ old_clean_fields = model.clean_fields def new_clean_fields(self, exclude=None): if hasattr(self, '_mt_form_pending_clear'): # Some form translation fields has been marked as clearing value. # Check if corresponding translated field was also saved (not excluded): # - if yes, it seems like form for MT-unaware app. Ignore clearing (left value from # translated field unchanged), as if field was omitted from form # - if no, then proceed as normally: clear the field for field_name, value in self._mt_form_pending_clear.items(): field = self._meta.get_field(field_name) orig_field_name = field.translated_field.name if orig_field_name in exclude: field.save_form_data(self, value, check=False) delattr(self, '_mt_form_pending_clear') old_clean_fields(self, exclude) model.clean_fields = new_clean_fields
Patch clean_fields method to handle different form types submission.
def get(self, name):
    """Returns an interface as a set of key/value pairs

    Args:
        name (string): the interface identifier to retrieve from
            the configuration

    Returns:
        A Python dictionary object of key/value pairs that represent
        the current configuration for the specified node.  If the
        specified interface name does not exist, then None is returned::

            {
                "name": <string>,
                "type": "ethernet",
                "sflow": [true, false],
                "flowcontrol_send": [on, off],
                "flowcontrol_receive": [on, off]
            }

    """
    config = self.get_block('^interface %s' % name)

    if not config:
        return None

    resource = super(EthernetInterface, self).get(name)
    resource.update(dict(name=name, type='ethernet'))

    resource.update(self._parse_sflow(config))
    resource.update(self._parse_flowcontrol_send(config))
    resource.update(self._parse_flowcontrol_receive(config))

    return resource
Returns an interface as a set of key/value pairs

Args:
    name (string): the interface identifier to retrieve from
        the configuration

Returns:
    A Python dictionary object of key/value pairs that represent
    the current configuration for the specified node.  If the
    specified interface name does not exist, then None is returned::

        {
            "name": <string>,
            "type": "ethernet",
            "sflow": [true, false],
            "flowcontrol_send": [on, off],
            "flowcontrol_receive": [on, off]
        }
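A hedged usage sketch, assuming the pyeapi client that this resource class appears to belong to; the connection profile and interface names are illustrative.

import pyeapi

node = pyeapi.connect_to('veos01')          # profile defined in eapi.conf
intf = node.api('interfaces').get('Ethernet1')
if intf is not None:
    print(intf['type'], intf['sflow'], intf['flowcontrol_send'])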