Dataset schema (column name, dtype, observed value/length range):

    max_stars_repo_path    string    lengths 3 to 269
    max_stars_repo_name    string    lengths 4 to 119
    max_stars_count        int64     0 to 191k
    id                     string    lengths 1 to 7
    content                string    lengths 6 to 1.05M
    score                  float64   0.23 to 5.13
    int_score              int64     0 to 5
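Each record below repeats these seven fields in order: max_stars_repo_path, max_stars_repo_name, max_stars_count, id, content (the full source file), score, int_score. The layout matches a Hugging Face-style datasets dump, so, as a hedged sketch, a split with these columns could be loaded and filtered roughly as follows; the dataset path "user/scored-python-files" and the split name are placeholders, not the actual source of this dump.

# Minimal sketch, assuming a `datasets`-compatible dataset with the columns above.
# The dataset path and split name are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("user/scored-python-files", split="train")

# Keep only records the scorer rated 3 or higher, then peek at one of them.
high_quality = ds.filter(lambda row: row["int_score"] >= 3)
example = high_quality[0]
print(example["max_stars_repo_name"], example["max_stars_repo_path"], example["score"])
print(example["content"][:200])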
test/crossrunner/compat.py
BluechipSystems/thrift
0
900
import os
import sys

if sys.version_info[0] == 2:
    _ENCODE = sys.getfilesystemencoding()

    def path_join(*args):
        bin_args = map(lambda a: a.decode(_ENCODE), args)
        return os.path.join(*bin_args).encode(_ENCODE)

    def str_join(s, l):
        bin_args = map(lambda a: a.decode(_ENCODE), l)
        b = s.decode(_ENCODE)
        return b.join(bin_args).encode(_ENCODE)

    logfile_open = open

else:
    path_join = os.path.join
    str_join = str.join

    def logfile_open(*args):
        return open(*args, errors='replace')
2.96875
3
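A brief usage sketch for the compatibility helpers in record 900 above: on Python 3 they collapse to the standard-library equivalents, while on Python 2 they round-trip byte strings through the filesystem encoding. It assumes the file above is importable as compat (e.g. copied next to the script); the log filename is a throwaway example.

# Hedged usage sketch; assumes record 900 above is saved locally as compat.py.
from compat import path_join, str_join, logfile_open

print(path_join("test", "crossrunner"))     # 'test/crossrunner' on POSIX
print(str_join("-", ["thrift", "compat"]))  # 'thrift-compat'

# On Python 3, logfile_open() replaces undecodable bytes instead of raising.
with logfile_open("example.log", "w") as handle:
    handle.write("ok\n")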
test/test_vom.py
usamaahmadkhan/vpp
0
901
#!/usr/bin/env python
""" VAPI test """

import unittest
import os
import signal
from framework import VppTestCase, running_extended_tests, \
    VppTestRunner, Worker


@unittest.skipUnless(running_extended_tests(), "part of extended tests")
class VOMTestCase(VppTestCase):
    """ VPP Object Model Test """

    def test_vom_cpp(self):
        """ run C++ VOM tests """
        var = "TEST_DIR"
        built_root = os.getenv(var, None)
        self.assertIsNotNone(built_root,
                             "Environment variable `%s' not set" % var)
        executable = "%s/build/vom_test/vom_test" % built_root
        worker = Worker(
            [executable, "vpp object model", self.shm_prefix], self.logger)
        worker.start()
        timeout = 120
        worker.join(timeout)
        self.logger.info("Worker result is `%s'" % worker.result)
        error = False
        if worker.result is None:
            try:
                error = True
                self.logger.error(
                    "Timeout! Worker did not finish in %ss" % timeout)
                os.killpg(os.getpgid(worker.process.pid), signal.SIGTERM)
                worker.join()
            except:
                raise Exception("Couldn't kill worker-spawned process")
        if error:
            raise Exception(
                "Timeout! Worker did not finish in %ss" % timeout)
        self.assert_equal(worker.result, 0, "Binary test return code")


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
2.265625
2
locations/spiders/tesco.py
bealbrown/allhours
0
902
<reponame>bealbrown/allhours<filename>locations/spiders/tesco.py
import json
import re

import scrapy

from locations.hourstudy import inputoutput

DAYS = {
    'mo': 'Mo',
    'tu': 'Tu',
    'we': 'We',
    'fr': 'Fr',
    'th': 'Th',
    'sa': 'Sa',
    'su': 'Su',
}


class TescoSpider(scrapy.Spider):
    name = "tesco"
    allowed_domains = ["tescolocation.api.tesco.com"]

    def store_hours(self, store_hours):
        clean_time = ''
        for key, value in store_hours.items():
            if ('isOpen' in value and 'open' in value and 'close' in value):
                if (value['isOpen'] == 'true'):
                    clean_time = clean_time + DAYS[key] + ' ' + value['open'][0:2] + ':' + value['open'][2:] + '-' + value['close'][0:2] + ':' + value['close'][2:] + ';'
                else:
                    clean_time = clean_time + DAYS[key] + ' ' + 'Closed' + ';'
        return clean_time

    def start_requests(self):
        url = 'https://tescolocation.api.tesco.com/v3/locations/search?offset=0&limit=1000000&sort=near:%2251.499207299999995,-0.08800609999999999%22&filter=category:Store%20AND%20isoCountryCode:x-uk&fields=name,geo,openingHours,altIds.branchNumber,contact'

        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'Origin': 'https://www.tesco.com',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Referer': 'https://www.kfc.com/store-locator?query=90210',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'X-Requested-With': 'XMLHttpRequest',
            'x-appkey': 'store-locator-web-cde'
        }

        yield scrapy.http.FormRequest(
            url=url, method='GET', headers=headers, callback=self.parse
        )

    def parse(self, response):
        data = json.loads(response.body_as_unicode())
        stores = data['results']
        for store in stores:
            addr_full = ''
            for add in store['location']['contact']['address']['lines']:
                addr_full = addr_full + ' ' + add['text']
            properties = {
                'ref': store['location']['id'],
                'name': store['location']['name'],
                'addr_full': addr_full,
                'city': store['location']['contact']['address']['town'],
                'state': '',
                'country': 'United Kingdom',
                'postcode': store['location']['contact']['address']['postcode'],
                'lat': store['location']['geo']['coordinates']['latitude'],
                'lon': store['location']['geo']['coordinates']['longitude'],
                'phone': store['location']['contact']['phoneNumbers'][0]['number'],
            }

            opening_hours = self.store_hours(store['location']['openingHours'][0]['standardOpeningHours'])
            if opening_hours:
                properties['opening_hours'] = opening_hours

            raw = store['location']['openingHours'][0]['standardOpeningHours']
            formatted = opening_hours
            yield inputoutput(raw, formatted)
            # yield inputoutput(**properties)
2.8125
3
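The store_hours() method in the Tesco spider above (record 902) flattens the API's per-day standardOpeningHours dict into a compact string such as 'Mo 08:00-22:00;'. A hedged check of that formatting is below; it calls the method unbound so no crawl is started, but it still assumes scrapy and the allhours repository layout are importable, and the sample payload is made up.

# Illustrative only; assumes the allhours project (and scrapy) are on the path.
from locations.spiders.tesco import TescoSpider

# Hypothetical payload shaped like standardOpeningHours in the API response.
sample = {
    'mo': {'isOpen': 'true', 'open': '0800', 'close': '2200'},
    'su': {'isOpen': 'false', 'open': '', 'close': ''},
}

# 'self' is unused by store_hours, so an unbound call works for a quick check.
print(TescoSpider.store_hours(None, sample))  # -> 'Mo 08:00-22:00;Su Closed;'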
astropy/table/serialize.py
tacaswell/astropy
1
903
from importlib import import_module import re from copy import deepcopy from collections import OrderedDict from astropy.utils.data_info import MixinInfo from .column import Column from .table import Table, QTable, has_info_class from astropy.units.quantity import QuantityInfo __construct_mixin_classes = ('astropy.time.core.Time', 'astropy.time.core.TimeDelta', 'astropy.units.quantity.Quantity', 'astropy.coordinates.angles.Latitude', 'astropy.coordinates.angles.Longitude', 'astropy.coordinates.angles.Angle', 'astropy.coordinates.distances.Distance', 'astropy.coordinates.earth.EarthLocation', 'astropy.coordinates.sky_coordinate.SkyCoord', 'astropy.table.table.NdarrayMixin', 'astropy.table.column.MaskedColumn') class SerializedColumn(dict): """ Subclass of dict that is a used in the representation to contain the name (and possible other info) for a mixin attribute (either primary data or an array-like attribute) that is serialized as a column in the table. Normally contains the single key ``name`` with the name of the column in the table. """ pass def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()): """Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized. """ obj_attrs = col.info._represent_as_dict() ordered_keys = col.info._represent_as_dict_attrs # If serialization is not required (see function docstring above) # or explicitly specified as excluded, then treat as a normal column. if not obj_attrs or col.__class__ in exclude_classes: new_cols.append(col) return # Subtlety here is handling mixin info attributes. The basic list of such # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. 
# - name: handled directly [DON'T store] # - unit: DON'T store if this is a parent attribute # - dtype: captured in plain Column if relevant [DON'T store] # - format: possibly irrelevant but settable post-object creation [DO store] # - description: DO store # - meta: DO store info = {} for attr, nontrivial, xform in (('unit', lambda x: x is not None and x != '', str), ('format', lambda x: x is not None, None), ('description', lambda x: x is not None, None), ('meta', lambda x: x, None)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = xform(col_attr) if xform else col_attr data_attrs = [key for key in ordered_keys if key in obj_attrs and getattr(obj_attrs[key], 'shape', ())[:1] == col.shape[:1]] for data_attr in data_attrs: data = obj_attrs[data_attr] # New column name combines the old name and attribute # (e.g. skycoord.ra, skycoord.dec).unless it is the primary data # attribute for the column (e.g. value for Quantity or data # for MaskedColumn) if data_attr == col.info._represent_as_dict_primary_data: new_name = name else: new_name = name + '.' + data_attr if not has_info_class(data, MixinInfo): new_cols.append(Column(data, name=new_name, **info)) obj_attrs[data_attr] = SerializedColumn({'name': new_name}) else: # recurse. This will define obj_attrs[new_name]. _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) obj_attrs[data_attr] = SerializedColumn(obj_attrs.pop(new_name)) # Strip out from info any attributes defined by the parent for attr in col.info.attrs_from_parent: if attr in info: del info[attr] if info: obj_attrs['__info__'] = info # Store the fully qualified class name obj_attrs['__class__'] = col.__module__ + '.' + col.__class__.__name__ mixin_cols[name] = obj_attrs def represent_mixins_as_columns(tbl, exclude_classes=()): """Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` to one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. 
Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of classes Exclude any mixin columns which are instannces of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0 """ # Dict of metadata for serializing each column, keyed by column name. # Gets filled in place by _represent_mixin_as_column(). mixin_cols = {} # List of columns for the output table. For plain Column objects # this will just be the original column object. new_cols = [] # Go through table columns and represent each column as one or more # plain Column objects (in new_cols) + metadata (in mixin_cols). for col in tbl.itercols(): _represent_mixin_as_column(col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes) # If no metadata was created then just return the original table. if not mixin_cols: return tbl meta = deepcopy(tbl.meta) meta['__serialized_columns__'] = mixin_cols out = Table(new_cols, meta=meta, copy=False) return out def _construct_mixin_from_obj_attrs_and_info(obj_attrs, info): cls_full_name = obj_attrs.pop('__class__') # If this is a supported class then import the class and run # the _construct_from_col method. Prevent accidentally running # untrusted code by only importing known astropy classes. if cls_full_name not in __construct_mixin_classes: raise ValueError('unsupported class for construct {}'.format(cls_full_name)) mod_name, cls_name = re.match(r'(.+)\.(\w+)', cls_full_name).groups() module = import_module(mod_name) cls = getattr(module, cls_name) for attr, value in info.items(): if attr in cls.info.attrs_from_parent: obj_attrs[attr] = value mixin = cls.info._construct_from_dict(obj_attrs) for attr, value in info.items(): if attr not in obj_attrs: setattr(mixin.info, attr, value) return mixin class _TableLite(OrderedDict): """ Minimal table-like object for _construct_mixin_from_columns. This allows manipulating the object like a Table but without the actual overhead for a full Table. More pressing, there is an issue with constructing MaskedColumn, where the encoded Column components (data, mask) are turned into a MaskedColumn. When this happens in a real table then all other columns are immediately Masked and a warning is issued. This is not desirable. 
""" def add_column(self, col, index=0): colnames = self.colnames self[col.info.name] = col for ii, name in enumerate(colnames): if ii >= index: self.move_to_end(name) @property def colnames(self): return list(self.keys()) def itercols(self): return self.values() def _construct_mixin_from_columns(new_name, obj_attrs, out): data_attrs_map = {} for name, val in obj_attrs.items(): if isinstance(val, SerializedColumn): if 'name' in val: data_attrs_map[val['name']] = name else: _construct_mixin_from_columns(name, val, out) data_attrs_map[name] = name for name in data_attrs_map.values(): del obj_attrs[name] # Get the index where to add new column idx = min(out.colnames.index(name) for name in data_attrs_map) # Name is the column name in the table (e.g. "coord.ra") and # data_attr is the object attribute name (e.g. "ra"). A different # example would be a formatted time object that would have (e.g.) # "time_col" and "value", respectively. for name, data_attr in data_attrs_map.items(): col = out[name] obj_attrs[data_attr] = col del out[name] info = obj_attrs.pop('__info__', {}) if len(data_attrs_map) == 1: # col is the first and only serialized column; in that case, use info # stored on the column. for attr, nontrivial in (('unit', lambda x: x not in (None, '')), ('format', lambda x: x is not None), ('description', lambda x: x is not None), ('meta', lambda x: x)): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr info['name'] = new_name col = _construct_mixin_from_obj_attrs_and_info(obj_attrs, info) out.add_column(col, index=idx) def _construct_mixins_from_columns(tbl): if '__serialized_columns__' not in tbl.meta: return tbl meta = tbl.meta.copy() mixin_cols = meta.pop('__serialized_columns__') out = _TableLite(tbl.columns) for new_name, obj_attrs in mixin_cols.items(): _construct_mixin_from_columns(new_name, obj_attrs, out) # If no quantity subclasses are in the output then output as Table. # For instance ascii.read(file, format='ecsv') doesn't specify an # output class and should return the minimal table class that # represents the table file. has_quantities = any(isinstance(col.info, QuantityInfo) for col in out.itercols()) out_cls = QTable if has_quantities else Table return out_cls(list(out.values()), names=out.colnames, copy=False, meta=meta)
2.3125
2
UVa 573 - The Snail/sample/main.py
tadvi/uva
1
904
'''
Created on Jun 18, 2013

@author: <NAME>
All rights reserved.
'''
import time
from multiprocessing.pool import Pool

parallelSolve = False
infinity = 1 << 30


def solve(par):
    H, U, D, F = par
    day = 0
    amountRise = U
    currH = 0
    while True:
        amountRise = U * (1 - 0.01 * F * day)
        currH += amountRise
        if currH > H:
            return 'success on day %d' % (day + 1)
        currH -= D
        if currH < 0:
            return 'failure on day %d' % (day + 1)
        day += 1


class Solver:
    def getInput(self):
        self.input = []
        self.numOfTests = 0
        while True:
            H, U, D, F = map(int, self.fIn.readline().strip().split())
            if H == 0:
                break
            self.numOfTests += 1
            self.input.append((H, U, D, F))

    def __init__(self):
        self.fIn = open('input.txt')
        self.fOut = open('output.txt', 'w')
        self.results = []

    def parallel(self):
        self.getInput()
        p = Pool(4)
        millis1 = int(round(time.time() * 1000))
        self.results = p.map(solve, self.input)
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def sequential(self):
        self.getInput()
        millis1 = int(round(time.time() * 1000))
        for i in self.input:
            self.results.append(solve(i))
        millis2 = int(round(time.time() * 1000))
        print("Time in milliseconds: %d " % (millis2 - millis1))
        self.makeOutput()

    def makeOutput(self):
        for test in range(self.numOfTests):
            self.fOut.write("Case #%d: %s\n" % (test + 1, self.results[test]))
        self.fIn.close()
        self.fOut.close()


if __name__ == '__main__':
    solver = Solver()
    if parallelSolve:
        solver.parallel()
    else:
        solver.sequential()
3.15625
3
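The solve() routine in record 904 above models UVa 573's fatigued snail: each day's climb shrinks by F percent of the initial climb U, and the verdict is reported for the day the snail clears the well or slides below zero. The stand-alone re-statement below repeats the same recurrence so it can run without the input.txt/output.txt files the original expects; the two test cases are the well-known UVa sample inputs.

# Same recurrence as solve() in record 904, restated so no input.txt is needed.
def snail(H, U, D, F):
    day, height = 0, 0.0
    while True:
        height += U * (1 - 0.01 * F * day)   # fatigued climb during the day
        if height > H:
            return 'success on day %d' % (day + 1)
        height -= D                          # slide back during the night
        if height < 0:
            return 'failure on day %d' % (day + 1)
        day += 1

print(snail(6, 3, 1, 10))  # -> 'success on day 3' (UVa 573 sample case)
print(snail(1, 1, 1, 1))   # -> 'failure on day 2' (UVa 573 sample case)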
scibert/models/text_classifier.py
tomhoper/scibert
1,143
905
from typing import Dict, Optional, List, Any import torch import torch.nn.functional as F from allennlp.data import Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder from allennlp.nn import InitializerApplicator, RegularizerApplicator from allennlp.nn import util from allennlp.training.metrics import CategoricalAccuracy, F1Measure from overrides import overrides @Model.register("text_classifier") class TextClassifier(Model): """ Implements a basic text classifier: 1) Embed tokens using `text_field_embedder` 2) Seq2SeqEncoder, e.g. BiLSTM 3) Append the first and last encoder states 4) Final feedforward layer Optimized with CrossEntropyLoss. Evaluated with CategoricalAccuracy & F1. """ def __init__(self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, text_encoder: Seq2SeqEncoder, classifier_feedforward: FeedForward, verbose_metrics: False, initializer: InitializerApplicator = InitializerApplicator(), regularizer: Optional[RegularizerApplicator] = None, ) -> None: super(TextClassifier, self).__init__(vocab, regularizer) self.text_field_embedder = text_field_embedder self.num_classes = self.vocab.get_vocab_size("labels") self.text_encoder = text_encoder self.classifier_feedforward = classifier_feedforward self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim() , self.num_classes) self.label_accuracy = CategoricalAccuracy() self.label_f1_metrics = {} self.verbose_metrics = verbose_metrics for i in range(self.num_classes): self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i) self.loss = torch.nn.CrossEntropyLoss() self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True) initializer(self) @overrides def forward(self, text: Dict[str, torch.LongTensor], label: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]: """ Parameters ---------- text : Dict[str, torch.LongTensor] From a ``TextField`` label : torch.IntTensor, optional (default = None) From a ``LabelField`` metadata : ``List[Dict[str, Any]]``, optional, (default = None) Metadata containing the original tokenization of the premise and hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively. Returns ------- An output dictionary consisting of: label_logits : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label. label_probs : torch.FloatTensor A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label. loss : torch.FloatTensor, optional A scalar loss to be optimised. 
""" embedded_text = self.text_field_embedder(text) mask = util.get_text_field_mask(text) encoded_text = self.text_encoder(embedded_text, mask) pooled = self.pool(encoded_text, mask) ff_hidden = self.classifier_feedforward(pooled) logits = self.prediction_layer(ff_hidden) class_probs = F.softmax(logits, dim=1) output_dict = {"logits": logits} if label is not None: loss = self.loss(logits, label) output_dict["loss"] = loss # compute F1 per label for i in range(self.num_classes): metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")] metric(class_probs, label) self.label_accuracy(logits, label) return output_dict @overrides def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: class_probabilities = F.softmax(output_dict['logits'], dim=-1) output_dict['class_probs'] = class_probabilities return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: metric_dict = {} sum_f1 = 0.0 for name, metric in self.label_f1_metrics.items(): metric_val = metric.get_metric(reset) if self.verbose_metrics: metric_dict[name + '_P'] = metric_val[0] metric_dict[name + '_R'] = metric_val[1] metric_dict[name + '_F1'] = metric_val[2] sum_f1 += metric_val[2] names = list(self.label_f1_metrics.keys()) total_len = len(names) average_f1 = sum_f1 / total_len metric_dict['average_F1'] = average_f1 metric_dict['accuracy'] = self.label_accuracy.get_metric(reset) return metric_dict
2.59375
3
plugins/template/tasks.py
crotwell/cmd2
469
906
# # -*- coding: utf-8 -*- """Development related tasks to be run with 'invoke'""" import os import pathlib import shutil import invoke TASK_ROOT = pathlib.Path(__file__).resolve().parent TASK_ROOT_STR = str(TASK_ROOT) # shared function def rmrf(items, verbose=True): """Silently remove a list of directories or files""" if isinstance(items, str): items = [items] for item in items: if verbose: print("Removing {}".format(item)) shutil.rmtree(item, ignore_errors=True) # rmtree doesn't remove bare files try: os.remove(item) except FileNotFoundError: pass # create namespaces namespace = invoke.Collection() namespace_clean = invoke.Collection('clean') namespace.add_collection(namespace_clean, 'clean') ##### # # pytest, pylint, and codecov # ##### @invoke.task def pytest(context, junit=False, pty=True, append_cov=False): """Run tests and code coverage using pytest""" ROOT_PATH = TASK_ROOT.parent.parent with context.cd(str(ROOT_PATH)): command_str = 'pytest --cov=cmd2_myplugin --cov-report=term --cov-report=html' if append_cov: command_str += ' --cov-append' if junit: command_str += ' --junitxml=junit/test-results.xml' command_str += ' ' + str((TASK_ROOT / 'tests').relative_to(ROOT_PATH)) context.run(command_str, pty=pty) namespace.add_task(pytest) @invoke.task def pytest_clean(context): """Remove pytest cache and code coverage files and directories""" # pylint: disable=unused-argument with context.cd(TASK_ROOT_STR): dirs = ['.pytest_cache', '.cache', '.coverage'] rmrf(dirs) namespace_clean.add_task(pytest_clean, 'pytest') @invoke.task def pylint(context): """Check code quality using pylint""" context.run('pylint --rcfile=cmd2_myplugin/pylintrc cmd2_myplugin') namespace.add_task(pylint) @invoke.task def pylint_tests(context): """Check code quality of test suite using pylint""" context.run('pylint --rcfile=tests/pylintrc tests') namespace.add_task(pylint_tests) ##### # # build and distribute # ##### BUILDDIR = 'build' DISTDIR = 'dist' @invoke.task def build_clean(context): """Remove the build directory""" # pylint: disable=unused-argument rmrf(BUILDDIR) namespace_clean.add_task(build_clean, 'build') @invoke.task def dist_clean(context): """Remove the dist directory""" # pylint: disable=unused-argument rmrf(DISTDIR) namespace_clean.add_task(dist_clean, 'dist') @invoke.task def eggs_clean(context): """Remove egg directories""" # pylint: disable=unused-argument dirs = set() dirs.add('.eggs') for name in os.listdir(os.curdir): if name.endswith('.egg-info'): dirs.add(name) if name.endswith('.egg'): dirs.add(name) rmrf(dirs) namespace_clean.add_task(eggs_clean, 'eggs') @invoke.task def bytecode_clean(context): """Remove __pycache__ directories and *.pyc files""" # pylint: disable=unused-argument dirs = set() for root, dirnames, files in os.walk(os.curdir): if '__pycache__' in dirnames: dirs.add(os.path.join(root, '__pycache__')) for file in files: if file.endswith(".pyc"): dirs.add(os.path.join(root, file)) print("Removing __pycache__ directories and .pyc files") rmrf(dirs, verbose=False) namespace_clean.add_task(bytecode_clean, 'bytecode') # # make a dummy clean task which runs all the tasks in the clean namespace clean_tasks = list(namespace_clean.tasks.values()) @invoke.task(pre=list(namespace_clean.tasks.values()), default=True) def clean_all(context): """Run all clean tasks""" # pylint: disable=unused-argument pass namespace_clean.add_task(clean_all, 'all') @invoke.task(pre=[clean_all]) def sdist(context): """Create a source distribution""" context.run('python setup.py sdist') 
namespace.add_task(sdist) @invoke.task(pre=[clean_all]) def wheel(context): """Build a wheel distribution""" context.run('python setup.py bdist_wheel') namespace.add_task(wheel) # # these two tasks are commented out so you don't # accidentally run them and upload this template to pypi # # @invoke.task(pre=[sdist, wheel]) # def pypi(context): # """Build and upload a distribution to pypi""" # context.run('twine upload dist/*') # namespace.add_task(pypi) # @invoke.task(pre=[sdist, wheel]) # def pypi_test(context): # """Build and upload a distribution to https://test.pypi.org""" # context.run('twine upload --repository-url https://test.pypi.org/legacy/ dist/*') # namespace.add_task(pypi_test)
2.28125
2
scripts/automation/trex_control_plane/interactive/trex/examples/stl/ndr_plugin.py
timgates42/trex-core
956
907
import stl_path


class MyNDRPlugin():
    def __init__(self):
        pass

    def pre_iteration(self, finding_max_rate, run_results=None, **kwargs):
        """ Function ran before each iteration.

            :parameters:
                finding_max_rate: boolean
                    Indicates whether we are running for the first time, trying to find the max rate.
                    In this is the case, the run_results will be None.

                run_results: dict
                    A dictionary that contains the following keys:
                        queue_full_percentage: Percentage of packets that are queued.
                        drop_rate_percentage: Percentage of packets that were dropped.
                        rate_tx_bps: TX rate in bps.
                        rate_rx_bps: RX rate in bps.
                        tx_util: TX utilization percentage.
                        latency: Latency groups.
                        cpu_util: CPU utilization percentage.
                        tx_pps: TX in pps.
                        rx_pps: RX in pps.
                        tx_bps: TX in bps.
                        rx_bps: RX in bps.
                        bw_per_core: Bandwidth per core.
                        rate_p: Running rate in percentage out of max.
                        total_tx_L1: Total TX L1.
                        total_rx_L1: Total RX L1.
                        iteration: Description of iteration (not necessarily a number)
                    Pay attention: The rate is of the upcoming iteration. All the rest are of the previous iteration.

                kwargs: dict
                    List of tunables passed as parameters.
        """
        # Pre iteration function. This function will run before TRex transmits to the DUT.
        # Could use this to better prepare the DUT, for example define shapers, policers, increase buffers and queues.
        # You can receive tunables in the command line, through the kwargs argument.
        pass

    def post_iteration(self, finding_max_rate, run_results, **kwargs):
        """ Function ran after each iteration.

            :parameters:
                finding_max_rate: boolean
                    Indicates whether we are running for the first time, trying to find the max rate.
                    If this is the case, some values of run_results (like iteration for example) are not relevant.

                run_results: dict
                    A dictionary that contains the following keys:
                        queue_full_percentage: Percentage of packets that are queued.
                        drop_rate_percentage: Percentage of packets that were dropped.
                        rate_tx_bps: TX rate in bps.
                        rate_rx_bps: RX rate in bps.
                        tx_util: TX utilization percentage.
                        latency: Latency groups.
                        cpu_util: CPU utilization percentage.
                        tx_pps: TX in pps.
                        rx_pps: RX in pps.
                        tx_bps: TX in bps.
                        rx_bps: RX in bps.
                        bw_per_core: Bandwidth per core.
                        rate_p: Running rate in percentage out of max.
                        total_tx_L1: Total TX L1.
                        total_rx_L1: Total RX L1.
                        iteration: Description of iteration (not necessarily a number)

                kwargs: dict
                    List of tunables passed as parameters.

            :returns:
                bool: should stop the benchmarking or not.
        """
        # Post iteration function. This function will run after TRex transmits to the DUT.
        # Could use this to decide if to continue the benchmark after querying the DUT post run.
        # The DUT might be overheated or any other thing that might make you want to stop the run.
        # You can receive tunables in the command line, through the kwargs argument.
        should_stop = False
        return should_stop


# dynamic load of python module
def register():
    return MyNDRPlugin()
2.953125
3
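The plugin in record 907 above is a pair of hooks: an NDR benchmark is expected to call pre_iteration() before transmitting and post_iteration() afterwards, stopping early when the latter returns True. The sketch below only illustrates that calling convention; the driver loop, the fake run_results values, and the my_tunable keyword are invented here, and importing ndr_plugin assumes the TRex interactive environment (stl_path) is available.

# Hedged illustration of the hook protocol; not the real TRex NDR bench loop.
import ndr_plugin  # record 907 above; requires stl_path to be importable

plugin = ndr_plugin.register()

fake_results = {  # made-up numbers standing in for a real iteration summary
    'drop_rate_percentage': 0.0,
    'queue_full_percentage': 0.0,
    'rate_p': 50.0,
    'iteration': 'calibration',
}

for iteration in range(3):
    first = (iteration == 0)
    plugin.pre_iteration(first, run_results=None if first else fake_results,
                         my_tunable=42)
    # ... traffic would be transmitted against the DUT here ...
    if plugin.post_iteration(first, fake_results, my_tunable=42):
        break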
homeassistant/components/epsonworkforce/sensor.py
maexono/home-assistant
2
908
"""Support for Epson Workforce Printer.""" from datetime import timedelta import logging import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA from homeassistant.const import CONF_HOST, CONF_MONITORED_CONDITIONS from homeassistant.exceptions import PlatformNotReady import homeassistant.helpers.config_validation as cv from homeassistant.helpers.entity import Entity REQUIREMENTS = ['epsonprinter==0.0.8'] _LOGGER = logging.getLogger(__name__) MONITORED_CONDITIONS = { 'black': ['Inklevel Black', '%', 'mdi:water'], 'magenta': ['Inklevel Magenta', '%', 'mdi:water'], 'cyan': ['Inklevel Cyan', '%', 'mdi:water'], 'yellow': ['Inklevel Yellow', '%', 'mdi:water'], 'clean': ['Inklevel Cleaning', '%', 'mdi:water'], } PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Required(CONF_MONITORED_CONDITIONS): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]), }) SCAN_INTERVAL = timedelta(minutes=60) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the cartridge sensor.""" host = config.get(CONF_HOST) from epsonprinter_pkg.epsonprinterapi import EpsonPrinterAPI api = EpsonPrinterAPI(host) if not api.available: raise PlatformNotReady() sensors = [EpsonPrinterCartridge(api, condition) for condition in config[CONF_MONITORED_CONDITIONS]] add_devices(sensors, True) class EpsonPrinterCartridge(Entity): """Representation of a cartridge sensor.""" def __init__(self, api, cartridgeidx): """Initialize a cartridge sensor.""" self._api = api self._id = cartridgeidx self._name = MONITORED_CONDITIONS[self._id][0] self._unit = MONITORED_CONDITIONS[self._id][1] self._icon = MONITORED_CONDITIONS[self._id][2] @property def name(self): """Return the name of the sensor.""" return self._name @property def icon(self): """Icon to use in the frontend, if any.""" return self._icon @property def unit_of_measurement(self): """Return the unit the value is expressed in.""" return self._unit @property def state(self): """Return the state of the device.""" return self._api.getSensorValue(self._id) @property def available(self): """Could the device be accessed during the last update call.""" return self._api.available def update(self): """Get the latest data from the Epson printer.""" self._api.update()
2.28125
2
bot/exts/help_channels/_cog.py
bast0006/bot
0
909
<gh_stars>0 import asyncio import logging import random import typing as t from datetime import datetime, timezone from operator import attrgetter import discord import discord.abc from discord.ext import commands from bot import constants from bot.bot import Bot from bot.exts.help_channels import _caches, _channel, _cooldown, _message, _name, _stats from bot.utils import channel as channel_utils, lock, scheduling log = logging.getLogger(__name__) NAMESPACE = "help" HELP_CHANNEL_TOPIC = """ This is a Python help channel. You can claim your own help channel in the Python Help: Available category. """ class HelpChannels(commands.Cog): """ Manage the help channel system of the guild. The system is based on a 3-category system: Available Category * Contains channels which are ready to be occupied by someone who needs help * Will always contain `constants.HelpChannels.max_available` channels; refilled automatically from the pool of dormant channels * Prioritise using the channels which have been dormant for the longest amount of time * If there are no more dormant channels, the bot will automatically create a new one * If there are no dormant channels to move, helpers will be notified (see `notify()`) * When a channel becomes available, the dormant embed will be edited to show `AVAILABLE_MSG` * User can only claim a channel at an interval `constants.HelpChannels.claim_minutes` * To keep track of cooldowns, user which claimed a channel will have a temporary role In Use Category * Contains all channels which are occupied by someone needing help * Channel moves to dormant category after `constants.HelpChannels.idle_minutes` of being idle * Command can prematurely mark a channel as dormant * Channel claimant is allowed to use the command * Allowed roles for the command are configurable with `constants.HelpChannels.cmd_whitelist` * When a channel becomes dormant, an embed with `DORMANT_MSG` will be sent Dormant Category * Contains channels which aren't in use * Channels are used to refill the Available category Help channels are named after the chemical elements in `bot/resources/elements.json`. """ def __init__(self, bot: Bot): self.bot = bot self.scheduler = scheduling.Scheduler(self.__class__.__name__) # Categories self.available_category: discord.CategoryChannel = None self.in_use_category: discord.CategoryChannel = None self.dormant_category: discord.CategoryChannel = None # Queues self.channel_queue: asyncio.Queue[discord.TextChannel] = None self.name_queue: t.Deque[str] = None self.last_notification: t.Optional[datetime] = None # Asyncio stuff self.queue_tasks: t.List[asyncio.Task] = [] self.init_task = self.bot.loop.create_task(self.init_cog()) def cog_unload(self) -> None: """Cancel the init task and scheduled tasks when the cog unloads.""" log.trace("Cog unload: cancelling the init_cog task") self.init_task.cancel() log.trace("Cog unload: cancelling the channel queue tasks") for task in self.queue_tasks: task.cancel() self.scheduler.cancel_all() @lock.lock_arg(NAMESPACE, "message", attrgetter("channel.id")) @lock.lock_arg(NAMESPACE, "message", attrgetter("author.id")) @lock.lock_arg(f"{NAMESPACE}.unclaim", "message", attrgetter("author.id"), wait=True) async def claim_channel(self, message: discord.Message) -> None: """ Claim the channel in which the question `message` was sent. Move the channel to the In Use category and pin the `message`. Add a cooldown to the claimant to prevent them from asking another question. Lastly, make a new channel available. 
""" log.info(f"Channel #{message.channel} was claimed by `{message.author.id}`.") await self.move_to_in_use(message.channel) await _cooldown.revoke_send_permissions(message.author, self.scheduler) await _message.pin(message) try: await _message.dm_on_open(message) except Exception as e: log.warning("Error occurred while sending DM:", exc_info=e) # Add user with channel for dormant check. await _caches.claimants.set(message.channel.id, message.author.id) self.bot.stats.incr("help.claimed") # Must use a timezone-aware datetime to ensure a correct POSIX timestamp. timestamp = datetime.now(timezone.utc).timestamp() await _caches.claim_times.set(message.channel.id, timestamp) await _caches.unanswered.set(message.channel.id, True) # Not awaited because it may indefinitely hold the lock while waiting for a channel. scheduling.create_task(self.move_to_available(), name=f"help_claim_{message.id}") def create_channel_queue(self) -> asyncio.Queue: """ Return a queue of dormant channels to use for getting the next available channel. The channels are added to the queue in a random order. """ log.trace("Creating the channel queue.") channels = list(_channel.get_category_channels(self.dormant_category)) random.shuffle(channels) log.trace("Populating the channel queue with channels.") queue = asyncio.Queue() for channel in channels: queue.put_nowait(channel) return queue async def create_dormant(self) -> t.Optional[discord.TextChannel]: """ Create and return a new channel in the Dormant category. The new channel will sync its permission overwrites with the category. Return None if no more channel names are available. """ log.trace("Getting a name for a new dormant channel.") try: name = self.name_queue.popleft() except IndexError: log.debug("No more names available for new dormant channels.") return None log.debug(f"Creating a new dormant channel named {name}.") return await self.dormant_category.create_text_channel(name, topic=HELP_CHANNEL_TOPIC) async def close_check(self, ctx: commands.Context) -> bool: """Return True if the channel is in use and the user is the claimant or has a whitelisted role.""" if ctx.channel.category != self.in_use_category: log.debug(f"{ctx.author} invoked command 'close' outside an in-use help channel") return False if await _caches.claimants.get(ctx.channel.id) == ctx.author.id: log.trace(f"{ctx.author} is the help channel claimant, passing the check for dormant.") self.bot.stats.incr("help.dormant_invoke.claimant") return True log.trace(f"{ctx.author} is not the help channel claimant, checking roles.") has_role = await commands.has_any_role(*constants.HelpChannels.cmd_whitelist).predicate(ctx) if has_role: self.bot.stats.incr("help.dormant_invoke.staff") return has_role @commands.command(name="close", aliases=["dormant", "solved"], enabled=False) async def close_command(self, ctx: commands.Context) -> None: """ Make the current in-use help channel dormant. May only be invoked by the channel's claimant or by staff. """ # Don't use a discord.py check because the check needs to fail silently. if await self.close_check(ctx): log.info(f"Close command invoked by {ctx.author} in #{ctx.channel}.") await self.unclaim_channel(ctx.channel, is_auto=False) async def get_available_candidate(self) -> discord.TextChannel: """ Return a dormant channel to turn into an available channel. If no channel is available, wait indefinitely until one becomes available. 
""" log.trace("Getting an available channel candidate.") try: channel = self.channel_queue.get_nowait() except asyncio.QueueEmpty: log.info("No candidate channels in the queue; creating a new channel.") channel = await self.create_dormant() if not channel: log.info("Couldn't create a candidate channel; waiting to get one from the queue.") notify_channel = self.bot.get_channel(constants.HelpChannels.notify_channel) last_notification = await _message.notify(notify_channel, self.last_notification) if last_notification: self.last_notification = last_notification self.bot.stats.incr("help.out_of_channel_alerts") channel = await self.wait_for_dormant_channel() return channel async def init_available(self) -> None: """Initialise the Available category with channels.""" log.trace("Initialising the Available category with channels.") channels = list(_channel.get_category_channels(self.available_category)) missing = constants.HelpChannels.max_available - len(channels) # If we've got less than `max_available` channel available, we should add some. if missing > 0: log.trace(f"Moving {missing} missing channels to the Available category.") for _ in range(missing): await self.move_to_available() # If for some reason we have more than `max_available` channels available, # we should move the superfluous ones over to dormant. elif missing < 0: log.trace(f"Moving {abs(missing)} superfluous available channels over to the Dormant category.") for channel in channels[:abs(missing)]: await self.unclaim_channel(channel) async def init_categories(self) -> None: """Get the help category objects. Remove the cog if retrieval fails.""" log.trace("Getting the CategoryChannel objects for the help categories.") try: self.available_category = await channel_utils.try_get_channel( constants.Categories.help_available ) self.in_use_category = await channel_utils.try_get_channel( constants.Categories.help_in_use ) self.dormant_category = await channel_utils.try_get_channel( constants.Categories.help_dormant ) except discord.HTTPException: log.exception("Failed to get a category; cog will be removed") self.bot.remove_cog(self.qualified_name) async def init_cog(self) -> None: """Initialise the help channel system.""" log.trace("Waiting for the guild to be available before initialisation.") await self.bot.wait_until_guild_available() log.trace("Initialising the cog.") await self.init_categories() await _cooldown.check_cooldowns(self.scheduler) self.channel_queue = self.create_channel_queue() self.name_queue = _name.create_name_queue( self.available_category, self.in_use_category, self.dormant_category, ) log.trace("Moving or rescheduling in-use channels.") for channel in _channel.get_category_channels(self.in_use_category): await self.move_idle_channel(channel, has_task=False) # Prevent the command from being used until ready. # The ready event wasn't used because channels could change categories between the time # the command is invoked and the cog is ready (e.g. if move_idle_channel wasn't called yet). # This may confuse users. So would potentially long delays for the cog to become ready. self.close_command.enabled = True await self.init_available() _stats.report_counts() log.info("Cog is ready!") async def move_idle_channel(self, channel: discord.TextChannel, has_task: bool = True) -> None: """ Make the `channel` dormant if idle or schedule the move if still active. If `has_task` is True and rescheduling is required, the extant task to make the channel dormant will first be cancelled. 
""" log.trace(f"Handling in-use channel #{channel} ({channel.id}).") if not await _message.is_empty(channel): idle_seconds = constants.HelpChannels.idle_minutes * 60 else: idle_seconds = constants.HelpChannels.deleted_idle_minutes * 60 time_elapsed = await _channel.get_idle_time(channel) if time_elapsed is None or time_elapsed >= idle_seconds: log.info( f"#{channel} ({channel.id}) is idle longer than {idle_seconds} seconds " f"and will be made dormant." ) await self.unclaim_channel(channel) else: # Cancel the existing task, if any. if has_task: self.scheduler.cancel(channel.id) delay = idle_seconds - time_elapsed log.info( f"#{channel} ({channel.id}) is still active; " f"scheduling it to be moved after {delay} seconds." ) self.scheduler.schedule_later(delay, channel.id, self.move_idle_channel(channel)) async def move_to_available(self) -> None: """Make a channel available.""" log.trace("Making a channel available.") channel = await self.get_available_candidate() log.info(f"Making #{channel} ({channel.id}) available.") await _message.send_available_message(channel) log.trace(f"Moving #{channel} ({channel.id}) to the Available category.") await _channel.move_to_bottom( channel=channel, category_id=constants.Categories.help_available, ) _stats.report_counts() async def move_to_dormant(self, channel: discord.TextChannel) -> None: """Make the `channel` dormant.""" log.info(f"Moving #{channel} ({channel.id}) to the Dormant category.") await _channel.move_to_bottom( channel=channel, category_id=constants.Categories.help_dormant, ) log.trace(f"Sending dormant message for #{channel} ({channel.id}).") embed = discord.Embed(description=_message.DORMANT_MSG) await channel.send(embed=embed) log.trace(f"Pushing #{channel} ({channel.id}) into the channel queue.") self.channel_queue.put_nowait(channel) _stats.report_counts() @lock.lock_arg(f"{NAMESPACE}.unclaim", "channel") async def unclaim_channel(self, channel: discord.TextChannel, *, is_auto: bool = True) -> None: """ Unclaim an in-use help `channel` to make it dormant. Unpin the claimant's question message and move the channel to the Dormant category. Remove the cooldown role from the channel claimant if they have no other channels claimed. Cancel the scheduled cooldown role removal task. Set `is_auto` to True if the channel was automatically closed or False if manually closed. """ claimant_id = await _caches.claimants.get(channel.id) _unclaim_channel = self._unclaim_channel # It could be possible that there is no claimant cached. In such case, it'd be useless and # possibly incorrect to lock on None. Therefore, the lock is applied conditionally. if claimant_id is not None: decorator = lock.lock_arg(f"{NAMESPACE}.unclaim", "claimant_id", wait=True) _unclaim_channel = decorator(_unclaim_channel) return await _unclaim_channel(channel, claimant_id, is_auto) async def _unclaim_channel(self, channel: discord.TextChannel, claimant_id: int, is_auto: bool) -> None: """Actual implementation of `unclaim_channel`. See that for full documentation.""" await _caches.claimants.delete(channel.id) # Ignore missing tasks because a channel may still be dormant after the cooldown expires. 
if claimant_id in self.scheduler: self.scheduler.cancel(claimant_id) claimant = self.bot.get_guild(constants.Guild.id).get_member(claimant_id) if claimant is None: log.info(f"{claimant_id} left the guild during their help session; the cooldown role won't be removed") elif not any(claimant.id == user_id for _, user_id in await _caches.claimants.items()): # Remove the cooldown role if the claimant has no other channels left await _cooldown.remove_cooldown_role(claimant) await _message.unpin(channel) await _stats.report_complete_session(channel.id, is_auto) await self.move_to_dormant(channel) # Cancel the task that makes the channel dormant only if called by the close command. # In other cases, the task is either already done or not-existent. if not is_auto: self.scheduler.cancel(channel.id) async def move_to_in_use(self, channel: discord.TextChannel) -> None: """Make a channel in-use and schedule it to be made dormant.""" log.info(f"Moving #{channel} ({channel.id}) to the In Use category.") await _channel.move_to_bottom( channel=channel, category_id=constants.Categories.help_in_use, ) timeout = constants.HelpChannels.idle_minutes * 60 log.trace(f"Scheduling #{channel} ({channel.id}) to become dormant in {timeout} sec.") self.scheduler.schedule_later(timeout, channel.id, self.move_idle_channel(channel)) _stats.report_counts() @commands.Cog.listener() async def on_message(self, message: discord.Message) -> None: """Move an available channel to the In Use category and replace it with a dormant one.""" if message.author.bot: return # Ignore messages sent by bots. await self.init_task if channel_utils.is_in_category(message.channel, constants.Categories.help_available): if not _channel.is_excluded_channel(message.channel): await self.claim_channel(message) else: await _message.check_for_answer(message) @commands.Cog.listener() async def on_message_delete(self, msg: discord.Message) -> None: """ Reschedule an in-use channel to become dormant sooner if the channel is empty. The new time for the dormant task is configured with `HelpChannels.deleted_idle_minutes`. """ await self.init_task if not channel_utils.is_in_category(msg.channel, constants.Categories.help_in_use): return if not await _message.is_empty(msg.channel): return log.info(f"Claimant of #{msg.channel} ({msg.author}) deleted message, channel is empty now. Rescheduling task.") # Cancel existing dormant task before scheduling new. self.scheduler.cancel(msg.channel.id) delay = constants.HelpChannels.deleted_idle_minutes * 60 self.scheduler.schedule_later(delay, msg.channel.id, self.move_idle_channel(msg.channel)) async def wait_for_dormant_channel(self) -> discord.TextChannel: """Wait for a dormant channel to become available in the queue and return it.""" log.trace("Waiting for a dormant channel.") task = asyncio.create_task(self.channel_queue.get()) self.queue_tasks.append(task) channel = await task log.trace(f"Channel #{channel} ({channel.id}) finally retrieved from the queue.") self.queue_tasks.remove(task) return channel
2.65625
3
code/menu/screens/shopmenu.py
LordZagreus/LodeRunner
1
910
<filename>code/menu/screens/shopmenu.py import os import math import random import time from code.menu.menu import Menu from code.tools.eventqueue import EventQueue from code.tools.xml import XMLParser from code.utils.common import coalesce, intersect, offset_rect, log, log2, xml_encode, xml_decode, translate_rgb_to_string from code.constants.common import SCREEN_WIDTH, SCREEN_HEIGHT, PAUSE_MENU_X, PAUSE_MENU_Y, PAUSE_MENU_WIDTH, PAUSE_MENU_HEIGHT, MODE_GAME, TILE_WIDTH, TILE_HEIGHT, DIR_UP, DIR_RIGHT, DIR_DOWN, DIR_LEFT, SPLASH_MODE_GREYSCALE_ANIMATED from code.constants.states import STATUS_ACTIVE, STATUS_INACTIVE, GAME_STATE_ACTIVE, GAME_STATE_NOT_READY from code.constants.newsfeeder import * class ShopMenu(Menu): def __init__(self): Menu.__init__(self) # Assume all shop menus come from already-lightboxed dialogues. self.lightbox_controller.set_interval( self.lightbox_controller.get_target() ) # We're going to keep a handle to the seller so that we can # remove items from their inventory after a purchase... self.vendor = None#seller # Shop title (e.g. "Bob's Fine Items") self.title = "Shoppe" # Salutation (e.g. "Look at these great items") self.message = "Take a look at my inventory." # Before we begin populating the shop menu, we'll first # make sure the NPC seller stocks any specified "required" items... self.required_item_names = [] # Track item quality threshholds (low and high) self.min_item_quality = 0 self.max_item_quality = 0 # Items in stock at any given time self.max_items_stocked = 1 # Number of times the vendor can restock self.max_item_reloads = 1 # Track whether this is the first build or a refresh self.first_build = True # Fire build event self.fire_event("build") def handle_event(self, event, control_center, universe):#params, user_input, network_controller, universe, active_map, session, widget_dispatcher, text_renderer, save_controller, refresh = False): # Events that result from event handling results = EventQueue() # Convenience (action, params) = ( event.get_action(), event.get_params() ) # Build root menu if ( action == "build" ): results.append( self.handle_build_event(event, control_center, universe) ) # Select an item, get confirmation... elif ( action == "show:confirm-purchase" ): results.append( self.handle_show_confirm_purchase_event(event, control_center, universe) ) # Commit an item purchase elif ( action == "game:buy-item" ): results.append( self.handle_shop_buy_item_event(event, control_center, universe) ) # Go to the previous page (e.g. 
close buy item confirm dialog) elif ( action == "back" ): results.append( self.handle_back_event(event, control_center, universe) ) # Finalize a "back" call elif ( action == "previous-page" ): # Let's just go back one page self.page_back(1) # Leave shop, resume game elif ( action == "resume-game" ): results.append( self.handle_resume_game_event(event, control_center, universe) ) # Restore the universe to active game state, set this very menu to inactive elif ( action == "kill" ): results.append( self.handle_kill_event(event, control_center, universe) ) # Return events return results # Configure the shop menu (more options than your typical menu, we need to define many parameters) def configure(self, options): # Common menu configuration self.__std_configure__(options) if ( "vendor" in options ): self.vendor = options["vendor"] if ( "title" in options ): self.title = options["title"] if ( "message" in options ): self.message = options["message"] if ( "required-item-names" in options ): self.required_item_names.extend( options["required-item-names"] )#.split(";") ) if ( "min-quality" in options ): self.min_item_quality = int( options["min-quality"] ) if ( "max-quality" in options ): self.max_item_quality = int( options["max-quality"] ) if ( "max-items" in options ): self.max_items_stocked = int( options["max-items"] ) if ( "max-reloads" in options ): self.max_item_reloads = int( options["max-reloads"] ) # For chaining return self # Build the shop menu def handle_build_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Fetch the widget dispatcher widget_dispatcher = control_center.get_widget_dispatcher() # Pause the game so that we can shop, if this is the first build... if (self.first_build): # Pause universe.pause() # Call in the pause splash control_center.get_splash_controller().set_mode(SPLASH_MODE_GREYSCALE_ANIMATED) # Before populating the vendor's inventory (or re-populating), # clear it of any items the player has acquired since last shopping with this vendor... self.vendor.remove_erstwhile_acquired_items_from_inventory(universe) # Populate inventory for this shoppe's vendor... self.vendor.populate_vendor_inventory( min_quality = self.min_item_quality,#int( node.get_attribute("min-quality") ), max_quality = self.max_item_quality,#int( node.get_attribute("min-quality") ), required_item_names = self.required_item_names, max_items = self.max_items_stocked,#int( node.get_attribute("max-items") ), max_reloads = self.max_item_reloads,#int( node.get_attribute("max-reloads") ), universe = universe ) # Scope root = None # Does the vendor have anything in stock? Use this data # to determine which template we load... if ( self.vendor.get_vendor_inventory_count() == 0 ): # Fetch the "nothing in stock" template template = self.fetch_xml_template( "shop.directory", version = "out-of-items" ).add_parameters({ "@x": xml_encode( "%d" % (SCREEN_WIDTH - (int( (SCREEN_WIDTH - PAUSE_MENU_WIDTH) / 2 ))) ), "@y": xml_encode( "%d" % PAUSE_MENU_Y ), "@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ), "@height": xml_encode( "%d" % PAUSE_MENU_HEIGHT ), "@shop-title": xml_encode( self.title ) }) # Compile template root = template.compile_node_by_id("menu") # We have items to sell... 
else: # Fetch the "shopping directory" template template = self.fetch_xml_template( "shop.directory", version = "default" ).add_parameters({ "@x": xml_encode( "%d" % (SCREEN_WIDTH - (int( (SCREEN_WIDTH - PAUSE_MENU_WIDTH) / 2 ))) ), "@y": xml_encode( "%d" % PAUSE_MENU_Y ), "@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ), "@height": xml_encode( "%d" % PAUSE_MENU_HEIGHT ), "@shop-title": xml_encode( self.title ), "@salutation": xml_encode( self.message ) }) # Compile template root = template.compile_node_by_id("menu") # Now we'll add an entry for each available item... for item_name in self.vendor.get_vendor_inventory_item_names(): # Grab handle item = universe.get_item_by_name(item_name) # Validate if (item): # How much money do we currently have? money = int( universe.get_session_variable("core.gold.wallet").get_value() ) # Template version for this item depends on whether we can afford it... template_version = ( "affordable" if (money >= item.cost) else "unaffordable" ) # Fetch the appropriate template for an individual item template = self.fetch_xml_template( "shop.directory.insert", version = template_version ).add_parameters({ "@item-name": xml_encode( item.name ), "@item-title": xml_encode( item.title ), "@item-cost": xml_encode( "%d" % item.cost ), "@item-advertisement": xml_encode( item.description ) }) # Compile node = template.compile_node_by_id("insert") # Inject into inventory area... root.find_node_by_id("ext.inventory").add_node(node) # Create widget widget = widget_dispatcher.convert_node_to_widget(root, control_center, universe) widget.set_id("root") # We have definitely completed the first build now self.first_build = False # Add the new page self.add_widget_via_event(widget, event) # Return events return results # Show the "are you sure you wanna buy this?" page def handle_show_confirm_purchase_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Fetch the widget dispatcher widget_dispatcher = control_center.get_widget_dispatcher() # Get a handle to the actual item... item = universe.get_item_by_name( params["item-name"] ) # Validate if (item): # Fetch confirm purchase template template = self.fetch_xml_template("shop.buy.confirm").add_parameters({ "@width": xml_encode( "%d" % int(PAUSE_MENU_WIDTH / 2) ), "@height": xml_encode( "%d" % SCREEN_HEIGHT ), "@item-name": xml_encode( item.get_name() ), "@item-title": xml_encode( item.get_title() ), "@item-cost": xml_encode( "%d" % item.get_cost() ) }) # Compile template root = template.compile_node_by_id("menu") # Create widget widget = widget_dispatcher.convert_node_to_widget(root, control_center, universe) widget.set_id("confirm-shop-purchase") # Add the new page self.add_widget_via_event(widget, event, exclusive = False) # Return events return results # Commit an item purchase def handle_shop_buy_item_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Get a reference to the item (for cost info, etc.) 
item = universe.get_item_by_name( params["item-name"] ) # Acquire the item by its name universe.acquire_item_by_name( item.get_name() ) # Post a newsfeeder notice control_center.get_window_controller().get_newsfeeder().post({ "type": NEWS_ITEM_NEW, "title": control_center.get_localization_controller().get_label("new-item-purchased:header"), "content": item.get_title() }) # Add a historical record universe.add_historical_record( "purchases", control_center.get_localization_controller().get_label( "purchased-m-from-n-for-g:message", { "@m": item.get_title(), "@n": self.vendor.nick, "@g": item.get_cost() } ) #"Bought [color=special]%s[/color] for [color=special]%s[/color] gold." % ( item.get_title(), item.get_cost() ) ) # Remove from seller's inventory self.vendor.remove_item_from_vendor_inventory( item.get_name() ) # Increase sales count for vendor self.vendor.increase_sales_count(1) # Reduce player's wallet amount by the cost... universe.increment_session_variable( "core.gold.wallet", -1 * item.get_cost() ) # Count as gold spent universe.increment_session_variable( "stats.gold-spent", item.get_cost() ) # Execute the "wallet-changed" achievement hook universe.execute_achievement_hook( "wallet-changed", control_center ) # Increase universe stats for items bought universe.get_session_variable("stats.items-bought").increment_value(1) # Execute the "bought-item" achievement hook universe.execute_achievement_hook( "bought-item", control_center ) # Get the active map m = universe.get_active_map() # Check for a generic "onpurchase" script for the vendor m.run_script( "%s.onpurchase" % self.vendor.get_name(), control_center, universe, execute_all = True # Try to loop entire script (?) ) # Check for an onpurchase script (perhaps the game reacts in some way to an item you might have bought) m.run_script( name = "%s.onpurchase" % item.get_name(), control_center = control_center, universe = universe, execute_all = True ) # Refresh UI self.refresh_pages(control_center, universe, curtailed_count = 1) # After rebuilding the UI, we will have restocked the NPC's inventory. # Thus, if the NPC has no inventory available, we have just bought their last item... if ( self.vendor.get_vendor_inventory_count() == 0 ): # Execute the "bought-all-items" achievement hook universe.execute_achievement_hook( "bought-all-items", control_center ) # I'm going to set the cursor at "home" position for the shop self.get_widget_by_id("root").set_cursor_at_beginning()#finalize = True) # Return events return results # Go back a page (animated) def handle_back_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Get the active page page = self.get_active_page() # Validate if (page): # Dismiss the page page.hide( on_complete = "previous-page" ) # Return events return results # Leave the shop and resume play def handle_resume_game_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Dismiss lightbox effect self.lightbox_controller.set_target(0) # Dismiss the splash controller, calling to resume game action once done... 
control_center.get_splash_controller().dismiss( on_complete = "game:unpause" ) #hmenu.slide(DIR_LEFT, percent = 1.0) #row_menu.slide(DIR_RIGHT, percent = 1.0) # Resume game, killing shop menu when widget disappears self.get_widget_by_id("root").hide( on_complete = "kill" ) # Return events return results # Kill event. Set game status back to active when shopping is done. def handle_kill_event(self, event, control_center, universe): # Events that result from handling this event (on-birth events, etc.) results = EventQueue() # Convenience params = event.get_params() # Done with the shop menu widget; trash it. self.set_status(STATUS_INACTIVE) # Return events return results
2.890625
3
lib/bridgedb/runner.py
liudonghua123/bridgedb
0
911
# -*- coding: utf-8 ; test-case-name: bridgedb.test.test_runner -*- # # This file is part of BridgeDB, a Tor bridge distribution system. # # :authors: <NAME> 0xA3ADB67A2CDB8B35 <<EMAIL>> # please also see AUTHORS file # :copyright: (c) 2007-2015, The Tor Project, Inc. # (c) 2007-2015, all entities within the AUTHORS file # (c) 2012-2015, Isis Lovecruft # :license: 3-clause BSD, see included LICENSE for information """Classes for running components and servers, as well as daemonisation. ** Module Overview: ** """ from __future__ import print_function import logging import sys import os from twisted.python import procutils def find(filename): """Find the executable ``filename``. :param string filename: The executable to search for. Must be in the effective user ID's $PATH. :rtype: string :returns: The location of the executable, if found. Otherwise, returns None. """ executable = None logging.debug("Searching for installed '%s'..." % filename) which = procutils.which(filename, os.X_OK) if len(which) > 0: for that in which: if os.stat(that).st_uid == os.geteuid(): executable = that break if not executable: return None logging.debug("Found installed script at '%s'" % executable) return executable def generateDescriptors(count=None, rundir=None): """Run a script which creates fake bridge descriptors for testing purposes. This will run Leekspin_ to create bridge server descriptors, bridge extra-info descriptors, and networkstatus document. .. warning: This function can take a very long time to run, especially in headless environments where entropy sources are minimal, because it creates the keys for each mocked OR, which are embedded in the server descriptors, used to calculate the OR fingerprints, and sign the descriptors, among other things. .. _Leekspin: https://gitweb.torproject.org/user/isis/leekspin.git :param integer count: Number of mocked bridges to generate descriptor for. (default: 3) :type rundir: string or None :param rundir: If given, use this directory as the current working directory for the bridge descriptor generator script to run in. The directory MUST already exist, and the descriptor files will be created in it. If None, use the whatever directory we are currently in. """ import subprocess import os.path proc = None statuscode = 0 script = 'leekspin' rundir = rundir if os.path.isdir(rundir) else None count = count if count else 3 try: proc = subprocess.Popen([script, '-n', str(count)], close_fds=True, cwd=rundir) finally: if proc is not None: proc.wait() if proc.returncode: print("There was an error generating bridge descriptors.", "(Returncode: %d)" % proc.returncode) statuscode = proc.returncode else: print("Sucessfully generated %s descriptors." % str(count)) del subprocess return statuscode def doDumpBridges(config): """Dump bridges by assignment to a file. This function handles the commandline '--dump-bridges' option. :type config: :class:`bridgedb.Main.Conf` :param config: The current configuration. """ import bridgedb.Bucket as bucket bucketManager = bucket.BucketManager(config) bucketManager.assignBridgesToBuckets() bucketManager.dumpBridges()
1.859375
2
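A brief usage sketch for the two helpers defined in lib/bridgedb/runner.py above. It assumes bridgedb is importable in the current environment; the output directory is purely illustrative, and an existing directory is passed for rundir since the helper expects one.

import logging
import os

from bridgedb import runner

logging.basicConfig(level=logging.DEBUG)

# Look for an installed executable owned by the current effective user.
leekspin_path = runner.find("leekspin")
if leekspin_path is None:
    print("leekspin was not found on this user's $PATH")
else:
    # Generate 5 mocked bridge descriptors into an existing directory.
    outdir = "/tmp/fake-descriptors"  # hypothetical location
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    status = runner.generateDescriptors(count=5, rundir=outdir)
    print("generateDescriptors exited with status %d" % status)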
wce_triage/ops/create_image_runner.py
pfrouleau/wce-triage-v2
3
912
<filename>wce_triage/ops/create_image_runner.py #!/usr/bin/env python3 # # Create disk image # import re, sys, traceback from .tasks import task_fetch_partitions, task_refresh_partitions, task_mount, task_remove_persistent_rules, task_remove_logs, task_fsck, task_shrink_partition, task_expand_partition, task_unmount from .partclone_tasks import task_create_disk_image from .ops_ui import console_ui from ..components.disk import create_storage_instance from .runner import Runner from ..lib.disk_images import make_disk_image_name from .json_ui import json_ui from ..lib.util import init_triage_logger, is_block_device # "Waiting", "Prepare", "Preflight", "Running", "Success", "Failed"] my_messages = { "Waiting": "Saving disk is waiting.", "Prepare": "Savign disk is preparing.", "Preflight": "Saving disk is preparing.", "Running": "{step} of {steps}: Running {task}", "Success": "Saving disk completed successfully.", "Failed": "Saving disk failed." } # class ImageDiskRunner(Runner): '''Runner for creating disk image. does fsck, shrink partition, create disk image and resize the file system back to the max. For now, this is only dealing with the EXT4 linux partition. ''' # FIXME: If I want to make this to a generic clone app, I need to deal with all of partitions on the disk. # One step at a time. def __init__(self, ui, runner_id, disk, destdir, suggestedname=None, partition_id='Linux'): super().__init__(ui, runner_id) self.time_estimate = 600 self.disk = disk self.partition_id = partition_id self.destdir = destdir self.imagename = make_disk_image_name(destdir, suggestedname) pass def prepare(self): super().prepare() # self.tasks.append(task_mount_nfs_destination(self, "Mount the destination volume")) self.tasks.append(task_fetch_partitions("Fetch partitions", self.disk)) self.tasks.append(task_refresh_partitions("Refresh partition information", self.disk)) self.tasks.append(task_mount("Mount the target disk", disk=self.disk, partition_id=self.partition_id)) self.tasks.append(task_remove_persistent_rules("Remove persistent rules", disk=self.disk, partition_id=self.partition_id)) self.tasks.append(task_remove_logs("Remove/Clean Logs", disk=self.disk, partition_id=self.partition_id)) task = task_unmount("Unmount target", disk=self.disk, partition_id=self.partition_id) task.set_teardown_task() self.tasks.append(task) self.tasks.append(task_fsck("fsck partition", disk=self.disk, partition_id=self.partition_id)) self.tasks.append(task_shrink_partition("Shrink partition to smallest", disk=self.disk, partition_id=self.partition_id)) self.tasks.append(task_create_disk_image("Create disk image", disk=self.disk, partition_id=self.partition_id, imagename=self.imagename)) task = task_expand_partition("Expand the partion back", disk=self.disk, partition_id=self.partition_id) task.set_teardown_task() self.tasks.append(task) pass pass if __name__ == "__main__": tlog = init_triage_logger() if len(sys.argv) == 1: print( 'Unloader: devicename part destdir') sys.exit(0) # NOTREACHED pass devname = sys.argv[1] if not is_block_device(devname): print( '%s is not a block device.' % devname) sys.exit(1) # NOTREACHED pass part = sys.argv[2] # This is a partition id destdir = sys.argv[3] # Destination directory disk = create_storage_instance(devname) # Preflight is for me to see the tasks. http server runs this with json_ui. 
do_it = True if destdir == "preflight": ui = console_ui() do_it = False pass elif destdir == "testflight": ui = console_ui() do_it = True pass else: ui = json_ui(wock_event="saveimage", message_catalog=my_messages) pass if re.match(r'\d+', part): part = int(part) pass runner_id = disk.device_name runner = ImageDiskRunner(ui, runner_id, disk, destdir, partition_id=part) try: runner.prepare() runner.preflight() runner.explain() runner.run() sys.exit(0) # NOTREACHED except Exception as exc: sys.stderr.write(traceback.format_exc() + "\n") sys.exit(1) # NOTREACHED pass pass
2.1875
2
batch_processing_dataflow/play_store_flow.py
KeeplerIO/meetup-hands-on-gcp-2019
1
913
<gh_stars>1-10 import argparse import logging import apache_beam as beam from apache_beam.io import WriteToBigQuery from apache_beam.io import ReadFromText, WriteToText from apache_beam.options.pipeline_options import PipelineOptions class ProcessCSV(beam.DoFn): def process(self, element, *args, **kwargs): import csv formated_element = [element.encode('utf8')] processed_csv = csv.DictReader(formated_element, fieldnames=['App', 'Category', 'Rating', 'Reviews', 'Size', 'Installs', 'Type', 'Price', 'Content_Rating', 'Genres', 'Last_Updated', 'Current_Ver', 'Android_Ver'], delimiter=',') processed_fields = processed_csv.next() if processed_fields.get('Category').replace('.','').isdigit(): return None return [processed_fields] class ParseRecord(beam.DoFn): def process(self, element, *args, **kwargs): from datetime import datetime import math def string_to_megabyte(raw_string): if raw_string.upper().endswith('K'): multiplier = 1000 elif raw_string.upper().endswith('M'): multiplier = 1000 * 1000 else: return None return (float(raw_string[:-1]) * multiplier) / 1000000 new_element = {} rating = float(element['Rating']) new_element['Rating'] = rating if not math.isnan(rating) else None new_element['Size'] = string_to_megabyte(element['Size']) new_element['Price'] = float(element['Price'].replace("$","")) new_element['Installs'] = int(element['Installs'].replace("+", "").replace(",","")) new_element['Last_Updated'] = datetime.strptime(element['Last_Updated'], '%B %d, %Y').strftime('%Y-%m-%d') new_element['Category'] = element['Category'] new_element['Genres'] = element['Genres'] new_element['App'] = element['App'] new_element['Content_Rating'] = element['Content_Rating'] new_element['Reviews'] = element['Reviews'] new_element['Android_Ver'] = element['Android_Ver'] new_element['Type'] = element['Type'] new_element['Current_Ver'] = element['Current_Ver'] logging.info(new_element) return [new_element] def run(argv=None): """Main entry point. It defines and runs the pipeline.""" parser = argparse.ArgumentParser() parser.add_argument('--input', dest='input', default='gs://meetup-batch-processing/input/googleplaystore.csv', help='Input file to process.') parser.add_argument('--output', dest='output', default='gs://meetup-batch-processing/output/googleplaystore.csv', help='Output file to process.') parser.add_argument('--table-output', dest='table_output', default='meetup-hands-on-gcp-2019:googleplaystore_batch_dataflow.play_store', help='Bigquery table name for output.') known_args, pipeline_args = parser.parse_known_args(argv) pipeline_options = PipelineOptions(pipeline_args) with beam.Pipeline(options=pipeline_options) as pipeline: raw_lines = pipeline | 'ReadFromCsv' >> ReadFromText(known_args.input, skip_header_lines=1) lines = raw_lines | 'processCsv' >> beam.ParDo(ProcessCSV()) output = lines | 'parseRecord' >> beam.ParDo(ParseRecord()) output | 'writeBigQuery' >> WriteToBigQuery(known_args.table_output, write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE, create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER) logging.info('Finished.') if __name__ == '__main__': logging.getLogger().setLevel(logging.INFO) run()
2.40625
2
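The ProcessCSV and ParseRecord DoFns in play_store_flow.py boil down to csv.DictReader plus some string cleanup. The sketch below replays that parsing on one made-up Play Store row outside of Beam; the field names are copied from the pipeline, the sample row is invented.

import csv

FIELDNAMES = ['App', 'Category', 'Rating', 'Reviews', 'Size', 'Installs',
              'Type', 'Price', 'Content_Rating', 'Genres', 'Last_Updated',
              'Current_Ver', 'Android_Ver']

# One hypothetical CSV line shaped like the Play Store dump.
sample = 'My App,GAME,4.1,159,19M,"10,000+",Free,0,Everyone,Arcade,"January 7, 2018",1.0.0,4.0.3 and up'

reader = csv.DictReader([sample], fieldnames=FIELDNAMES, delimiter=',')
row = next(reader)

# The same cleanup ParseRecord applies to Installs and Price.
installs = int(row['Installs'].replace('+', '').replace(',', ''))
price = float(row['Price'].replace('$', ''))
print(row['App'], installs, price)  # -> My App 10000 0.0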
backend/main/server/resources/Message.py
Manotomo-Alliance-Support-Squad/WWS
0
914
from flask import request from flask_jwt import jwt_required from flask_restful import Resource from main.server import app, cache, db from main.server.models import Message, MessageSchema messages_schema = MessageSchema(many=True) message_schema = MessageSchema() @app.after_request def add_header(response): response.headers['Access-Control-Allow-Origin'] = '*' response.headers['Access-Control-Allow-Credentials'] = 'true' response.headers['Access-Control-Allow-Methods'] = 'GET, POST' response.headers[ 'Access-Control-Allow-Headers'] = 'Access-Control-Allow-Headers, Origin,Accept, X-Requested-With, Content-Type, Access-Control-Request-Method, Access-Control-Request-Headers' return response class MessageCount(Resource): @cache.cached(timeout=100) def get(self): """Gets the number of messages available on the server""" return {'status': 'success', 'count': Message.query.count()}, 200 class MessageListRangeResource(Resource): @cache.cached(timeout=100) def get(self, lower, upper): """Gets a range of messages on the server""" if int(lower) < 1: return {'status': 'fail', 'messages': 'Invalid index: ' + str(lower)}, 400 if int(lower) > int(upper): return {'status': 'fail', 'messages': 'Upper range cannot be less than lower range: ' + str(lower) + '>' + str(upper)}, 400 messages = Message.query.filter(Message.messageID >= int(lower)).filter(Message.messageID <= int(upper)) if not messages: return {'status': 'fail', 'messages': 'Out of range: ' + str(lower) + ' - ' + str(upper) + ' does not exist'}, 404 messages = messages_schema.dump(messages) if not Message.query.filter_by(messageID=upper).first(): # the last item in the range return {'status': 'success', 'messages': messages}, 206 # Partial Content Served return {'status': 'success', 'messages': messages}, 200 class MessageListResource(Resource): @cache.cached(timeout=100) def get(self): """Gets all messages on the server""" messages = Message.query.all() messages = messages_schema.dump(messages) if not messages: return {'status': 'success', 'messages': messages}, 206 # Partial Content Served return {'status': 'success', 'messages': messages}, 200 @jwt_required() def post(self): """Add message""" json_data = request.get_json(force=True) if not json_data: return {'status': 'fail', 'message': 'No input data'}, 400 errors = message_schema.validate(json_data) if errors: return {'status': 'fail', 'message': 'Error handling request'}, 422 data = message_schema.load(json_data) message = Message.query.filter_by(orig_msg=data.get('orig_msg')).first() if message: return {'status': 'fail', 'message': 'Message already exists'}, 400 message = Message(orig_msg=data.get('orig_msg'), tl_msg=data.get('tl_msg'), country=data.get('country'), username=data.get('username')) db.session.add(message) db.session.commit() return {'status': 'success', 'message': 'Message successfully created'}, 201 class MessageResource(Resource): @cache.cached(timeout=100) def get(self, messageID): """"Get a message by message ID""" message = Message.query.filter_by(messageID=messageID) if not message.first(): return {'status': 'fail', 'message': 'No message with ID ' + str(messageID) + ' exists'}, 404 message = messages_schema.dump(message) return {'status': 'success', 'messages': message}, 200 @jwt_required() def delete(self, messageID): """delete a message by ID""" message = Message.query.filter_by(messageID=messageID) if not message.first(): return {'status': 'fail', 'message': 'No message with ID ' + str(messageID) + ' exists'}, 404 message.delete() db.session.commit() return 
{'status': 'success', 'message': 'Message Deleted'}, 200
2.4375
2
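The Flask-RESTful resources in Message.py only define handlers; the URL rules they are bound to live elsewhere in the project, so the paths below are assumptions rather than the project's real routes. A rough client sketch with requests:

import requests

BASE = "http://localhost:5000"          # assumed dev-server address
LIST_ROUTE = BASE + "/message/all"      # route is an assumption

# List all messages (MessageListResource.get).
resp = requests.get(LIST_ROUTE)
print(resp.status_code, resp.json().get("status"))

# Create a message (MessageListResource.post); flask_jwt protects it.
headers = {"Authorization": "JWT <token>"}   # placeholder token
payload = {
    "orig_msg": "hello",
    "tl_msg": "translated hello",
    "country": "JP",
    "username": "someone",
}
resp = requests.post(LIST_ROUTE, json=payload, headers=headers)
print(resp.status_code, resp.json())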
demos/chicken_pasta/chicken_pasta.py
icaros-usc/wecook
15
915
<filename>demos/chicken_pasta/chicken_pasta.py #!/usr/bin/env python3 import rospy from wecook.msg import ActionMsg, TaskMsg, SceneMsg, ObjectMsg, ContainingMsg, AgentMsg def talker(): pub = rospy.Publisher('WeCookDispatch', TaskMsg, queue_size=10) rospy.init_node('wecook_chicken_pasta', anonymous=True) scene_msg = SceneMsg([ObjectMsg('wall0', 'package://wecook_assets/data/furniture/wall.urdf', [0.75, 0.05, 0., 0., 0., 0., 1.]), ObjectMsg('wall1', 'package://wecook_assets/data/furniture/wall.urdf', [-0.85, 1.45, 0., 0., 0., 0.707, 0.707]), ObjectMsg('counter0', 'package://wecook_assets/data/furniture/kitchen_counter.urdf', [0.3, 0., 0., 0., 0., 0., 1.]), ObjectMsg('counter1', 'package://wecook_assets/data/furniture/kitchen_counter.urdf', [0., 1.0, 0., 0., 0., 0.707, 0.707]), ObjectMsg('sink0', 'package://wecook_assets/data/furniture/sink_counter.urdf', [-1.3, 1.05, 0., 0., 0., 0.707, 0.707]), ObjectMsg('shelf0', 'package://wecook_assets/data/furniture/bookcase.urdf', [0.3, -1.05, 0., 0., 0., 0., 1.]), ObjectMsg('stove0', 'package://wecook_assets/data/objects/stove.urdf', [-0.35, 0.95, 0.75, 0., 0., 0., 1.]), ObjectMsg('pot0', 'package://wecook_assets/data/objects/cooking_pot.urdf', [0.35, 1.1, 0.75, 0., 0., 0., 1.]), ObjectMsg('skillet0', 'package://wecook_assets/data/objects/skillet.urdf', [0.3, 0.7, 0.75, 0., 0., -0.707, .707]), ObjectMsg('cutting_board0', 'package://wecook_assets/data/objects/cutting_board.urdf', [0.3, -0.3, 0.75, 0., 0., 0., 1.]), ObjectMsg('knife0', 'package://wecook_assets/data/objects/knife_big.urdf', [0.215, -0.55, 0.775, 0., 0., 0., 1.]), ObjectMsg('plate0', 'package://wecook_assets/data/objects/plate.urdf', [0.3, 0.075, 0.75, 0., 0., 0., 1.]), ObjectMsg('bowl0', 'package://wecook_assets/data/objects/bowl_green.urdf', [0.45, 0.375, 0.75, 0., 0., 0., 1.]), ObjectMsg('bowl1', 'package://wecook_assets/data/objects/bowl_green.urdf', [0.15, 0.375, 0.75, 0., 0., 0., 1.]), ObjectMsg('oil0', 'package://wecook_assets/data/objects/olive_oil.urdf', [0., 1.15, 0.75, 0., 0., 0.707, 0.707]), ObjectMsg('salt0', 'package://wecook_assets/data/objects/salt.urdf', [0., 1.0, 0.75, 0., 0., 0.707, 0.707]), ObjectMsg('pepper0', 'package://wecook_assets/data/objects/black_pepper.urdf', [0., 0.9, 0.75, 0., 0., 0.707, 0.707]), ObjectMsg('chicken0', 'package://wecook_assets/data/food/chicken.urdf', [0.3, 0.075, 0.757, 0., 0., 0., 1.]), ObjectMsg('lime0', 'package://wecook_assets/data/food/lime.urdf', [0.3, -0.3, 0.757, 0., 0., 0., 1.]), ObjectMsg('pasta0', 'package://wecook_assets/data/food/pasta.urdf', [0.45, 0.375, 0.757, 0., 0., 0., 1.])], [ContainingMsg(['plate0', 'chicken0']), ContainingMsg(['bowl0', 'pasta0'])]) task_msg = TaskMsg(scene_msg, [ActionMsg(['p1'], 'cut', ['plate0'], 'knife0', ['lime0'])], [AgentMsg('p1', 'r', [0., 0., 0.75, 0., 0., 0., 0.])], "", "", "follow", "RRTConnect", False) # sleeping 10 seconds to publish rospy.sleep(1) pub.publish(task_msg) if __name__ == '__main__': try: talker() except rospy.ROSInterruptException: pass
2.140625
2
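The chicken_pasta demo above is publish-only. As a sanity check one can run a minimal listener on the same topic; this sketch assumes the wecook message package is built and on the ROS package path, and it only logs that a task arrived rather than guessing at TaskMsg field names.

#!/usr/bin/env python3
import rospy
from wecook.msg import TaskMsg

def on_task(msg):
    # Avoid assuming TaskMsg's field names here; just confirm receipt.
    rospy.loginfo("Received a TaskMsg on WeCookDispatch")

def listener():
    rospy.init_node('wecook_task_listener', anonymous=True)
    rospy.Subscriber('WeCookDispatch', TaskMsg, on_task)
    rospy.spin()

if __name__ == '__main__':
    listener()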
volttron/platform/vip/agent/subsystems/heartbeat.py
rmay-intwine/volttron
0
916
# -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # # Copyright 2017, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # This material was prepared as an account of work sponsored by an agency of # the United States Government. Neither the United States Government nor the # United States Department of Energy, nor Battelle, nor any of their # employees, nor any jurisdiction or organization that has cooperated in the # development of these materials, makes any warranty, express or # implied, or assumes any legal liability or responsibility for the accuracy, # completeness, or usefulness or any information, apparatus, product, # software, or process disclosed, or represents that its use would not infringe # privately owned rights. Reference herein to any specific commercial product, # process, or service by trade name, trademark, manufacturer, or otherwise # does not necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors expressed # herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY operated by # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} import os import weakref from datetime import datetime from .base import SubsystemBase from volttron.platform.messaging.headers import TIMESTAMP from volttron.platform.agent.utils import (get_aware_utc_now, format_timestamp) from volttron.platform.scheduling import periodic from ..errors import Unreachable, VIPError """The heartbeat subsystem adds an optional periodic publish to all agents. Heartbeats can be started with agents and toggled on and off at runtime. """ __docformat__ = 'reStructuredText' __version__ = '1.0' class Heartbeat(SubsystemBase): def __init__(self, owner, core, rpc, pubsub, heartbeat_autostart, heartbeat_period): self.owner = owner self.core = weakref.ref(core) self.pubsub = weakref.ref(pubsub) self.autostart = heartbeat_autostart self.period = heartbeat_period self.enabled = False self.connect_error = False def onsetup(sender, **kwargs): rpc.export(self.start, 'heartbeat.start') rpc.export(self.start_with_period, 'heartbeat.start_with_period') rpc.export(self.stop, 'heartbeat.stop') rpc.export(self.restart, 'heartbeat.restart') rpc.export(self.set_period, 'heartbeat.set_period') def onstart(sender, **kwargs): if self.autostart: self.start() core.onsetup.connect(onsetup, self) core.onstart.connect(onstart, self) core.onconnected.connect(self.reconnect) def start(self): """RPC method Starts an agent's heartbeat. """ if not self.enabled: self.scheduled = self.core().schedule(periodic(self.period), self.publish) self.enabled = True def start_with_period(self, period): """RPC method Set period and start heartbeat. :param period: Time in seconds between publishes. 
""" self.set_period(period) self.start() def reconnect(self, sender, **kwargs): if self.connect_error: self.restart() self.connect_error = False def stop(self): """RPC method Stop an agent's heartbeat. """ if self.enabled: # Trap the fact that scheduled may not have been # set yet if the start hasn't been called. try: self.scheduled.cancel() except AttributeError: pass self.enabled = False def restart(self): """RPC method Restart the heartbeat with the current period. The heartbeat will be immediately sending the heartbeat to the message bus. """ self.stop() self.start() def set_period(self, period): """RPC method Set heartbeat period. :param period: Time in seconds between publishes. """ if self.enabled: self.stop() self.period = period self.start() else: self.period = period def publish(self): topic = 'heartbeat/' + self.core().identity headers = {TIMESTAMP: format_timestamp(get_aware_utc_now())} message = self.owner.vip.health.get_status_value() try: self.pubsub().publish('pubsub', topic, headers, message) except Unreachable as exc: self.connect_error = True self.stop()
1.210938
1
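Because the subsystem exports its controls over RPC ('heartbeat.start', 'heartbeat.stop', 'heartbeat.start_with_period', ...), a peer agent can drive another agent's heartbeat remotely. A sketch of that call pattern; the agent instance and the peer identity are placeholders, not part of the module above.

def poke_peer_heartbeat(agent, peer="some.agent"):
    """Start a peer's heartbeat at a 30 s period, then stop it again.

    `agent` is assumed to be an already-connected VOLTTRON Agent and
    `peer` the VIP identity of the target agent (placeholder value).
    The method names map onto the rpc.export() calls in the subsystem.
    """
    agent.vip.rpc.call(peer, "heartbeat.start_with_period", 30).get(timeout=10)
    agent.vip.rpc.call(peer, "heartbeat.stop").get(timeout=10)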
datasets/experimental/ni_superalloys/Ni_superalloy.py
kyawlin/smlb
0
917
"""Ni-Superalloy dataset. Scientific Machine Learning Benchmark A benchmark of regression models in chem- and materials informatics. 2019, <NAME>, Citrine Informatics. See class NiSuperalloyDataset for details. """ import os import json import zipfile from typing import List, Optional, Tuple, Union import numpy as np from smlb.exceptions import InvalidParameterError from smlb.parameters import params from smlb.tabular_data import TabularData class NiSuperalloyDataset(TabularData): """ Ni-Superalloy dataset. Based on: <NAME>, <NAME>, <NAME>, <NAME>: Design of a nickel-base superalloy using a neural network, Materials & Design 131: 358-365, Elsevier, 2017. DOI 10.1016/j.matdes.2017.06.007 The dataset was downloaded from the Citrination platform (https://citrination.com), dataset identifier #153493, Version 10. There are 2800 rows. The data have columns for composition (25 elements are present in at least one row), whether the alloy was powder processed (0 or 1), whether it was pressure treated (0 or 1), heat treatment time (hours) and temperature (degrees Celcius) for up to 4 heat treatment steps, the total time spent in heat treatment (hours), the maximum heat treatment temperature (degrees Celcius), and the area under the time-temperature curve (degrees Celcius * hours). A value of 0 generally implies that the heat treatment step was not done, but there are some missing values. The total time and max temperature are generally more reliable than the individual heating steps. The total compositions do not always add up to 100%, but with about a dozen exceptions they always add up to somewhere between 95% and 105%. There are also three columns for a pressure treatment step (temperature, time, pressure), but since only 51 rows have non-zero entries, this information is not used. There are 5 labels: ultimate tensile strength (MPa), elongation (unitless), stress rupture stress (MPa), stress rupture time (hours), and yield strength (MPa). Tensile strength and elongation occur together in 898 rows, stress rupture stress and time occur together in 856 rows, and yield strength occurs in 1046 rows. 898+856+1046=2800, so every row has exactly one output set. The other values are denoted as NaN. """ DEFAULT_PATH = os.path.split(os.path.realpath(__file__))[0] + "/ni_superalloys_3.json.zip" POWDER_PROCESSED_NO = 0 POWDER_PROCESSED_YES = 1 def __init__( self, labels_to_load: Optional[Union[str, List[str]]] = None, ignore_dubious: bool = False ): """Initialize Ni-superalloy dataset with specified labels. Parameters: labels_to_load (str or List[str]): which labels to load. Options are 'Yield Strength', 'Ultimate Tensile Strength', 'Stress Rupture Time', 'Stress Rupture Stress', and 'Elongation'. If None, then all labels are loaded. 
ignore_dubious: whether or not to ignore samples that have something questionable about them """ labels_to_load = params.optional_( labels_to_load, lambda arg: params.any_( arg, params.string, lambda arg: params.sequence(arg, type_=str), ), ) ignore_dubious = params.boolean(ignore_dubious) filepath = self.DEFAULT_PATH data, labels = self._load_data_and_labels(filepath, labels_to_load, ignore_dubious) super().__init__(data=data, labels=labels) def _load_data_and_labels( self, filepath: str, labels_to_load: Optional[List[str]] = None, ignore_dubious: bool = False, ): """Load data and labels from .json file.""" raw = self._unzip_json_file(filepath) if ignore_dubious: raw = [e for e in raw if self._filter_dubious(e)] # dtype=object is necessary because this is a mixed-type array (float and string) data = np.array([self._parse_json_data(e) for e in raw], dtype=object) labels = np.array([self._parse_json_labels(e, labels_to_load) for e in raw], dtype=float) return data, labels @staticmethod def _unzip_json_file(filepath: str): """Open and read zipped json file.""" filename = os.path.basename(filepath) assert ( filename[-4:] == ".zip" ), f"File path must point to a .zip file, instead got '{filepath}'" with zipfile.ZipFile(filepath) as zf: unzipped_filename = filename[:-4] with zf.open(unzipped_filename) as fp: raw = json.load(fp) return raw @staticmethod def _extract_raw_composition(entry: dict) -> List[dict]: """Get composition in its raw form.""" raw_composition = entry.get("composition") if raw_composition is None or not isinstance(raw_composition, list): raise InvalidParameterError( expected="Chemical composition as a list", got=raw_composition ) return raw_composition @staticmethod def _filter_dubious(entry: dict) -> bool: """ Determine whether or not a json entry has something questionable about it. Currently, the only thing filtered on is if the composition has an asterisk in it, which occurs for 6 samples. Parameters: entry (dict): A json entry corresponding to a row in the dataset. Returns: bool True if the composition contains an asterisk. """ raw_composition = NiSuperalloyDataset._extract_raw_composition(entry) composition_dict = NiSuperalloyDataset._parse_composition_as_dict(raw_composition) composition_dict_float, exception_caught = NiSuperalloyDataset._dict_values_to_float( composition_dict ) return not exception_caught def _parse_json_data(self, entry: dict): """ Helper function to parse data in a single row from the raw json. Parameters: entry (dict): A json entry corresponding to a row in the dataset. Returns: array Array of data in this row. 
""" assert entry["category"] == "system.chemical" raw_composition = NiSuperalloyDataset._extract_raw_composition(entry) composition: str = self._parse_composition(raw_composition) properties = entry.get("properties") if properties is None or not isinstance(properties, list): raise InvalidParameterError( expected="A list of dictionaries, one for each property", got=properties ) heat_treatment_1_time = self._get_scalar_property( properties, "Heat treatment 1 Time", units="hours", default_value=0 ) heat_treatment_1_temp = self._get_scalar_property( properties, "Heat treatment 1 Temperature", units="$^{\\circ}$C", default_value=0 ) heat_treatment_2_time = self._get_scalar_property( properties, "Heat treatment 2 Time", units="hours", default_value=0 ) heat_treatment_2_temp = self._get_scalar_property( properties, "Heat treatment 2 Temperature", units="$^{\\circ}$C", default_value=0 ) heat_treatment_3_time = self._get_scalar_property( properties, "Heat treatment 3 Time", units="hours", default_value=0 ) heat_treatment_3_temp = self._get_scalar_property( properties, "Heat treatment 3 Temperature", units="$^{\\circ}$C", default_value=0 ) heat_treatment_4_time = self._get_scalar_property( properties, "Heat treatment 4 Time", units="hours", default_value=0 ) heat_treatment_4_temp = self._get_scalar_property( properties, "Heat treatment 4 Temperature", units="$^{\\circ}$C", default_value=0 ) total_heat_treatment_time = self._get_scalar_property( properties, "Total heat treatment time", units="hours" ) max_heat_treatment_temp = self._get_scalar_property( properties, "Max Heat Treatment Temperature", units="$^{\\circ}$C" ) area_under_heat_treatment_curve = self._get_scalar_property( properties, "Area under heat treatment curve", units="$^{\\circ}$C * hours" ) powder_processed_dict = {"No": self.POWDER_PROCESSED_NO, "Yes": self.POWDER_PROCESSED_YES} powder_processed = self._get_categorical_property( properties, "Powder processed", categories_dict=powder_processed_dict ) data_array = [ composition, heat_treatment_1_time, heat_treatment_1_temp, heat_treatment_2_time, heat_treatment_2_temp, heat_treatment_3_time, heat_treatment_3_temp, heat_treatment_4_time, heat_treatment_4_temp, total_heat_treatment_time, max_heat_treatment_temp, area_under_heat_treatment_curve, powder_processed, ] return data_array def _parse_json_labels(self, entry: dict, labels_to_load: Optional[List[str]] = None): """ Helper function to parse labels in a single row from the raw json. Parameters: entry (dict): A json entry corresponding to a row in the dataset. labels_to_load (List[str]): Optional list of labels to load. Returns: array Array of labels in this row that we are interested in. """ if labels_to_load is None: labels_to_load = [ "Yield Strength", "Ultimate Tensile Strength", "Stress Rupture Time", "Stress Rupture Stress", "Elongation", ] properties = entry.get("properties") if properties is None or not isinstance(properties, list): raise InvalidParameterError( expected="A list of dictionaries, one for each property", got=properties ) labels_array = [] for label in labels_to_load: labels_array.append(self._get_scalar_property(properties, label, default_value=None)) return labels_array @staticmethod def _parse_composition(raw_composition: List[dict]) -> str: """ Helper function to parse composition as a string. Parameters: raw_composition (List[dict]): A list, each entry of which corresponds to an element. An entry is a dict with an 'element' key and an 'idealWeightPercent' key. 
The element is a string (e.g., 'Cu') and the weight percent is another dict with a single key, 'value', pointing to a floating point number. The values are in percentage points, and add up to ~100. Returns: str Chemical composition as string, e.g. 'Al5.5Ni94.0W0.5' """ composition_dict = NiSuperalloyDataset._parse_composition_as_dict(raw_composition) composition_dict_float, _ = NiSuperalloyDataset._dict_values_to_float(composition_dict) composition_str: str = "" for element_name, element_amount in composition_dict_float.items(): if element_amount > 0: composition_str += element_name + str(element_amount) return composition_str @staticmethod def _parse_composition_as_dict(raw_composition: List[dict]) -> dict: """ Helper function to parse composition as a dictionary. Parameters: raw_composition (List[dict]): A list, each entry of which corresponds to an element. An entry is a dict with an 'element' key and an 'idealWeightPercent' key. The element is a string (e.g., 'Cu') and the weight percent is another dict with a single key, 'value', pointing to a floating point number. The values are in percentage points, and add up to ~100 (but not exactly). Returns: dict Chemical composition as a dictionary with the elements as keys and their raw amounts as values """ composition_dict = dict() for entry in raw_composition: try: element_name = entry["element"] element_amount = entry["idealWeightPercent"]["value"] except KeyError: raise InvalidParameterError( expected="Element amount as a dictionary of the form\n" "{'element': <element name>," "'idealWeightPercent': " "{'value': <element amount>}}", got=entry, ) composition_dict[element_name] = element_amount return composition_dict @staticmethod def _dict_values_to_float(d: dict) -> Tuple[dict, bool]: """ Convert a dictionary's values to their floating point representations, if possible. Parameters: d: a dictionary Returns: dict, bool A modified version of `d`, and a boolean flag indicating whether or not an Exception was caught """ d_copy = dict() exception_caught = False for key, value in d.items(): try: value_float = float(value) except ValueError: exception_caught = True value_float = NiSuperalloyDataset._parse_peculiar_amount(value) d_copy[key] = value_float return d_copy, exception_caught @staticmethod def _parse_peculiar_amount(x: str) -> float: """ Deals with dataset-specific-peculiarities in composition amounts. Some composition amounts have a trailing asterisk, e.g., '2*'. The meaning is unclear. Perhaps it denotes that the amount is imprecise. In any case, they only occur in 6 samples. The trailing asterisk will be ignored. """ if x[-1] == "*": x = x[:-1] try: return float(x) except ValueError: raise InvalidParameterError("Amount as a float", x) def _get_scalar_property( self, properties: List[dict], property_name: str, units: Optional[str] = None, default_value: Optional[float] = None, ) -> float: """ A helper function to get a single scalar property. This calls _get_single_property and then checks that the result can be turned into a float. Parameters: properties: A list of dicts, each of which is a single property. property_name: The name of the property to get the value of. units: Optional expected units string. default_value: Value to return if `property_name` is not present. Raises: InvalidParameterError: if the value cannot be expressed as a float Returns: float The value of the desired property. 
""" try: val = self._get_single_property(properties, property_name, units, default_value) if val is None: return None return float(val) except ValueError: raise InvalidParameterError( expected=f"Property {property_name} should have a value " f"that can be expressed as a float", got=properties, ) def _get_categorical_property( self, properties: List[dict], property_name: str, categories_dict: dict ) -> int: """ Helper function to get a single categorical property as an int. Parameters: properties: A list of dicts, each of which is a single property. property_name: The name of the property to get the value of. categories_dict: Dict from the categorical property (string) to a unique integer value. Raises: InvalidParameterError: if the value is not in the expected list of possible categories as given by the keys in `categories_dict` Returns: int An integer that corresponds to the value of the desired property. """ category = self._get_single_property(properties, property_name) try: return categories_dict[category] except KeyError: raise InvalidParameterError( f"A value in the array: {categories_dict.keys()}", category ) @staticmethod def _get_single_property( properties: List[dict], property_name: str, units: Optional[str] = None, default_value=None ): """ Helper function to get a single property. Parameters: properties: A list of dicts, each of which is a single property. Each entry is expected to have a 'name' field that corresponds to the property name and a `scalars` field that is a list with one entry, a dict of the form {'value': <property value>}. It may also have a 'units' field. property_name: The name of the property to get the value of. `properties` is expected to have exactly one entry with the 'name' field equal to `property_name`. units: Optional expected value of 'units' field. If specified, then there must be a 'units' field and its value must correspond to `units`. default_value: Value to return if `property_name` is not present. Raises: InvalidParameterError: if `properties` does not conform to the expected structure Returns: The value of the property `property_name` """ matching_props = [prop for prop in properties if prop.get("name") == property_name] if len(matching_props) == 0: return default_value elif len(matching_props) > 1: raise InvalidParameterError( expected=f"Only one entry in properties should have name" f" '{property_name}'", got=properties, ) matching_prop = matching_props[0] try: scalars = matching_prop["scalars"] assert len(scalars) == 1 val = scalars[0]["value"] if units is not None: assert matching_prop["units"] == units except (KeyError, AssertionError): units_str = "" if units is None else f", 'units': {units}" raise InvalidParameterError( expected="Property as a dictionary of the form\n" "{'name': <property name>, 'scalars': " "[{'value': <property value>}]" + units_str + "}", got=matching_prop, ) return val
2.71875
3
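A short construction sketch for NiSuperalloyDataset. The label names and keyword arguments come from the class docstring above; the import path is a guess based on the file's location in the repository.

# Import path is an assumption based on where the file sits in the repo.
from datasets.experimental.ni_superalloys.Ni_superalloy import NiSuperalloyDataset

# Load only yield strength, and drop the handful of samples whose
# compositions contain a trailing '*' (ignore_dubious=True).
dataset = NiSuperalloyDataset(
    labels_to_load=["Yield Strength"],
    ignore_dubious=True,
)
print(type(dataset).__name__, "loaded")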
metricbeat/module/postgresql/test_postgresql.py
SHolzhauer/beats
4
918
import metricbeat import os import pytest import sys import unittest class Test(metricbeat.BaseTest): COMPOSE_SERVICES = ['postgresql'] def common_checks(self, output): # Ensure no errors or warnings exist in the log. self.assert_no_logged_warnings() for evt in output: top_level_fields = metricbeat.COMMON_FIELDS + ["postgresql"] self.assertCountEqual(self.de_dot(top_level_fields), evt.keys()) self.assert_fields_are_documented(evt) def get_hosts(self): username = "postgres" host = self.compose_host() dsn = "postgres://{}?sslmode=disable".format(host) return ( [dsn], username, os.getenv("POSTGRESQL_PASSWORD"), ) @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") @pytest.mark.tag('integration') def test_activity(self): """ PostgreSQL module outputs an event. """ hosts, username, password = self.get_hosts() self.render_config_template(modules=[{ "name": "postgresql", "metricsets": ["activity"], "hosts": hosts, "username": username, "password": password, "period": "5s" }]) proc = self.start_beat() self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() output = self.read_output_json() self.common_checks(output) for evt in output: assert "name" in evt["postgresql"]["activity"]["database"] assert "oid" in evt["postgresql"]["activity"]["database"] assert "state" in evt["postgresql"]["activity"] @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") @pytest.mark.tag('integration') def test_database(self): """ PostgreSQL module outputs an event. """ hosts, username, password = self.get_hosts() self.render_config_template(modules=[{ "name": "postgresql", "metricsets": ["database"], "hosts": hosts, "username": username, "password": password, "period": "5s" }]) proc = self.start_beat() self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() output = self.read_output_json() self.common_checks(output) for evt in output: assert "name" in evt["postgresql"]["database"] assert "oid" in evt["postgresql"]["database"] assert "blocks" in evt["postgresql"]["database"] assert "rows" in evt["postgresql"]["database"] assert "conflicts" in evt["postgresql"]["database"] assert "deadlocks" in evt["postgresql"]["database"] @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") @pytest.mark.tag('integration') def test_bgwriter(self): """ PostgreSQL module outputs an event. """ hosts, username, password = self.get_hosts() self.render_config_template(modules=[{ "name": "postgresql", "metricsets": ["bgwriter"], "hosts": hosts, "username": username, "password": password, "period": "5s" }]) proc = self.start_beat() self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() output = self.read_output_json() self.common_checks(output) for evt in output: assert "checkpoints" in evt["postgresql"]["bgwriter"] assert "buffers" in evt["postgresql"]["bgwriter"] assert "stats_reset" in evt["postgresql"]["bgwriter"]
2.15625
2
pytorch_lightning/accelerators/cpu_backend.py
ozen/pytorch-lightning
0
919
<gh_stars>0 # Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from pytorch_lightning.accelerators.base_backend import Accelerator from pytorch_lightning.utilities import AMPType, rank_zero_warn from pytorch_lightning.utilities.exceptions import MisconfigurationException class CPUBackend(Accelerator): def __init__(self, trainer, cluster_environment=None): super().__init__(trainer, cluster_environment) def setup(self, model): # run through amp wrapper if self.trainer.amp_backend: raise MisconfigurationException('amp + cpu is not supported. Please use a GPU option') # call setup after the ddp process has connected self.trainer.call_setup_hook(model) # CHOOSE OPTIMIZER # allow for lr schedulers as well self.setup_optimizers(model) self.trainer.model = model def train(self): model = self.trainer.model # set up training routine self.trainer.train_loop.setup_training(model) # train or test results = self.train_or_test() return results def training_step(self, args): if self.trainer.amp_backend == AMPType.NATIVE: with torch.cuda.amp.autocast(): output = self.trainer.model.training_step(*args) else: output = self.trainer.model.training_step(*args) return output def validation_step(self, args): if self.trainer.amp_backend == AMPType.NATIVE: with torch.cuda.amp.autocast(): output = self.trainer.model.validation_step(*args) else: output = self.trainer.model.validation_step(*args) return output def test_step(self, args): if self.trainer.amp_backend == AMPType.NATIVE: with torch.cuda.amp.autocast(): output = self.trainer.model.test_step(*args) else: output = self.trainer.model.test_step(*args) return output
2.0625
2
books/admin.py
aurphillus/Django-Library-Completed
0
920
from django.contrib import admin from books.models import Genre, Author, Book, TBR # Register your models here. admin.site.register(Genre) admin.site.register(Author) admin.site.register(Book) admin.site.register(TBR)
1.585938
2
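The registrations in books/admin.py are the plain form. An equivalent, slightly more configurable variant uses Django's @admin.register decorator with a ModelAdmin subclass; the list_display fields below are illustrative and must exist on the Book model.

from django.contrib import admin

from books.models import Book


@admin.register(Book)   # replaces admin.site.register(Book)
class BookAdmin(admin.ModelAdmin):
    # Illustrative options; the field names are assumptions about the model.
    list_display = ("title", "author")
    search_fields = ("title",)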
rabbitmq/tests/common.py
jfmyers9/integrations-core
1
921
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) import os from packaging import version from datadog_checks.base.utils.common import get_docker_hostname HERE = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.dirname(os.path.dirname(HERE)) RABBITMQ_VERSION_RAW = os.environ['RABBITMQ_VERSION'] RABBITMQ_VERSION = version.parse(RABBITMQ_VERSION_RAW) CHECK_NAME = 'rabbitmq' HOST = get_docker_hostname() PORT = 15672 URL = 'http://{}:{}/api/'.format(HOST, PORT) CONFIG = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'queues': ['test1'], 'tags': ["tag1:1", "tag2"], 'exchanges': ['test1'], } CONFIG_NO_NODES = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'queues': ['test1'], 'tags': ["tag1:1", "tag2"], 'exchanges': ['test1'], 'collect_node_metrics': False, } CONFIG_REGEX = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'queues_regexes': [r'test\d+'], 'exchanges_regexes': [r'test\d+'], } CONFIG_VHOSTS = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'vhosts': ['/', 'myvhost'], } CONFIG_WITH_FAMILY = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'tag_families': True, 'queues_regexes': [r'(test)\d+'], 'exchanges_regexes': [r'(test)\d+'], } CONFIG_DEFAULT_VHOSTS = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'vhosts': ['/', 'test'], } CONFIG_TEST_VHOSTS = { 'rabbitmq_api_url': URL, 'rabbitmq_user': 'guest', 'rabbitmq_pass': '<PASSWORD>', 'vhosts': ['test', 'test2'], } EXCHANGE_MESSAGE_STATS = { 'ack': 1.0, 'ack_details': {'rate': 1.0}, 'confirm': 1.0, 'confirm_details': {'rate': 1.0}, 'deliver_get': 1.0, 'deliver_get_details': {'rate': 1.0}, 'publish': 1.0, 'publish_details': {'rate': 1.0}, 'publish_in': 1.0, 'publish_in_details': {'rate': 1.0}, 'publish_out': 1.0, 'publish_out_details': {'rate': 1.0}, 'return_unroutable': 1.0, 'return_unroutable_details': {'rate': 1.0}, 'redeliver': 1.0, 'redeliver_details': {'rate': 1.0}, }
1.867188
2
st2common/st2common/util/pack.py
timgates42/st2
0
922
<reponame>timgates42/st2 # Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import os import re import collections import six from st2common.util import schema as util_schema from st2common.constants.pack import MANIFEST_FILE_NAME from st2common.constants.pack import PACK_REF_WHITELIST_REGEX from st2common.content.loader import MetaLoader from st2common.persistence.pack import Pack from st2common.exceptions.apivalidation import ValueValidationException from st2common.util import jinja as jinja_utils __all__ = [ 'get_pack_ref_from_metadata', 'get_pack_metadata', 'get_pack_warnings', 'get_pack_common_libs_path_for_pack_ref', 'get_pack_common_libs_path_for_pack_db', 'validate_config_against_schema', 'normalize_pack_version' ] # Common format for python 2.7 warning if six.PY2: PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \ "Python 2 support will be dropped in future releases. " \ "Please consider updating your packs to work with Python 3.x" else: PACK_PYTHON2_WARNING = "DEPRECATION WARNING: Pack %s only supports Python 2.x. " \ "Python 2 support has been removed since st2 v3.4.0. " \ "Please update your packs to work with Python 3.x" def get_pack_ref_from_metadata(metadata, pack_directory_name=None): """ Utility function which retrieves pack "ref" attribute from the pack metadata file. If this attribute is not provided, an attempt is made to infer "ref" from the "name" attribute. :rtype: ``str`` """ pack_ref = None # The rules for the pack ref are as follows: # 1. If ref attribute is available, we used that # 2. If pack_directory_name is available we use that (this only applies to packs # which are in sub-directories) # 2. If attribute is not available, but pack name is and pack name meets the valid name # criteria, we use that if metadata.get('ref', None): pack_ref = metadata['ref'] elif pack_directory_name and re.match(PACK_REF_WHITELIST_REGEX, pack_directory_name): pack_ref = pack_directory_name else: if re.match(PACK_REF_WHITELIST_REGEX, metadata['name']): pack_ref = metadata['name'] else: msg = ('Pack name "%s" contains invalid characters and "ref" attribute is not ' 'available. You either need to add "ref" attribute which contains only word ' 'characters to the pack metadata file or update name attribute to contain only' 'word characters.') raise ValueError(msg % (metadata['name'])) return pack_ref def get_pack_metadata(pack_dir): """ Return parsed metadata for a particular pack directory. 
:rtype: ``dict`` """ manifest_path = os.path.join(pack_dir, MANIFEST_FILE_NAME) if not os.path.isfile(manifest_path): raise ValueError('Pack "%s" is missing %s file' % (pack_dir, MANIFEST_FILE_NAME)) meta_loader = MetaLoader() content = meta_loader.load(manifest_path) if not content: raise ValueError('Pack "%s" metadata file is empty' % (pack_dir)) return content def get_pack_warnings(pack_metadata): """ Return warning string if pack metadata indicates only python 2 is supported :rtype: ``str`` """ warning = None versions = pack_metadata.get('python_versions', None) pack_name = pack_metadata.get('name', None) if versions and set(versions) == set(['2']): warning = PACK_PYTHON2_WARNING % pack_name return warning def validate_config_against_schema(config_schema, config_object, config_path, pack_name=None): """ Validate provided config dictionary against the provided config schema dictionary. """ # NOTE: Lazy improt to avoid performance overhead of importing this module when it's not used import jsonschema pack_name = pack_name or 'unknown' schema = util_schema.get_schema_for_resource_parameters(parameters_schema=config_schema, allow_additional_properties=True) instance = config_object try: cleaned = util_schema.validate(instance=instance, schema=schema, cls=util_schema.CustomValidator, use_default=True, allow_default_none=True) for key in cleaned: if (jinja_utils.is_jinja_expression(value=cleaned.get(key)) and "decrypt_kv" in cleaned.get(key) and config_schema.get(key).get('secret')): raise ValueValidationException('Values specified as "secret: True" in config ' 'schema are automatically decrypted by default. Use ' 'of "decrypt_kv" jinja filter is not allowed for ' 'such values. Please check the specified values in ' 'the config or the default values in the schema.') except jsonschema.ValidationError as e: attribute = getattr(e, 'path', []) if isinstance(attribute, (tuple, list, collections.Iterable)): attribute = [str(item) for item in attribute] attribute = '.'.join(attribute) else: attribute = str(attribute) msg = ('Failed validating attribute "%s" in config for pack "%s" (%s): %s' % (attribute, pack_name, config_path, six.text_type(e))) raise jsonschema.ValidationError(msg) return cleaned def get_pack_common_libs_path_for_pack_ref(pack_ref): pack_db = Pack.get_by_ref(pack_ref) pack_common_libs_path = get_pack_common_libs_path_for_pack_db(pack_db=pack_db) return pack_common_libs_path def get_pack_common_libs_path_for_pack_db(pack_db): """ Return the pack's common lib path. This is the path where common code for sensors and actions are placed. For example, if the pack is at /opt/stackstorm/packs/my_pack, you can place common library code for actions and sensors in /opt/stackstorm/packs/my_pack/lib/. This common library code is only available for python sensors and actions. The lib structure also needs to follow a python convention with a __init__.py file. :param pack_db: Pack DB model :type pack_db: :class:`PackDB` :rtype: ``str`` """ pack_dir = getattr(pack_db, 'path', None) if not pack_dir: return None libs_path = os.path.join(pack_dir, 'lib') return libs_path def normalize_pack_version(version): """ Normalize old, pre StackStorm v2.1 non valid semver version string (e.g. 0.2) to a valid semver version string (0.2.0). :rtype: ``str`` """ version = str(version) version_seperator_count = version.count('.') if version_seperator_count == 1: version = version + '.0' return version
1.570313
2
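Two helpers from st2common/st2common/util/pack.py are easy to exercise on their own. A sketch, assuming st2common is importable; the metadata dicts are made up and the names are expected to pass the pack-ref whitelist regex.

from st2common.util.pack import (
    get_pack_ref_from_metadata,
    normalize_pack_version,
)

# Pre-semver version strings gain a patch component; valid ones pass through.
assert normalize_pack_version("0.2") == "0.2.0"
assert normalize_pack_version("1.0.0") == "1.0.0"

# "ref" wins when present; otherwise a well-formed "name" is used.
print(get_pack_ref_from_metadata({"name": "my_pack", "ref": "my_pack_ref"}))  # my_pack_ref
print(get_pack_ref_from_metadata({"name": "my_pack"}))                        # my_pack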
module1-introduction-to-sql/query.py
jrslagle/DS-Unit-3-Sprint-2-SQL-and-Databases
0
923
# Look at the charactercreator_character table # GET_CHARACTERS = """ # SELECT * # FROM charactercreator_character; # """ # How many total Characters are there? (302) TOTAL_CHARACTERS = """ SELECT COUNT(*) as number_of_characters FROM charactercreator_character; """ # How many of each specific subclass? # TOTAL_SUBCLASS = """ # SELECT # (SELECT COUNT(*) FROM charactercreator_necromancer) AS necros, # (SELECT COUNT(*) FROM charactercreator_mage) AS mages, # (SELECT COUNT(*) FROM charactercreator_thief) AS thiefs, # (SELECT COUNT(*) FROM charactercreator_cleric) AS clerics, # (SELECT COUNT(*) FROM charactercreator_fighter) AS fighters; # """ CLASS = "SELECT COUNT(*) FROM charactercreator_" # How many total Items? (174) TOTAL_ITEMS = """ SELECT COUNT(item_id) as items FROM armory_item; """ # How many of the Items are weapons? (37) WEAPONS = """ SELECT COUNT(item_ptr_id) FROM armory_weapon; """ # How many of the items are not weapons? (137) NON_WEAPONS = """ SELECT COUNT(items.name) FROM armory_item as items WHERE items.item_id NOT IN( SELECT armory_weapon.item_ptr_id FROM armory_weapon); """ # How many Items does each character have? (Return first 20 rows) CHARACTER_ITEMS = """ SELECT character.name as "character_name", COUNT(inventory.id) as "#_of_items" FROM charactercreator_character AS character, charactercreator_character_inventory AS inventory WHERE character.character_id = inventory.character_id GROUP BY character.name ORDER BY character.name LIMIT 20; """ # How many Weapons does each character have? (Return first 20 rows) CHARACTER_WEAPONS = """ SELECT character.name as "character_name", COUNT(weapon.item_ptr_id) as "#_of_weapons" FROM charactercreator_character AS character, charactercreator_character_inventory AS inventory, armory_weapon as weapon WHERE character.character_id = inventory.character_id AND inventory.item_id = weapon.item_ptr_id GROUP BY character.name ORDER BY character.name LIMIT 20; """ # On average, how many Items does each Character have? (3.02) AVG_CHARACTER_ITEMS = """ SELECT AVG("#_of_items") as "avg_#_of_items" FROM ( SELECT COUNT(inventory.id) AS "#_of_items" FROM charactercreator_character AS character, charactercreator_character_inventory AS inventory WHERE character.character_id = inventory.character_id GROUP BY character.name ); """ # On average, how many Weapons does each character have? (0.67) AVG_CHARACTER_WEAPONS = """ SELECT AVG(weapon_count) as avg_weapons_per_char FROM ( SELECT character.character_id, COUNT(DISTINCT weapon.item_ptr_id) as weapon_count FROM charactercreator_character AS character LEFT JOIN charactercreator_character_inventory inventory -- characters may have zero items ON character.character_id = inventory.character_id LEFT JOIN armory_weapon weapon -- many items are not weapons, so only retain weapons ON inventory.item_id = weapon.item_ptr_id GROUP BY character.character_id ) subq; """
3.4375
3
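query.py above only holds SQL strings; executing them needs a connection to the RPG SQLite database. A sketch with the standard-library sqlite3 driver; the rpg_db.sqlite3 filename is an assumption about the sprint's database file.

import sqlite3

import query  # the module above

conn = sqlite3.connect("rpg_db.sqlite3")  # database path is an assumption
curs = conn.cursor()

print("total characters:", curs.execute(query.TOTAL_CHARACTERS).fetchone()[0])
print("total items:", curs.execute(query.TOTAL_ITEMS).fetchone()[0])

# CLASS is a fragment meant to be completed with a subclass table suffix.
for subclass in ("mage", "thief", "cleric", "fighter", "necromancer"):
    count = curs.execute(query.CLASS + subclass + ";").fetchone()[0]
    print(subclass, count)

conn.close()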
pixelproject/grid.py
MickaelRigault/pixelproject
0
924
<filename>pixelproject/grid.py #! /usr/bin/env python # import warnings import numpy as np UNIT_SQUARE = np.asarray([[0,0],[0,1],[1,1],[1,0]])-0.5 from propobject import BaseObject from shapely import geometry import pandas import geopandas # ======================= # # # # Functions # # # # ======================= # def get_simple_grid(xbounds, ybounds, shift_origin=None): """ """ xbounds = np.atleast_1d(xbounds) if len(xbounds)==1: xmin,xmax = 0,xbounds[0] else: xmin,xmax = xbounds ybounds = np.atleast_1d(ybounds) if len(ybounds)==1: ymin,ymax = 0,ybounds[0] else: ymin,ymax = ybounds pixels = np.mgrid[xmin:xmax,ymin:ymax] pixels2_flat = np.concatenate(pixels.T, axis=0) if shift_origin is not None: # not += because conflict between int and float array pixels2_flat = pixels2_flat+ shift_origin return Grid(pixels2_flat, UNIT_SQUARE) # ======================= # # # # Classes # # # # ======================= # class GridProjector( BaseObject ): """ """ PROPERTIES = ["gridin", "gridout"] DERIVED_PROPERTIES = ["gridinterest"] def __init__(self, grid_in=None, grid_out=None): """ """ if grid_in is not None: self.set_grid(grid_in, "in") if grid_out is not None: self.set_grid(grid_out, "out") # =================== # # Methods # # =================== # # --------- # # SETTER # # --------- # def set_grid(self, grid, which="in"): """ """ if which not in ["in","out"]: raise ValueError("Which should either be 'in' our 'out'") self._properties["grid%s"%which] = grid self._derived_properties["gridinterest"] = None def _measure_gridinterest_(self): """ """ # -- internal -- # def localdef_get_area(l): return l.geometry.area/self.gridin.geodataframe.iloc[l.id_1].geometry.area # -------------- # if self.gridin is not None and self.gridout is not None: # # Most likely there is a faster method if is_shape_unique # self._derived_properties["gridinterest"] = geopandas.overlay(self.gridin.geodataframe, self.gridout.geodataframe, how='intersection') self.gridinterest["area"] = self.gridinterest.apply(localdef_get_area, axis=1) else: warnings.warn("Cannot measure gridinterest, because gridin and/or gridout is/are None") # -------------- # # Measurement # # -------------- # def project_data(self, data, as_serie=True, use="sum"): """ Use gridinteresect Parameters ---------- data: [ndarray or string or pandas.Serie] data associated to gridin that should be projected in gridout. could be: - ndarray: must have the same length as gridin - string: name of a gridin column (pandas) - pandas.Serie: serie that will be matched with gridin """ # Calcul itself projected_data = self._project_data_(self._parse_data_(data), use=use) if as_serie: return projected_data projected_data_array = np.zeros( len(self.gridout.geodataframe) ) projected_data_array[projected_data.index.values] = projected_data.values return projected_data_array def _project_data_(self, data, use="sum"): """ """ self.gridinterest["_tmp"] = data[ self.gridin.geodataframe.loc[ self.gridinterest["id_1"]].index ] * self.gridinterest["area"] return getattr(self.gridinterest.groupby("id_2")["_tmp"],use)() def _parse_data_(self,data): """ Parameters ---------- data: [ndarray or string or pandas.Serie] data associated to gridin that should be projected in gridout. 
could be: - ndarray: must have the same length as gridin - string: name of a gridin column (pandas) - pandas.Serie: serie that will be matched with gridin Returns ------- ndarray """ if type(data) == str: if data not in self.gridin.geodataframe.columns: raise ValueError("Unknown gridin column '%s'"%data) return self.gridin.geodataframe[data].values elif type(data) == pandas.Series: return data.values elif len(data) != len(self.gridin.geodataframe): raise ValueError("data given as ndarray but lengthes do not match") return data # =================== # # Properties # # =================== # @property def gridin(self): """ """ return self._properties["gridin"] @property def gridout(self): """ """ return self._properties["gridout"] @property def gridinterest(self): """ """ if self._derived_properties["gridinterest"] is None: self._measure_gridinterest_() return self._derived_properties["gridinterest"] class Grid( BaseObject ): PROPERTIES = ["pixels", "shape"] SIDE_PROPERTIES = ["indexes"] DERIVED_PROPERTIES = ["vertices","geodataframe", "triangulation"] def __init__(self, pixels=None, shape=UNIT_SQUARE, indexes=None): """ """ if pixels is not None: self.set_pixels(pixels,shape=shape) if indexes is not None: self.set_indexes(indexes) # =================== # # Methods # # =================== # @classmethod def from_stamps(cls, stamp, origin=[0,0]): """ stamps are 2d array, something you could to ax.imshow(stamps) data will be stored as 'data' in the grid's dataframe """ this = get_simple_grid(*np.shape(stamp), shift_origin=origin) this.add_data(np.ravel(stamp), "data") return this @classmethod def from_vertices(cls, vertices, indexes=None): """ directly provide the vertices Parameters: ----------- vertices: [list of array or dictionary] The vertices of all the grid entries. Could have two format: - list of array: [[vert_1],[vert_2],....], then you may want to provide indexes - dictionary: {id_1:vert_1,id_2: vert_2, ...} if a dictionary is provided, the indexes will be set by the vertices. indexes: [list or None] -optional- (Ignored if vertices is a dict) If you provide vertices as a list of vertices, you can provide the indexes of each of the vertices. -> if None, then indexes = np.arange(len(vertices)) Returns ------- Grid """ this = cls() if type(vertices) is dict: indexes, vertices = list(vertices.keys()), list(vertices.values()) this.set_vertices(vertices) if indexes is not None: this.set_indexes(indexes) return this @classmethod def set_from(cls, datainput): """ Creates a new Grid objects from the given input data: Parameters ---------- datainput: [geopandas.geodataframe.GeoDataFrame or ndarray] this could either be a: - geodataframe (and this calls self.set_geodataframe) - geoSeries - ndarray: if 3-shaped, this calls set_vertices ; if 2-shaped, this calls set_pixels. Returns ------- Grid """ this = cls() if type(datainput) == geopandas.geodataframe.GeoDataFrame: this.set_geodataframe(datainput) return this if type(datainput) == np.ndarray: if len(np.shape( datainput) ) == 3: # vertices this.set_vertices(datainput) elif len(np.shape( datainput) ) == 3: # pixels this.set_pixels(datainput) else: raise TypeError("cannot parse the shape of the given datainput") return this raise TypeError("cannot parse the format of the given input") # --------- # # SETTER # # --------- # def set_indexes(self, indexes, update=True): """ provide the indexes associated to each pixels Parameters ---------- indexes: [ndarray] indexes associated to the pixels. 
This should have the length equal to th number of pixels (if any). update: [bool] -optional- should the geodataframe be updated ? [use True if you are not sure] Returns ------- Void """ if self.pixels is not None and len(indexes) != self.npixels: raise AssertionError("not the same number of indexes as the number of pixels") self._side_properties["indexes"] = indexes if update: self._update_geodataframe_() def set_pixels(self, pixels, shape=None, update=True): """ provide the pixels. Pixels define the position up on which the geometries are defined. NB: vertices = pixels+shape """ # Setting the pixels if np.shape(pixels)[-1] != 2: raise ValueError("pixels must be [N,2] arrays") self._properties["pixels"] = np.asarray(pixels) if shape is not None: self.set_pixelshapes(shape, update=False) if update: self._update_geodataframe_() def set_pixelshapes(self, shape, update=True): """ """ # Setting the pixel shape.s if len(np.shape(shape))==2: self._properties["shape"] = np.asarray(shape) elif len(np.shape(shape))==3: if self.pixels is not None and np.shape(shape)[0] != self.npixels: raise AssertionError("`shape` must be unique or have the same lenth as pixels") self._properties["shape"] = np.asarray(shape) else: raise ValueError("Cannot parse the given shape, must be [M,2] or [N,M,2] when N is the number of pixel and M the number of vertices") if update: self._update_geodataframe_() def set_vertices(self, vertices, overwrite=False, **kwargs): """ """ if not overwrite and (self.pixels is not None and self.shape is not None): raise ValueError("Pixels and shape already defined. set the overwrite option to true, to update vertices") try: pixels = np.mean(vertices, axis=1) except: # Means vertices have different size. self._derived_properties["vertices"] = vertices pixels = np.asarray([np.mean(v_, axis=0) for v_ in vertices]) self.set_pixels(pixels, None, **kwargs) return self._derived_properties["vertices"] = np.asarray(vertices) shape = self.vertices - pixels[:,None] shape_unique = np.unique(shape, axis=0) if len(shape_unique)==1: shape = shape_unique[0] self.set_pixels(pixels, shape, **kwargs) def set_geodataframe(self, geodataframe, overwrite=False): """ """ if not overwrite and (self.pixels is not None and self.shape is not None): raise ValueError("Pixels and shape already defined. set the overwrite option to true, to update geodataframe") if "geometry" not in geodataframe.columns: raise TypeError("The given geodataframe does not have 'geometry' column. 
It is required") self._derived_properties["geodataframe"] = geodataframe if "id" not in geodataframe.columns: self.geodataframe["id"] = self.indexes if self.pixels is not None else np.arange( len(geodataframe) ) # - get the vertices: def get_verts(poly_): return np.stack(poly_.exterior.xy).T[:-1] vertices = geodataframe["geometry"].apply(get_verts).values self.set_vertices(vertices, update=False) # don't update the geodataframe # --------- # # UPDATE # # --------- # def _update_geodataframe_(self): """ """ dataseries = self.get_geoseries() x,y = self.pixels.T self._derived_properties["geodataframe"] = \ geopandas.GeoDataFrame({'geometry': dataseries, 'id':self.indexes, 'x':x,'y':y}) def add_data(self, data, name, indexes=None, inplace=True): """ """ if indexes is None: indexes = self.indexes s_ = pandas.Series(data, name=name, index=indexes) if not inplace: return self.geodataframe.join(s_) self._derived_properties["geodataframe"] = self.geodataframe.join(s_) # --------- # # GETTER # # --------- # def get_geoseries(self): """ build a new geodataframe and returns it. """ import geopandas return geopandas.GeoSeries([geometry.Polygon(v) for v in self.vertices]) def get_triangulation_grid(self): """ Returns a grid of triangulation. """ return Grid.set_from( np.concatenate(self.triangulation, axis=0) ) def get_pixels_in(self, polygon, invert=False): """ checks if the centroid of the pixel is in or out the given shapely polygon. Parameters ---------- polygon: [shapely.geometry] reference polygon invert: [bool] -optional- Get the pixel inside the polygon [invert=False] or outsite [invert=True] Returns ------- list of pixels and boolean mask """ from shapely import vectorized flagin = vectorized.contains(polygon, *self.pixels.T) if invert: flagin = ~flagin return self.pixels[flagin], flagin # --------- # # Project # # --------- # def project_to(self, othergrid, column="*", asgrid=True, use="sum"): """ project data in the given grid Parameters ---------- othergrid: [Grid] New grid where data should be projected to column: [str/None/list of] -optional- Which data should be projected ? If None or '*' all the non-structural columns will be (structural columns are 'geometry', 'id', 'x', 'y') asgrid: [bool] -optional- Should this return a new Grid (actually same object as othergrid) or a dict [asgrid=False]? 
Returns ------- Grid or dict (see asgrid) """ gproj = GridProjector(self, othergrid) if column is None or column in ["*","all"]: column = [k for k in self.geodataframe if k not in ['geometry', 'id', 'x', 'y']] datas = {k:gproj.project_data(k, use=use) for k in column} if not asgrid: return datas # building and setting the new grid gout = othergrid.__class__.set_from(othergrid.geodataframe) for k in column: gout.add_data(datas[k],k) return gout def project_to_wcs(self, wcs_, asgrid=True, **kwargs): """ provide an astropy.wcs.WCS and this will project the current grid into it (assuming grid's vertices coordinates are in pixels) Parameters ---------- wcs_: [astropy.wcs.WCS] The world coordinate solution asgrid: [bool] -optional- Should this return a load Grid object or an array of vertices (in degree) **kwargs goes to wcs_.all_pix2world Returns ------- Grid or array (see asgrid) """ verts = self.vertices verts_shape = np.shape(verts) flatten_verts = np.concatenate(verts, axis=0) # flatten_verts_wcs = np.asarray(wcs_.all_pix2world(flatten_verts[:,0], flatten_verts[:,1], 0, **kwargs)).T # verts_wcs = flatten_verts_wcs.reshape(verts_shape) if not asgrid: return verts_wcs g_wcs = Grid.set_from(verts_wcs) g_wcs.geodataframe["x_pix"],g_wcs.geodataframe["y_pix"] = self.pixels.T return g_wcs def evaluate(self, func, vectorized=True): """ Evaluate the given function throughout the grid. This evulation is using polynome triangulation to integrate the given function inside the polyname using triangle integration. -> dependency: the integration is made using quadpy. Examples: # Remark the np.stack(x, axis=-1). # This is mandatory since integration is going to send # x = [ [[....],[...]], [[....],[...]], ... ] for triangles ```python def get_2dgauss(x, mu=[4,4], cov=[[1,0],[0,2]]): """ """ return stats.multivariate_normal.pdf(np.stack(x, axis=-1), mean=mu, cov=cov) ``` """ try: import quadpy except ImportError: raise ImportError("Integration is made using quadpy. pip install quadpy") # Is Triangulation made ? 
if self._derived_properties["triangulation"] is None: warnings.warn("triangles not defined: deriving triangulation.") self.derive_triangulation() # Let's get the triangles trs = np.stack(self.triangulation) shape_trs = np.shape(trs) if len(shape_trs)==4 and vectorized: # All Polygon have the same topology (same amount of vertices) tr_flat = np.stack(np.concatenate(trs, axis=0), axis=-2) val = quadpy.triangle.strang_fix_cowper_09().integrate(func,tr_flat).reshape(shape_trs[:2]) else: val = np.asarray([quadpy.triangle.strang_fix_cowper_09().integrate(func,np.stack(t_, axis=-2)) for t_ in trs]) return np.sum(val, axis=1) def derive_triangulation(self, fast_unique=True): """ """ def triangulate(geom): """ Return triangulate format that quadpy likes """ from shapely import ops triangles = ops.triangulate(geom) return np.stack([np.asarray(t.exterior.coords.xy).T[:-1] for t in triangles]) if not self.is_shape_unique or not fast_unique: self._derived_properties["triangulation"] = self.geodataframe["geometry"].apply(triangulate) else: self._derived_properties["triangulation"] = self.pixels[:,None,None] + triangulate(geometry.Polygon(self.shape)) # --------- # # PLOTTER # # --------- # def show(self, column=None, ax=None, edgecolor="0.7", facecolor="None", **kwargs): """ """ if column is not None: facecolor=None return self.geodataframe.plot(column, ax=ax,facecolor=facecolor, edgecolor=edgecolor, **kwargs) # =================== # # Properties # # =================== # @property def pixels(self): """ """ return self._properties["pixels"] @property def npixels(self): """ """ return len(self.pixels) @property def shape(self): """ """ if self._properties["shape"] is None: self._properties["shape"] = UNIT_SQUARE return self._properties["shape"] # -- Side @property def indexes(self): """ """ if self._side_properties["indexes"] is None: self._side_properties["indexes"] = np.arange(self.npixels) return self._side_properties["indexes"] # -- Derived @property def vertices(self): """ """ if self._derived_properties["vertices"] is None and (self.pixels is not None and self.shape is not None): self._derived_properties["vertices"] = self.pixels[:,None]+self.shape return self._derived_properties["vertices"] @property def is_shape_unique(self): """ """ return len(np.shape(self.shape))==2 @property def geodataframe(self): """ """ if self._derived_properties["geodataframe"] is None: self._update_geodataframe_() return self._derived_properties["geodataframe"] @property def triangulation(self): """ Triangulation of the vertices. Based on Delaunay tesselation, see shapely.ops.triangulate """ if self._derived_properties["triangulation"] is None: self.derive_triangulation() return self._derived_properties["triangulation"]
2.9375
3
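The `grid.py` module above exposes `get_simple_grid`, `Grid`, and `GridProjector` for area-weighted reprojection of per-pixel data between two pixel grids. A minimal usage sketch, assuming the package is importable as `pixelproject.grid` and that its `geopandas`/`shapely` dependencies are installed; the grid sizes and the `flux` column name are arbitrary illustrations:

```python
import numpy as np
from pixelproject.grid import get_simple_grid, GridProjector

# A 10x10 grid of unit-square pixels carrying a 'flux' value per pixel,
# plus a second grid of the same size shifted by half a pixel.
grid_in = get_simple_grid(10, 10)
grid_in.add_data(np.random.rand(grid_in.npixels), "flux")
grid_out = get_simple_grid(10, 10, shift_origin=[0.5, 0.5])

# Area-weighted projection of 'flux' onto the shifted grid.
projector = GridProjector(grid_in, grid_out)
flux_out = projector.project_data("flux", as_serie=False)

print(flux_out.shape)  # one value per output pixel
```

Note that the higher-level `Grid.project_to` wrapper iterates over its `column` argument, so pass a list such as `column=["flux"]` rather than a bare string when using it.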
numpy/lib/format.py
AnirudhDagar/numpy
5
925
""" Binary serialization NPY format ========== A simple format for saving numpy arrays to disk with the full information about them. The ``.npy`` format is the standard binary file format in NumPy for persisting a *single* arbitrary NumPy array on disk. The format stores all of the shape and dtype information necessary to reconstruct the array correctly even on another machine with a different architecture. The format is designed to be as simple as possible while achieving its limited goals. The ``.npz`` format is the standard format for persisting *multiple* NumPy arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy`` files, one for each array. Capabilities ------------ - Can represent all NumPy arrays including nested record arrays and object arrays. - Represents the data in its native binary form. - Supports Fortran-contiguous arrays directly. - Stores all of the necessary information to reconstruct the array including shape and dtype on a machine of a different architecture. Both little-endian and big-endian arrays are supported, and a file with little-endian numbers will yield a little-endian array on any machine reading the file. The types are described in terms of their actual sizes. For example, if a machine with a 64-bit C "long int" writes out an array with "long ints", a reading machine with 32-bit C "long ints" will yield an array with 64-bit integers. - Is straightforward to reverse engineer. Datasets often live longer than the programs that created them. A competent developer should be able to create a solution in their preferred programming language to read most ``.npy`` files that they have been given without much documentation. - Allows memory-mapping of the data. See `open_memmap`. - Can be read from a filelike stream object instead of an actual file. - Stores object arrays, i.e. arrays containing elements that are arbitrary Python objects. Files with object arrays are not to be mmapable, but can be read and written to disk. Limitations ----------- - Arbitrary subclasses of numpy.ndarray are not completely preserved. Subclasses will be accepted for writing, but only the array data will be written out. A regular numpy.ndarray object will be created upon reading the file. .. warning:: Due to limitations in the interpretation of structured dtypes, dtypes with fields with empty names will have the names replaced by 'f0', 'f1', etc. Such arrays will not round-trip through the format entirely accurately. The data is intact; only the field names will differ. We are working on a fix for this. This fix will not require a change in the file format. The arrays with such structures can still be saved and restored, and the correct dtype may be restored by using the ``loadedarray.view(correct_dtype)`` method. File extensions --------------- We recommend using the ``.npy`` and ``.npz`` extensions for files saved in this format. This is by no means a requirement; applications may wish to use these file formats but use an extension specific to the application. In the absence of an obvious alternative, however, we suggest using ``.npy`` and ``.npz``. Version numbering ----------------- The version numbering of these formats is independent of NumPy version numbering. If the format is upgraded, the code in `numpy.io` will still be able to read and write Version 1.0 files. Format Version 1.0 ------------------ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``. The next 1 byte is an unsigned byte: the major version number of the file format, e.g. ``\\x01``. 
The next 1 byte is an unsigned byte: the minor version number of the file format, e.g. ``\\x00``. Note: the version of the file format is not tied to the version of the numpy package. The next 2 bytes form a little-endian unsigned short int: the length of the header data HEADER_LEN. The next HEADER_LEN bytes form the header data describing the array's format. It is an ASCII string which contains a Python literal expression of a dictionary. It is terminated by a newline (``\\n``) and padded with spaces (``\\x20``) to make the total of ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible by 64 for alignment purposes. The dictionary contains three keys: "descr" : dtype.descr An object that can be passed as an argument to the `numpy.dtype` constructor to create the array's dtype. "fortran_order" : bool Whether the array data is Fortran-contiguous or not. Since Fortran-contiguous arrays are a common form of non-C-contiguity, we allow them to be written directly to disk for efficiency. "shape" : tuple of int The shape of the array. For repeatability and readability, the dictionary keys are sorted in alphabetic order. This is for convenience only. A writer SHOULD implement this if possible. A reader MUST NOT depend on this. Following the header comes the array data. If the dtype contains Python objects (i.e. ``dtype.hasobject is True``), then the data is a Python pickle of the array. Otherwise the data is the contiguous (either C- or Fortran-, depending on ``fortran_order``) bytes of the array. Consumers can figure out the number of bytes by multiplying the number of elements given by the shape (noting that ``shape=()`` means there is 1 element) by ``dtype.itemsize``. Format Version 2.0 ------------------ The version 1.0 format only allowed the array header to have a total size of 65535 bytes. This can be exceeded by structured arrays with a large number of columns. The version 2.0 format extends the header size to 4 GiB. `numpy.save` will automatically save in 2.0 format if the data requires it, else it will always use the more compatible 1.0 format. The description of the fourth element of the header therefore has become: "The next 4 bytes form a little-endian unsigned int: the length of the header data HEADER_LEN." Format Version 3.0 ------------------ This version replaces the ASCII string (which in practice was latin1) with a utf8-encoded string, so supports structured types with any unicode field names. Notes ----- The ``.npy`` format, including motivation for creating it and a comparison of alternatives, is described in the :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have evolved with time and this document is more current. 
""" import numpy import io import warnings from numpy.lib.utils import safe_eval from numpy.compat import ( isfileobj, os_fspath, pickle ) __all__ = [] EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'} MAGIC_PREFIX = b'\x93NUMPY' MAGIC_LEN = len(MAGIC_PREFIX) + 2 ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096 BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes # difference between version 1.0 and 2.0 is a 4 byte (I) header length # instead of 2 bytes (H) allowing storage of large structured arrays _header_size_info = { (1, 0): ('<H', 'latin1'), (2, 0): ('<I', 'latin1'), (3, 0): ('<I', 'utf8'), } def _check_version(version): if version not in [(1, 0), (2, 0), (3, 0), None]: msg = "we only support format version (1,0), (2,0), and (3,0), not %s" raise ValueError(msg % (version,)) def magic(major, minor): """ Return the magic string for the given file format version. Parameters ---------- major : int in [0, 255] minor : int in [0, 255] Returns ------- magic : str Raises ------ ValueError if the version cannot be formatted. """ if major < 0 or major > 255: raise ValueError("major version must be 0 <= major < 256") if minor < 0 or minor > 255: raise ValueError("minor version must be 0 <= minor < 256") return MAGIC_PREFIX + bytes([major, minor]) def read_magic(fp): """ Read the magic string to get the version of the file format. Parameters ---------- fp : filelike object Returns ------- major : int minor : int """ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string") if magic_str[:-2] != MAGIC_PREFIX: msg = "the magic string is not correct; expected %r, got %r" raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2])) major, minor = magic_str[-2:] return major, minor def _has_metadata(dt): if dt.metadata is not None: return True elif dt.names is not None: return any(_has_metadata(dt[k]) for k in dt.names) elif dt.subdtype is not None: return _has_metadata(dt.base) else: return False def dtype_to_descr(dtype): """ Get a serializable descriptor from the dtype. The .descr attribute of a dtype object cannot be round-tripped through the dtype() constructor. Simple types, like dtype('float32'), have a descr which looks like a record array with one field with '' as a name. The dtype() constructor interprets this as a request to give a default name. Instead, we construct descriptor that can be passed to dtype(). Parameters ---------- dtype : dtype The dtype of the array that will be written to disk. Returns ------- descr : object An object that can be passed to `numpy.dtype()` in order to replicate the input dtype. """ if _has_metadata(dtype): warnings.warn("metadata on a dtype may be saved or ignored, but will " "raise if saved when read. Use another form of storage.", UserWarning, stacklevel=2) if dtype.names is not None: # This is a record array. The .descr is fine. XXX: parts of the # record array with an empty name, like padding bytes, still get # fiddled with. This needs to be fixed in the C implementation of # dtype(). return dtype.descr else: return dtype.str def descr_to_dtype(descr): """ Returns a dtype based off the given description. This is essentially the reverse of `dtype_to_descr()`. It will remove the valueless padding fields created by, i.e. simple fields like dtype('float32'), and then convert the description to its corresponding dtype. Parameters ---------- descr : object The object retreived by dtype.descr. Can be passed to `numpy.dtype()` in order to replicate the input dtype. 
Returns ------- dtype : dtype The dtype constructed by the description. """ if isinstance(descr, str): # No padding removal needed return numpy.dtype(descr) elif isinstance(descr, tuple): # subtype, will always have a shape descr[1] dt = descr_to_dtype(descr[0]) return numpy.dtype((dt, descr[1])) titles = [] names = [] formats = [] offsets = [] offset = 0 for field in descr: if len(field) == 2: name, descr_str = field dt = descr_to_dtype(descr_str) else: name, descr_str, shape = field dt = numpy.dtype((descr_to_dtype(descr_str), shape)) # Ignore padding bytes, which will be void bytes with '' as name # Once support for blank names is removed, only "if name == ''" needed) is_pad = (name == '' and dt.type is numpy.void and dt.names is None) if not is_pad: title, name = name if isinstance(name, tuple) else (None, name) titles.append(title) names.append(name) formats.append(dt) offsets.append(offset) offset += dt.itemsize return numpy.dtype({'names': names, 'formats': formats, 'titles': titles, 'offsets': offsets, 'itemsize': offset}) def header_data_from_array_1_0(array): """ Get the dictionary of header metadata from a numpy.ndarray. Parameters ---------- array : numpy.ndarray Returns ------- d : dict This has the appropriate entries for writing its string representation to the header of the file. """ d = {'shape': array.shape} if array.flags.c_contiguous: d['fortran_order'] = False elif array.flags.f_contiguous: d['fortran_order'] = True else: # Totally non-contiguous data. We will have to make it C-contiguous # before writing. Note that we need to test for C_CONTIGUOUS first # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS. d['fortran_order'] = False d['descr'] = dtype_to_descr(array.dtype) return d def _wrap_header(header, version): """ Takes a stringified header, and attaches the prefix and padding to it """ import struct assert version is not None fmt, encoding = _header_size_info[version] if not isinstance(header, bytes): # always true on python 3 header = header.encode(encoding) hlen = len(header) + 1 padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN) try: header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen) except struct.error: msg = "Header length {} too big for version={}".format(hlen, version) raise ValueError(msg) from None # Pad the header with spaces and a final newline such that the magic # string, the header-length short and the header are aligned on a # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes # aligned up to ARRAY_ALIGN on systems like Linux where mmap() # offset must be page-aligned (i.e. the beginning of the file). return header_prefix + header + b' '*padlen + b'\n' def _wrap_header_guess_version(header): """ Like `_wrap_header`, but chooses an appropriate version given the contents """ try: return _wrap_header(header, (1, 0)) except ValueError: pass try: ret = _wrap_header(header, (2, 0)) except UnicodeEncodeError: pass else: warnings.warn("Stored array in format 2.0. It can only be" "read by NumPy >= 1.9", UserWarning, stacklevel=2) return ret header = _wrap_header(header, (3, 0)) warnings.warn("Stored array in format 3.0. It can only be " "read by NumPy >= 1.17", UserWarning, stacklevel=2) return header def _write_array_header(fp, d, version=None): """ Write the header for an array and returns the version used Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. 
version: tuple or None None means use oldest that works explicit version will raise a ValueError if the format does not allow saving this data. Default: None """ header = ["{"] for key, value in sorted(d.items()): # Need to use repr here, since we eval these when reading header.append("'%s': %s, " % (key, repr(value))) header.append("}") header = "".join(header) if version is None: header = _wrap_header_guess_version(header) else: header = _wrap_header(header, version) fp.write(header) def write_array_header_1_0(fp, d): """ Write the header for an array using the 1.0 format. Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (1, 0)) def write_array_header_2_0(fp, d): """ Write the header for an array using the 2.0 format. The 2.0 format allows storing very large structured arrays. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object d : dict This has the appropriate entries for writing its string representation to the header of the file. """ _write_array_header(fp, d, (2, 0)) def read_array_header_1_0(fp): """ Read an array header from a filelike object using the 1.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(1, 0)) def read_array_header_2_0(fp): """ Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. .. versionadded:: 1.9.0 Parameters ---------- fp : filelike object A file object or something with a `.read()` method like a file. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. Raises ------ ValueError If the data is invalid. """ return _read_array_header(fp, version=(2, 0)) def _filter_header(s): """Clean up 'L' in npz header ints. Cleans up the 'L' in strings representing integers. Needed to allow npz headers produced in Python2 to be read in Python3. Parameters ---------- s : string Npy file header. Returns ------- header : str Cleaned up header. """ import tokenize from io import StringIO tokens = [] last_token_was_number = False for token in tokenize.generate_tokens(StringIO(s).readline): token_type = token[0] token_string = token[1] if (last_token_was_number and token_type == tokenize.NAME and token_string == "L"): continue else: tokens.append(token) last_token_was_number = (token_type == tokenize.NUMBER) return tokenize.untokenize(tokens) def _read_array_header(fp, version): """ see read_array_header_1_0 """ # Read an unsigned, little-endian short int which has the length of the # header. 
import struct hinfo = _header_size_info.get(version) if hinfo is None: raise ValueError("Invalid version {!r}".format(version)) hlength_type, encoding = hinfo hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") header_length = struct.unpack(hlength_type, hlength_str)[0] header = _read_bytes(fp, header_length, "array header") header = header.decode(encoding) # The header is a pretty-printed string representation of a literal # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte # boundary. The keys are strings. # "shape" : tuple of int # "fortran_order" : bool # "descr" : dtype.descr # Versions (2, 0) and (1, 0) could have been created by a Python 2 # implementation before header filtering was implemented. if version <= (2, 0): header = _filter_header(header) try: d = safe_eval(header) except SyntaxError as e: msg = "Cannot parse header: {!r}" raise ValueError(msg.format(header)) from e if not isinstance(d, dict): msg = "Header is not a dictionary: {!r}" raise ValueError(msg.format(d)) if EXPECTED_KEYS != d.keys(): keys = sorted(d.keys()) msg = "Header does not contain the correct keys: {!r}" raise ValueError(msg.format(keys)) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or not all(isinstance(x, int) for x in d['shape'])): msg = "shape is not valid: {!r}" raise ValueError(msg.format(d['shape'])) if not isinstance(d['fortran_order'], bool): msg = "fortran_order is not a valid bool: {!r}" raise ValueError(msg.format(d['fortran_order'])) try: dtype = descr_to_dtype(d['descr']) except TypeError as e: msg = "descr is not a valid dtype descriptor: {!r}" raise ValueError(msg.format(d['descr'])) from e return d['shape'], d['fortran_order'], dtype def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): """ Write an array to an NPY file, including a header. If the array is neither C-contiguous nor Fortran-contiguous AND the file_like object is not a real file object, this function will have to copy data in memory. Parameters ---------- fp : file_like object An open, writable file object, or similar object with a ``.write()`` method. array : ndarray The array to write to disk. version : (int, int) or None, optional The version number of the format. None means use the oldest supported version that is able to store the data. Default: None allow_pickle : bool, optional Whether to allow writing pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass to pickle.dump, excluding 'protocol'. These are only useful when pickling objects in object arrays on Python 3 to Python 2 compatible format. Raises ------ ValueError If the array cannot be persisted. This includes the case of allow_pickle=False and array being an object array. Various other errors If the array contains Python objects as part of its dtype, the process of pickling them may raise various errors if the objects are not picklable. """ _check_version(version) _write_array_header(fp, header_data_from_array_1_0(array), version) if array.itemsize == 0: buffersize = 0 else: # Set buffer size to 16 MiB to hide the Python loop overhead. buffersize = max(16 * 1024 ** 2 // array.itemsize, 1) if array.dtype.hasobject: # We contain Python objects so we cannot write out the data # directly. 
Instead, we will pickle it out if not allow_pickle: raise ValueError("Object arrays cannot be saved when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} pickle.dump(array, fp, protocol=3, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='F'): fp.write(chunk.tobytes('C')) else: if isfileobj(fp): array.tofile(fp) else: for chunk in numpy.nditer( array, flags=['external_loop', 'buffered', 'zerosize_ok'], buffersize=buffersize, order='C'): fp.write(chunk.tobytes('C')) def read_array(fp, allow_pickle=False, pickle_kwargs=None): """ Read an array from an NPY file. Parameters ---------- fp : file_like object If this is not a real file object, then this may take extra memory and time. allow_pickle : bool, optional Whether to allow writing pickled data. Default: False .. versionchanged:: 1.16.3 Made default False in response to CVE-2019-6446. pickle_kwargs : dict Additional keyword arguments to pass to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Returns ------- array : ndarray The array from the data on disk. Raises ------ ValueError If the data is invalid, or allow_pickle=False and the file contains an object array. """ version = read_magic(fp) _check_version(version) shape, fortran_order, dtype = _read_array_header(fp, version) if len(shape) == 0: count = 1 else: count = numpy.multiply.reduce(shape, dtype=numpy.int64) # Now read the actual data. if dtype.hasobject: # The array contained Python objects. We need to unpickle the data. if not allow_pickle: raise ValueError("Object arrays cannot be loaded when " "allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} try: array = pickle.load(fp, **pickle_kwargs) except UnicodeError as err: # Friendlier error message raise UnicodeError("Unpickling a python object failed: %r\n" "You may need to pass the encoding= option " "to numpy.load" % (err,)) from err else: if isfileobj(fp): # We can use the fast fromfile() function. array = numpy.fromfile(fp, dtype=dtype, count=count) else: # This is not a real file. We have to read it the # memory-intensive way. # crc32 module fails on reads greater than 2 ** 32 bytes, # breaking large reads from gzip streams. Chunk reads to # BUFFER_SIZE bytes to avoid issue and reduce memory overhead # of the read. In non-chunked case count < max_read_count, so # only one read is performed. # Use np.ndarray instead of np.empty since the latter does # not correctly instantiate zero-width string dtypes; see # https://github.com/numpy/numpy/pull/6430 array = numpy.ndarray(count, dtype=dtype) if dtype.itemsize > 0: # If dtype.itemsize == 0 then there's nothing more to read max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize) for i in range(0, count, max_read_count): read_count = min(max_read_count, count - i) read_size = int(read_count * dtype.itemsize) data = _read_bytes(fp, read_size, "array data") array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype, count=read_count) if fortran_order: array.shape = shape[::-1] array = array.transpose() else: array.shape = shape return array def open_memmap(filename, mode='r+', dtype=None, shape=None, fortran_order=False, version=None): """ Open a .npy file as a memory-mapped array. This may be used to read an existing file or create a new one. 
Parameters ---------- filename : str or path-like The name of the file on disk. This may *not* be a file-like object. mode : str, optional The mode in which to open the file; the default is 'r+'. In addition to the standard file modes, 'c' is also accepted to mean "copy on write." See `memmap` for the available mode strings. dtype : data-type, optional The data type of the array if we are creating a new file in "write" mode, if not, `dtype` is ignored. The default value is None, which results in a data-type of `float64`. shape : tuple of int The shape of the array if we are creating a new file in "write" mode, in which case this parameter is required. Otherwise, this parameter is ignored and is thus optional. fortran_order : bool, optional Whether the array should be Fortran-contiguous (True) or C-contiguous (False, the default) if we are creating a new file in "write" mode. version : tuple of int (major, minor) or None If the mode is a "write" mode, then this is the version of the file format used to create the file. None means use the oldest supported version that is able to store the data. Default: None Returns ------- marray : memmap The memory-mapped array. Raises ------ ValueError If the data or the mode is invalid. IOError If the file is not found or cannot be opened correctly. See Also -------- numpy.memmap """ if isfileobj(filename): raise ValueError("Filename must be a string or a path-like object." " Memmap cannot use existing file handles.") if 'w' in mode: # We are creating the file, not reading it. # Check if we ought to create the file. _check_version(version) # Ensure that the given dtype is an authentic dtype object rather # than just something that can be interpreted as a dtype object. dtype = numpy.dtype(dtype) if dtype.hasobject: msg = "Array can't be memory-mapped: Python objects in dtype." raise ValueError(msg) d = dict( descr=dtype_to_descr(dtype), fortran_order=fortran_order, shape=shape, ) # If we got here, then it should be safe to create the file. with open(os_fspath(filename), mode+'b') as fp: _write_array_header(fp, d, version) offset = fp.tell() else: # Read the header of the file first. with open(os_fspath(filename), 'rb') as fp: version = read_magic(fp) _check_version(version) shape, fortran_order, dtype = _read_array_header(fp, version) if dtype.hasobject: msg = "Array can't be memory-mapped: Python objects in dtype." raise ValueError(msg) offset = fp.tell() if fortran_order: order = 'F' else: order = 'C' # We need to change a write-only mode to a read-write mode since we've # already written data to the file. if mode == 'w+': mode = 'r+' marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order, mode=mode, offset=offset) return marray def _read_bytes(fp, size, error_template="ran out of data"): """ Read from file-like object until size bytes are read. Raises ValueError if not EOF is encountered before size bytes are read. Non-blocking objects only supported if they derive from io objects. Required as e.g. ZipExtFile in python 2.6 can return less data than requested. """ data = bytes() while True: # io files (default in python3) return None or raise on # would-block, python2 file will truncate, probably nothing can be # done about that. note that regular files can't be non-blocking try: r = fp.read(size - len(data)) data += r if len(r) == 0 or len(data) == size: break except io.BlockingIOError: pass if len(data) != size: msg = "EOF: reading %s, expected %d bytes got %d" raise ValueError(msg % (error_template, size, len(data))) else: return data
3.390625
3
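The module docstring above pins down the byte layout of a version 1.0 ``.npy`` file: the 6-byte magic string, two version bytes, a little-endian 2-byte header length, then the header dictionary padded to a 64-byte boundary. A small self-contained check of that layout using only `numpy.save` and raw reads (the filename is arbitrary):

```python
import numpy as np

# Write a small array, then walk the on-disk layout described above.
arr = np.arange(6, dtype='<i4').reshape(2, 3)
np.save('example.npy', arr)

with open('example.npy', 'rb') as fp:
    magic = fp.read(6)                                 # b'\x93NUMPY'
    major, minor = fp.read(2)                          # format version, e.g. 1 and 0
    header_len = int.from_bytes(fp.read(2), 'little')  # version 1.0 stores a uint16 here
    header = fp.read(header_len).decode('latin1')

print(magic, major, minor)
print(header.strip())  # {'descr': '<i4', 'fortran_order': False, 'shape': (2, 3), }
```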
gva/data/validator/is_valid_enum.py
gva-jjoyce/gva_data
0
926
<gh_stars>0
"""
Enumerator Test
"""
from typing import Any


class is_valid_enum():
    """
    Test if a variable is in a list of valid values
    """
    __slots__ = ('symbols',)

    def __init__(self, **kwargs):
        """
        Schema fragment: "type": "enum", "symbols": ["up", "down"]

        symbols: list of allowed values (case sensitive)
        """
        self.symbols = kwargs.get('symbols', ())

    def __call__(self, value: Any) -> bool:
        # bool() so the annotated return type holds even for falsy inputs
        return bool(value and value in self.symbols)

    def __str__(self):
        return f'enum {self.symbols}'
3.28125
3
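A brief usage sketch for the validator above, assuming the package is importable under the path shown for this record (the symbol list is arbitrary):

```python
from gva.data.validator.is_valid_enum import is_valid_enum

check = is_valid_enum(symbols=['up', 'down'])

print(check('up'))    # True
print(check('left'))  # False
print(check)          # enum ['up', 'down']
```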
events_page/app.py
los-verdes/lv-event-pagenerator
0
927
#!/usr/bin/env python
from zoneinfo import ZoneInfo

import flask
from dateutil.parser import parse
from flask_assets import Bundle, Environment
from logzero import logger, setup_logger
from webassets.filter import get_filter

from config import cfg
from apis import calendar as gcal

setup_logger(name=__name__)

app = flask.Flask(__name__)

libsass = get_filter(
    "libsass",
    as_output=True,
    style="compressed",
)
assets = Environment(app)  # create an Environment instance
bundles = {  # define nested Bundle
    "style": Bundle(
        "scss/*.scss",
        filters=(libsass),
        output="style.css",
    )
}
assets.register(bundles)


@app.route("/")
def events():
    return flask.render_template(
        "index.html",
        calendar=gcal.load_calendar(
            service=gcal.build_service(),
            calendar_id=cfg.calendar_id,
        ),
    )


@app.template_filter()
def parse_tz_datetime(datetime_str):
    return parse(datetime_str).replace(tzinfo=ZoneInfo(app.config["display_timezone"]))


@app.template_filter()
def replace_tz(datetime_obj):
    return datetime_obj.replace(tzinfo=ZoneInfo(app.config["display_timezone"]))


@app.template_filter()
def hex2rgb(hex, alpha=None):
    """Convert a '#rrggbb' hex color string to a CSS rgb()/rgba() string."""
    if not hex.startswith("#"):
        return hex
    h = hex.lstrip("#")
    try:
        rgb = tuple(int(h[i : i + 2], 16) for i in (0, 2, 4))  # noqa
    except Exception as err:
        logger.exception(f"unable to convert {hex=} to rgb: {err}")
        return h
    if alpha is None:
        return f"rgb({rgb[0]}, {rgb[1]}, {rgb[2]})"
    else:
        return f"rgba({rgb[0]}, {rgb[1]}, {rgb[2]}, {alpha})"


def get_base_url():
    if prefix := cfg.gcs_bucket_prefix:
        return f"https://{cfg.hostname}/{prefix}"
    return f"https://{cfg.hostname}"


def create_app():
    cfg.load()
    # TODO: do this default settings thing better?
    default_app_config = dict(
        display_timezone=cfg.display_timezone,
        FREEZER_BASE_URL=get_base_url(),
        FREEZER_STATIC_IGNORE=["*.scss", ".webassets-cache/*", ".DS_Store"],
        FREEZER_RELATIVE_URLS=False,
        FREEZER_REMOVE_EXTRA_FILES=True,
    )
    logger.info(f"create_app() => {default_app_config=}")
    app.config.update(default_app_config)
    return app


if __name__ == "__main__":
    app = create_app()
    app.run(
        host="0.0.0.0",
        debug=True,
    )
2.375
2
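The `parse_tz_datetime` and `replace_tz` filters above combine `dateutil.parser.parse` with `zoneinfo.ZoneInfo` to attach the configured display timezone. The core pattern, shown standalone with a placeholder timezone name (the real value comes from `app.config["display_timezone"]`); only `python-dateutil` is needed:

```python
from zoneinfo import ZoneInfo
from dateutil.parser import parse

display_timezone = "America/Chicago"  # placeholder for app.config["display_timezone"]

dt = parse("2022-05-01T19:30:00").replace(tzinfo=ZoneInfo(display_timezone))
print(dt.isoformat())  # 2022-05-01T19:30:00-05:00
```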
bin/focus_scan.py
desihub/desicmx
3
928
<gh_stars>1-10 #!/usr/bin/env python import astropy.io.fits as fits import numpy as np import os import matplotlib.pyplot as plt import argparse def _fname(expid, night, basedir='/n/home/datasystems/users/ameisner/reduced/focus', ccds=False): fname = basedir + '/' + night + '/' + str(expid).zfill(8) + '/gfa-' + str(expid).zfill(8) + '_psfs.fits' if ccds: fname = fname.replace('_psfs.fits', '_ccds.fits') return fname def _actual_expid_list(expids, night, basedir='/n/home/datasystems/users/ameisner/reduced/focus'): keep = [] for i, expid in enumerate(expids): fname = _fname(expid, night, basedir=basedir, ccds=True) if not os.path.exists(fname): continue tab = fits.getdata(fname) # try to handle case where observer accidentally lists the 'setup focus scan' # 1 second exposure as the start of the focus scan if (i == 0) & (tab[0]['EXPTIME'] < 1.1): print('SKIPPING DUMMY SETUP EXPOSURE') continue program = tab[0]['PROGRAM'].strip() if program != 'focus scan': break keep.append(expid) return keep def focus_plots(night, expids, basedir='/n/home/datasystems/users/ameisner/reduced/focus', outdir='/n/home/desiobserver/focus_scan_pngs', no_popups=False): expids = _actual_expid_list(expids, night, basedir=basedir) if len(expids) == 0: print('NO FOCUS SCAN EXPOSURES TO ANALYZE ??') assert(False) plt.figure(1, figsize=(12.0*(len(expids)/7.0), 9)) extnames = ['GUIDE0', 'GUIDE2', 'GUIDE3', 'GUIDE5', 'GUIDE7', 'GUIDE8'] focus_z = [] fwhm_pix = [] # PSF stamps plot plt.subplots_adjust(hspace=0.01, wspace=0.01) for i, expid in enumerate(expids): fname = _fname(expid, night, basedir=basedir) print(fname) fname_ccds = _fname(expid, night, basedir=basedir, ccds=True) if not os.path.exists(fname): continue ccds = fits.getdata(fname_ccds) if np.sum(np.isfinite(ccds['PSF_FWHM_PIX'])) != 0: fwhm_pix.append(np.median(ccds['PSF_FWHM_PIX'][np.isfinite(ccds['PSF_FWHM_PIX'])])) focus_z.append(float(ccds[0]['FOCUS'].split(',')[2])) hdul = fits.open(fname) extnames_present = [hdu.header['EXTNAME'] for hdu in hdul] for j, extname in enumerate(extnames): if extname not in extnames_present: continue print(i, j) plt.subplot(6, len(expids), len(expids)*j + i + 1) plt.xticks([]) plt.yticks([]) im = fits.getdata(fname, extname=extname) plt.imshow(im, interpolation='nearest', origin='lower', cmap='gray_r', vmin=0.01) plt.text(5, 44, str(expid) + '; ' + extname, color='r', fontsize=9) plt.text(10, 3.5, 'z = ' + str(int(float(ccds[0]['FOCUS'].split(',')[2]))), color='r') if np.isfinite(ccds[j]['XCENTROID_PSF']) and np.isfinite(ccds[j]['YCENTROID_PSF']): plt.scatter([ccds[j]['XCENTROID_PSF']], [ccds[j]['YCENTROID_PSF']], marker='.', c='r') expid_min = int(np.min(expids)) print(focus_z) print(fwhm_pix) plt.savefig(os.path.join(outdir, 'stamps_focus_scan-' + str(expid_min).zfill(8)+'.png'), bbox_inches='tight') #plt.cla() plt.figure(200) asec_per_pix = 0.205 focus_z = np.array(focus_z) fwhm_asec = np.array(fwhm_pix)*asec_per_pix plt.scatter(focus_z, fwhm_asec) plt.xlabel('focus z (micron)') plt.ylabel('FWHM (asec)') coeff = np.polyfit(focus_z, fwhm_asec, 2) xsamp = np.arange(np.min(focus_z), np.max(focus_z)) ysamp = coeff[0]*(np.power(xsamp, 2)) + coeff[1]*xsamp + coeff[2] plt.title('focus scan starting with EXPID = ' + str(expid_min)) plt.plot(xsamp, ysamp) zmin = -coeff[1]/(2*coeff[0]) min_fwhm_fit_asec = coeff[0]*(zmin**2) + coeff[1]*zmin + coeff[2] yrange = [np.min(fwhm_asec), np.max(fwhm_asec)] plt.text(focus_z[2], yrange[0] + 0.8*(yrange[1]-yrange[0]), 'best FWHM (meas) : ' + '{:.2f}'.format(np.min(fwhm_asec))) 
plt.text(focus_z[2], yrange[0] + 0.7*(yrange[1]-yrange[0]), 'best FWHM (fit) : ' + '{:.2f}'.format(min_fwhm_fit_asec)) plt.text(focus_z[2], yrange[0] + 0.9*(yrange[1]-yrange[0]), 'best focus : ' + str(int(np.round(zmin)))) plt.savefig(os.path.join(outdir, 'fit_focus_scan-' + str(expid_min).zfill(8) + '.png'), bbox_inches='tight') if not no_popups: plt.show() def _test(): night = '20200131' expids = 45446 + np.arange(7) focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid', outdir='.') def _test_missing_cam(): night = '20200131' expids = 45485 + np.arange(7) focus_plots(night, expids, basedir='/project/projectdirs/desi/users/ameisner/GFA/run/psf_flux_weighted_centroid') if __name__ == "__main__": descr = 'GFA focus sequence plots/analysis' parser = argparse.ArgumentParser(description=descr) parser.add_argument('first_expid', type=int, nargs=1) parser.add_argument('night', type=str, nargs=1) parser.add_argument('--basedir', default='/n/home/datasystems/users/ameisner/reduced/focus', type=str, help='base directory for GFA reductions') parser.add_argument('--outdir', default='/n/home/desiobserver/focus_scan_pngs', type=str, help='output directory for plot PNGs') parser.add_argument('--no_popups', default=False, action='store_true', help='write PNGs without popping up plot windows') args = parser.parse_args() expids = args.first_expid + np.arange(16, dtype=int) print(expids) print(args.night[0]) print(args.basedir) outdir = args.outdir if os.path.exists(args.outdir) else '.' focus_plots(args.night[0], expids, basedir=args.basedir, outdir=outdir, no_popups=args.no_popups)
2.25
2
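`focus_plots` above fits a quadratic to FWHM versus focus position and reports the best focus as the parabola vertex `-b/(2a)`. A tiny synthetic check of that step, with made-up numbers that place the true minimum at z = 40 microns:

```python
import numpy as np

# Synthetic focus curve: minimum FWHM of 1.0" at z = 40 microns.
focus_z = np.array([-300.0, -200.0, -100.0, 0.0, 100.0, 200.0, 300.0])
fwhm_asec = 1.0 + 2e-6 * (focus_z - 40.0) ** 2

coeff = np.polyfit(focus_z, fwhm_asec, 2)
zmin = -coeff[1] / (2 * coeff[0])                      # vertex of the fitted parabola
min_fwhm = coeff[0] * zmin ** 2 + coeff[1] * zmin + coeff[2]

print(round(zmin, 1), round(min_fwhm, 2))  # 40.0 1.0
```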
proto_3/ddq/topics/logics/topic.py
jadnohra/connect
0
929
from typing import List from ddq.taxonomy.reference import Reference from ddq.topics.topic import Topic class Logic(Topic): def references(self) -> List[Reference]: return [ Reference("Classical and Nonclassical Logics", [("Eric", "Schechter")]) ]
2.4375
2
pythia/utils/logger.py
abhiskk/pythia
2
930
# Copyright (c) Facebook, Inc. and its affiliates.
import base64
import logging
import os
import sys

from tensorboardX import SummaryWriter

from pythia.utils.distributed_utils import is_main_process
from pythia.utils.general import (ckpt_name_from_core_args,
                                  foldername_from_config_override)
from pythia.utils.timer import Timer


class Logger:
    def __init__(self, config):
        self.logger = None
        self.summary_writer = None

        if not is_main_process():
            return

        self.timer = Timer()
        self.config = config
        self.save_dir = config.training_parameters.save_dir
        self.log_folder = ckpt_name_from_core_args(config)
        self.log_folder += foldername_from_config_override(config)
        time_format = "%Y-%m-%dT%H:%M:%S"
        self.log_filename = ckpt_name_from_core_args(config) + "_"
        self.log_filename += self.timer.get_time_hhmmss(None, format=time_format)
        self.log_filename += ".log"

        self.log_folder = os.path.join(self.save_dir, self.log_folder, "logs")

        arg_log_dir = self.config.get("log_dir", None)
        if arg_log_dir:
            self.log_folder = arg_log_dir

        if not os.path.exists(self.log_folder):
            os.makedirs(self.log_folder)

        tensorboard_folder = os.path.join(self.log_folder, "tensorboard")
        self.summary_writer = SummaryWriter(tensorboard_folder)

        self.log_filename = os.path.join(self.log_folder, self.log_filename)

        print("Logging to:", self.log_filename)

        logging.captureWarnings(True)

        self.logger = logging.getLogger(__name__)
        self._file_only_logger = logging.getLogger(__name__)
        warnings_logger = logging.getLogger("py.warnings")

        # Set level
        level = config["training_parameters"].get("logger_level", "info")
        self.logger.setLevel(getattr(logging, level.upper()))
        self._file_only_logger.setLevel(getattr(logging, level.upper()))

        formatter = logging.Formatter(
            "%(asctime)s %(levelname)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"
        )

        # Add handler to file
        channel = logging.FileHandler(filename=self.log_filename, mode="a")
        channel.setFormatter(formatter)

        self.logger.addHandler(channel)
        self._file_only_logger.addHandler(channel)
        warnings_logger.addHandler(channel)

        # Add handler to stdout
        channel = logging.StreamHandler(sys.stdout)
        channel.setFormatter(formatter)

        self.logger.addHandler(channel)
        warnings_logger.addHandler(channel)

        should_not_log = self.config["training_parameters"]["should_not_log"]
        self.should_log = not should_not_log

        # Single log wrapper map
        self._single_log_map = set()

    def __del__(self):
        if getattr(self, "summary_writer", None) is not None:
            self.summary_writer.close()

    def write(self, x, level="info", donot_print=False):
        if self.logger is None:
            return

        # if it should not log then just print it
        if self.should_log:
            if hasattr(self.logger, level):
                if donot_print:
                    getattr(self._file_only_logger, level)(str(x))
                else:
                    getattr(self.logger, level)(str(x))
            else:
                self.logger.error("Unknown log level type: %s" % level)
        else:
            print(str(x) + "\n")

    def single_write(self, x, level="info"):
        if x + "_" + level in self._single_log_map:
            return
        else:
            # remember this message so subsequent identical calls are skipped
            self._single_log_map.add(x + "_" + level)
            self.write(x, level)

    def add_scalar(self, key, value, iteration):
        if self.summary_writer is None:
            return
        self.summary_writer.add_scalar(key, value, iteration)

    def add_scalars(self, scalar_dict, iteration):
        if self.summary_writer is None:
            return
        for key, val in scalar_dict.items():
            self.summary_writer.add_scalar(key, val, iteration)

    def add_histogram_for_model(self, model, iteration):
        if self.summary_writer is None:
            return
        for name, param in model.named_parameters():
            np_param = param.clone().cpu().data.numpy()
            self.summary_writer.add_histogram(name, np_param, iteration)
1.796875
2
sdk/python/pulumi_google_native/healthcare/v1beta1/user_data_mapping.py
AaronFriel/pulumi-google-native
44
931
<reponame>AaronFriel/pulumi-google-native # coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs from ._inputs import * __all__ = ['UserDataMappingArgs', 'UserDataMapping'] @pulumi.input_type class UserDataMappingArgs: def __init__(__self__, *, consent_store_id: pulumi.Input[str], data_id: pulumi.Input[str], dataset_id: pulumi.Input[str], user_id: pulumi.Input[str], location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, resource_attributes: Optional[pulumi.Input[Sequence[pulumi.Input['AttributeArgs']]]] = None): """ The set of arguments for constructing a UserDataMapping resource. :param pulumi.Input[str] data_id: A unique identifier for the mapped resource. :param pulumi.Input[str] user_id: User's UUID provided by the client. :param pulumi.Input[str] name: Resource name of the User data mapping, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/consentStores/{consent_store_id}/userDataMappings/{user_data_mapping_id}`. :param pulumi.Input[Sequence[pulumi.Input['AttributeArgs']]] resource_attributes: Attributes of the resource. Only explicitly set attributes are displayed here. Attribute definitions with defaults set implicitly apply to these User data mappings. Attributes listed here must be single valued, that is, exactly one value is specified for the field "values" in each Attribute. """ pulumi.set(__self__, "consent_store_id", consent_store_id) pulumi.set(__self__, "data_id", data_id) pulumi.set(__self__, "dataset_id", dataset_id) pulumi.set(__self__, "user_id", user_id) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if project is not None: pulumi.set(__self__, "project", project) if resource_attributes is not None: pulumi.set(__self__, "resource_attributes", resource_attributes) @property @pulumi.getter(name="consentStoreId") def consent_store_id(self) -> pulumi.Input[str]: return pulumi.get(self, "consent_store_id") @consent_store_id.setter def consent_store_id(self, value: pulumi.Input[str]): pulumi.set(self, "consent_store_id", value) @property @pulumi.getter(name="dataId") def data_id(self) -> pulumi.Input[str]: """ A unique identifier for the mapped resource. """ return pulumi.get(self, "data_id") @data_id.setter def data_id(self, value: pulumi.Input[str]): pulumi.set(self, "data_id", value) @property @pulumi.getter(name="datasetId") def dataset_id(self) -> pulumi.Input[str]: return pulumi.get(self, "dataset_id") @dataset_id.setter def dataset_id(self, value: pulumi.Input[str]): pulumi.set(self, "dataset_id", value) @property @pulumi.getter(name="userId") def user_id(self) -> pulumi.Input[str]: """ User's UUID provided by the client. 
""" return pulumi.get(self, "user_id") @user_id.setter def user_id(self, value: pulumi.Input[str]): pulumi.set(self, "user_id", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Resource name of the User data mapping, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/consentStores/{consent_store_id}/userDataMappings/{user_data_mapping_id}`. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def project(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "project") @project.setter def project(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "project", value) @property @pulumi.getter(name="resourceAttributes") def resource_attributes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AttributeArgs']]]]: """ Attributes of the resource. Only explicitly set attributes are displayed here. Attribute definitions with defaults set implicitly apply to these User data mappings. Attributes listed here must be single valued, that is, exactly one value is specified for the field "values" in each Attribute. """ return pulumi.get(self, "resource_attributes") @resource_attributes.setter def resource_attributes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AttributeArgs']]]]): pulumi.set(self, "resource_attributes", value) class UserDataMapping(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, consent_store_id: Optional[pulumi.Input[str]] = None, data_id: Optional[pulumi.Input[str]] = None, dataset_id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, resource_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AttributeArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None, __props__=None): """ Creates a new User data mapping in the parent consent store. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] data_id: A unique identifier for the mapped resource. :param pulumi.Input[str] name: Resource name of the User data mapping, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/consentStores/{consent_store_id}/userDataMappings/{user_data_mapping_id}`. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AttributeArgs']]]] resource_attributes: Attributes of the resource. Only explicitly set attributes are displayed here. Attribute definitions with defaults set implicitly apply to these User data mappings. Attributes listed here must be single valued, that is, exactly one value is specified for the field "values" in each Attribute. :param pulumi.Input[str] user_id: User's UUID provided by the client. """ ... @overload def __init__(__self__, resource_name: str, args: UserDataMappingArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Creates a new User data mapping in the parent consent store. :param str resource_name: The name of the resource. :param UserDataMappingArgs args: The arguments to use to populate this resource's properties. 
:param pulumi.ResourceOptions opts: Options for the resource. """ ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(UserDataMappingArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, consent_store_id: Optional[pulumi.Input[str]] = None, data_id: Optional[pulumi.Input[str]] = None, dataset_id: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, project: Optional[pulumi.Input[str]] = None, resource_attributes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['AttributeArgs']]]]] = None, user_id: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = UserDataMappingArgs.__new__(UserDataMappingArgs) if consent_store_id is None and not opts.urn: raise TypeError("Missing required property 'consent_store_id'") __props__.__dict__["consent_store_id"] = consent_store_id if data_id is None and not opts.urn: raise TypeError("Missing required property 'data_id'") __props__.__dict__["data_id"] = data_id if dataset_id is None and not opts.urn: raise TypeError("Missing required property 'dataset_id'") __props__.__dict__["dataset_id"] = dataset_id __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["project"] = project __props__.__dict__["resource_attributes"] = resource_attributes if user_id is None and not opts.urn: raise TypeError("Missing required property 'user_id'") __props__.__dict__["user_id"] = user_id __props__.__dict__["archive_time"] = None __props__.__dict__["archived"] = None super(UserDataMapping, __self__).__init__( 'google-native:healthcare/v1beta1:UserDataMapping', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'UserDataMapping': """ Get an existing UserDataMapping resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = UserDataMappingArgs.__new__(UserDataMappingArgs) __props__.__dict__["archive_time"] = None __props__.__dict__["archived"] = None __props__.__dict__["data_id"] = None __props__.__dict__["name"] = None __props__.__dict__["resource_attributes"] = None __props__.__dict__["user_id"] = None return UserDataMapping(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="archiveTime") def archive_time(self) -> pulumi.Output[str]: """ Indicates the time when this mapping was archived. 
""" return pulumi.get(self, "archive_time") @property @pulumi.getter def archived(self) -> pulumi.Output[bool]: """ Indicates whether this mapping is archived. """ return pulumi.get(self, "archived") @property @pulumi.getter(name="dataId") def data_id(self) -> pulumi.Output[str]: """ A unique identifier for the mapped resource. """ return pulumi.get(self, "data_id") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Resource name of the User data mapping, of the form `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/consentStores/{consent_store_id}/userDataMappings/{user_data_mapping_id}`. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceAttributes") def resource_attributes(self) -> pulumi.Output[Sequence['outputs.AttributeResponse']]: """ Attributes of the resource. Only explicitly set attributes are displayed here. Attribute definitions with defaults set implicitly apply to these User data mappings. Attributes listed here must be single valued, that is, exactly one value is specified for the field "values" in each Attribute. """ return pulumi.get(self, "resource_attributes") @property @pulumi.getter(name="userId") def user_id(self) -> pulumi.Output[str]: """ User's UUID provided by the client. """ return pulumi.get(self, "user_id")
1.8125
2
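A minimal usage sketch for the generated UserDataMapping resource above, assuming the module is importable as pulumi_google_native.healthcare.v1beta1 (inferred from the repository layout, not stated in the record). The required inputs mirror UserDataMappingArgs; every identifier value below is hypothetical.

import pulumi
# Assumed import path; verify against the published pulumi-google-native SDK.
from pulumi_google_native.healthcare import v1beta1 as healthcare

# The four required inputs (consent_store_id, data_id, dataset_id, user_id) come
# straight from UserDataMappingArgs; the values themselves are placeholders.
mapping = healthcare.UserDataMapping(
    "example-mapping",
    consent_store_id="example-consent-store",
    dataset_id="example-dataset",
    data_id="Patient/123",
    user_id="11111111-2222-3333-4444-555555555555",
    location="us-central1",
)

# archive_time and archived are read-only outputs populated by the service.
pulumi.export("user_data_mapping_name", mapping.name)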
_doc/sphinxdoc/source/conf.py
Jerome-maker/ensae_teaching_cs
0
932
import sys import os import sphinx_rtd_theme source_path = os.path.normpath( os.path.join( os.path.abspath( os.path.split(__file__)[0]))) try: from conf_base import * except ImportError: sys.path.append(source_path) from conf_base import * html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] templates_path = [os.path.join(source_path, 'phdoc_static')] html_static_path = [os.path.join(source_path, 'phdoc_static')] if not os.path.exists(templates_path[0]): raise FileNotFoundError(templates_path[0]) blog_root = "http://www.xavierdupre.fr/app/ensae_teaching_cs/helpsphinx3/"
1.960938
2
dolfyn/adv/api.py
lkilcher/dolfyn-light
0
933
<filename>dolfyn/adv/api.py """ This module contains routines for reading and working with adv data. It contains: +-----------------------------------+-----------------------------------------+ | Name | Description | +===================================+=========================================+ | :func:`~dolfyn.adv.base.load` | A function for loading ADV data in | | | DOLfYN format. | +-----------------------------------+-----------------------------------------+ | :func:`~dolfyn.adv.base.mmload` | A function for loading ADV data in | | | DOLfYN format (as memory mapped arrays).| +-----------------------------------+-----------------------------------------+ | :func:`~dolfyn.io.nortek.\ | A function for reading Nortek Vector | | read_nortek` | files. | +-----------------------------------+-----------------------------------------+ | :mod:`rotate <dolfyn.adv.rotate>` | A module containing classes and | | | functions for rotating adv data between | | | different coordinate systems | +-----------------------------------+-----------------------------------------+ | :mod:`motion <dolfyn.adv.rotate>` | A module containing classes and | | | functions for performing motion | | | correction. | +-----------------------------------+-----------------------------------------+ | :class:`~dolfyn.\ | A class for breaking ADV data into | | adv.turbulence.TurbBinner` | 'bins', averaging it and estimating | | | various turbulence statistics. | +-----------------------------------+-----------------------------------------+ Examples -------- .. literalinclude:: ../examples/adv_example01.py """ from .base import load, mmload from .turbulence import TurbBinner from . import clean from ..io.nortek import read_nortek from . import rotate from . import motion
2.265625
2
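A hedged sketch of how the entries in the table above might be combined. Only the names (read_nortek, TurbBinner) are documented in the record; the file name and every argument below are assumptions for illustration rather than the library's documented signatures.

from dolfyn.adv import api as adv

# Assumed: read_nortek() accepts a path to a Nortek Vector (.VEC) file.
dat = adv.read_nortek('vector_data.VEC')

# Assumed constructor arguments: bin length in samples and sampling frequency;
# assumed that calling the binner on the loaded data returns binned turbulence stats.
binner = adv.TurbBinner(n_bin=4096, fs=32)
turb_stats = binner(dat)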
server/api/migrations/0002_auto_20201011_1053.py
ShahriarDhruvo/WebTech_Assignment2
0
934
# Generated by Django 3.1.2 on 2020-10-11 10:53 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('api', '0001_initial'), ] operations = [ migrations.AlterField( model_name='task', name='author', field=models.CharField(default='Anonymous', max_length=100), ), migrations.AlterField( model_name='task', name='deadline', field=models.DateTimeField(default='2020-10-11 10:53'), ), ]
1.523438
2
flink-ai-flow/lib/notification_service/notification_service/mongo_event_storage.py
lisy09/flink-ai-extended
1
935
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import time import socket from collections import Iterable from typing import Union, Tuple from mongoengine import connect from notification_service.event_storage import BaseEventStorage from notification_service.base_notification import BaseEvent from notification_service.mongo_notification import MongoEvent class MongoEventStorage(BaseEventStorage): def __init__(self, *args, **kwargs): self.db_conn = self.setup_connection(**kwargs) self.server_ip = socket.gethostbyname(socket.gethostname()) def setup_connection(self, **kwargs): db_conf = { "host": kwargs.get("host"), "port": kwargs.get("port"), "db": kwargs.get("db"), } username = kwargs.get("username", None) password = kwargs.get("password", None) authentication_source = kwargs.get("authentication_source", "admin") if (username or password) and not (username and password): raise Exception("Please provide valid username and password") if username and password: db_conf.update({ "username": username, "password": password, "authentication_source": authentication_source }) return connect(**db_conf) def get_latest_version(self, key: str, namespace: str = None): mongo_events = MongoEvent.get_by_key(key, 0, 1, "-version") if not mongo_events: return 0 return mongo_events[0].version def add_event(self, event: BaseEvent, uuid: str): kwargs = { "server_ip": self.server_ip, "create_time": int(time.time() * 1000), "event_type": event.event_type, "key": event.key, "value": event.value, "context": event.context, "namespace": event.namespace, "sender": event.sender, "uuid": uuid } mongo_event = MongoEvent(**kwargs) mongo_event.save() mongo_event.reload() event.create_time = mongo_event.create_time event.version = mongo_event.version return event def list_events(self, key: Union[str, Tuple[str]], version: int = None, event_type: str = None, start_time: int = None, namespace: str = None, sender: str = None): key = None if key == "" else key version = None if version == 0 else version event_type = None if event_type == "" else event_type namespace = None if namespace == "" else namespace sender = None if sender == "" else sender if isinstance(key, str): key = (key,) elif isinstance(key, Iterable): key = tuple(key) res = MongoEvent.get_base_events(key, version, event_type, start_time, namespace, sender) return res def list_all_events(self, start_time: int): res = MongoEvent.get_base_events_by_time(start_time) return res def list_all_events_from_version(self, start_version: int, end_version: int = None): res = MongoEvent.get_base_events_by_version(start_version, end_version) return res def clean_up(self): MongoEvent.delete_by_client(self.server_ip)
2.015625
2
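A small usage sketch for MongoEventStorage above. The connection keywords are the ones consumed by setup_connection(); the BaseEvent constructor arguments are an assumption, since the record only shows which event attributes add_event() reads.

from notification_service.base_notification import BaseEvent
from notification_service.mongo_event_storage import MongoEventStorage

# Hypothetical connection settings; host/port/db (plus optional username/password)
# are the keywords handled by setup_connection().
storage = MongoEventStorage(host="localhost", port=27017, db="notification_test")

# Assumed BaseEvent keywords; add_event() only needs event_type, key, value,
# context, namespace and sender to be present on the event object.
event = BaseEvent(key="model_ready", value="v1", event_type="MODEL_EVENT",
                  namespace="default", sender="trainer")
saved = storage.add_event(event, uuid="11111111-2222-3333-4444-555555555555")
print(saved.version, storage.get_latest_version("model_ready"))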
src/unicon/plugins/confd/csp/__init__.py
tahigash/unicon.plugins
1
936
__author__ = "<NAME> <<EMAIL>>" from unicon.plugins.confd import ConfdServiceList, ConfdConnection, ConfdConnectionProvider from .statemachine import CspStateMachine from .settings import CspSettings from . import service_implementation as csp_svc class CspServiceList(ConfdServiceList): def __init__(self): super().__init__() delattr(self, 'cli_style') self.reload = csp_svc.Reload class CspSingleRPConnection(ConfdConnection): os = 'confd' series = 'csp' chassis_type = 'single_rp' state_machine_class = CspStateMachine connection_provider_class = ConfdConnectionProvider subcommand_list = CspServiceList settings = CspSettings()
1.804688
2
setup.py
rrwen/search_google
15
937
# -*- coding: utf-8 -*-

from setuptools import setup

import search_google as package

def readme():
  with open('README.rst') as f:
    return ''.join(f.readlines()[11:])

setup(
  name=package.__name__,
  version=package.__version__,
  description=package.__description__,
  long_description=readme(),
  author=package.__author__,
  author_email=package.__email__,
  license=package.__license__,
  url=package.__url__,
  download_url=package.__download_url__,
  keywords=package.__keywords__,
  entry_points=package.__entry_points__,
  packages=package.__packages__,
  package_data=package.__package_data__,
  install_requires=package.__install_requires__
)
1.265625
1
setup.py
chearon/macpack
24
938
<filename>setup.py import setuptools import os try: import pypandoc description = pypandoc.convert('README.md', 'rst') if os.path.exists('README.md') else '' except ImportError: description = '' setuptools.setup( name = 'macpack', packages = setuptools.find_packages(), version = '1.0.3', description = 'Makes a macOS binary redistributable by searching the dependency tree and copying/patching non-system libraries.', long_description = description, author = '<NAME>', author_email = '<EMAIL>', url = 'https://github.com/chearon/macpack', download_url = 'https://github.com/chearon/macpack/tarball/v1.0.3', keywords = ['macos', 'bundle', 'package', 'redistribute', 'redistributable', 'install_name_tool', 'otool', 'mach'], classifiers = [], entry_points = { 'console_scripts': ['macpack=macpack.patcher:main'], } )
1.796875
2
WEEK2/day5/scripts/06_NB_Challenges_Isolines.py
tizon9804/SS2017
0
939
import vtk # Read the file (to test that it was written correctly) reader = vtk.vtkXMLImageDataReader() reader.SetFileName("../data/wind_image.vti") reader.Update() print(reader.GetOutput()) # Convert the image to a polydata imageDataGeometryFilter = vtk.vtkImageDataGeometryFilter() imageDataGeometryFilter.SetInputConnection(reader.GetOutputPort()) imageDataGeometryFilter.Update() scalarRange = reader.GetOutput().GetPointData().GetScalars().GetRange(-1) contoursFilter = vtk.vtkContourFilter() contoursFilter.SetInputConnection(imageDataGeometryFilter.GetOutputPort()) contoursFilter.GenerateValues(60, scalarRange) contoursMapper = vtk.vtkPolyDataMapper() contoursMapper.SetInputConnection(contoursFilter.GetOutputPort()) contoursMapper.SetColorModeToMapScalars() contoursMapper.ScalarVisibilityOn() contoursMapper.SelectColorArray("JPEGImage") contoursMapper.SetScalarRange(scalarRange) contoursActor = vtk.vtkActor() contoursActor.SetMapper(contoursMapper) actor = vtk.vtkActor() actor.SetMapper(contoursMapper) # Setup rendering renderer = vtk.vtkRenderer() renderer.AddActor(actor) renderer.SetBackground(1,1,1) renderer.ResetCamera() renderWindow = vtk.vtkRenderWindow() renderWindow.AddRenderer(renderer) renderWindowInteractor = vtk.vtkRenderWindowInteractor() renderWindowInteractor.SetRenderWindow(renderWindow) renderWindowInteractor.Start()
2.421875
2
mbbl_envs/mbbl/env/gym_env/invertedPendulum.py
hbutsuak95/iv_rl
9
940
<reponame>hbutsuak95/iv_rl """ # ----------------------------------------------------------------------------- # @brief: # Tingwu: reset the reward function so that it's more similar to the one # defined in GYM # ----------------------------------------------------------------------------- """ import numpy as np from mbbl.config import init_path from mbbl.env import base_env_wrapper as bew from mbbl.env import env_register from mbbl.env import env_util from mbbl.util.common import logger class env(bew.base_env): # acrobot has applied sin/cos obs PENDULUM = ['gym_invertedPendulum'] def __init__(self, env_name, rand_seed, misc_info): super(env, self).__init__(env_name, rand_seed, misc_info) self._base_path = init_path.get_abs_base_dir() self._len_qpos, self._len_qvel = \ env_util.get_gym_q_info(self._env, self._current_version) # return the reset as the gym? if 'reset_type' in misc_info and misc_info['reset_type'] == 'gym': self._reset_return_obs_only = True self.observation_space, self.action_space = \ self._env.observation_space, self._env.action_space # it's possible some environments have different obs self.observation_space = \ env_util.box(self._env_info['ob_size'], -1, 1) else: self._reset_return_obs_only = False def step(self, action): _, _, _, info = self._env.step(action) ob = self._get_observation() # get the reward reward = self.reward( {'end_state': ob, 'start_state': self._old_ob, 'action': action} ) # from mbbl.util.common.fpdb import fpdb; fpdb().set_trace() # get the end signal self._current_step += 1 info['current_step'] = self._current_step if self._current_step > self._env_info['max_length']: done = True else: done = False # will raise warnings -> set logger flag to ignore self._old_ob = np.array(ob) return ob, reward, done, info def reset(self, control_info={}): self._current_step = 0 self._env.reset() # the following is a hack, there is some precision issue in mujoco_py self._old_ob = self._get_observation() self._env.reset() self.set_state({'start_state': self._old_ob.copy()}) self._old_ob = self._get_observation() if self._reset_return_obs_only: return self._old_ob.copy() else: return self._old_ob.copy(), 0.0, False, {} def _get_observation(self): if self._current_version in ['0.7.4', '0.9.4']: qpos = self._env.env.data.qpos qvel = self._env.env.data.qvel else: qpos = self._env.env.sim.data.qpos qvel = self._env.env.sim.data.qvel """ if self._env_name == 'gym_doublePendulum': if self._current_version in ['0.7.4', '0.9.4']: site_xpos = self._env.env.data.site_xpos[:, [0, 2]] else: site_xpos = self._env.env.sim.data.site_xpos[:, [0, 2]] site_xpos = np.transpose(site_xpos) return np.concatenate([qpos, qvel, site_xpos]).ravel() else: """ assert self._env_name == 'gym_invertedPendulum' return np.concatenate([qpos, qvel]).ravel() def _build_env(self): import gym self._current_version = gym.__version__ if self._current_version in ['0.7.4', '0.9.4']: _env_name = { 'gym_invertedPendulum': 'InvertedPendulum-v1', } elif self._current_version == NotImplementedError: # TODO: other gym versions here _env_name = { 'gym_invertedPendulum': 'InvertedPendulum-v2', } else: raise ValueError("Invalid gym-{}".format(self._current_version)) # make the environments self._env_info = env_register.get_env_info(self._env_name) self._env_name = self._env_name.split('-')[0] self._env = gym.make(_env_name[self._env_name]) def _set_groundtruth_api(self): """ @brief: In this function, we could provide the ground-truth dynamics and rewards APIs for the agent to call. 
For the new environments, if we don't set their ground-truth apis, then we cannot test the algorithm using ground-truth dynamics or reward """ self._set_reward_api() self._set_dynamics_api() def _set_dynamics_api(self): def set_state(data_dict): qpos = np.zeros([self._len_qpos]) qvel = np.zeros([self._len_qvel]) qpos[:] = data_dict['start_state'][:self._len_qpos] qvel[:] = data_dict['start_state'][ self._len_qpos: self._len_qpos + self._len_qvel ] # reset the state if self._current_version in ['0.7.4', '0.9.4']: self._env.env.data.qpos = qpos.reshape([-1, 1]) self._env.env.data.qvel = qvel.reshape([-1, 1]) else: self._env.env.sim.data.qpos = qpos.reshape([-1]) self._env.env.sim.data.qvel = qpos.reshape([-1]) self._env.env.model._compute_subtree() # pylint: disable=W0212 self._env.env.model.forward() self._old_ob = self._get_observation() self.set_state = set_state def fdynamics(data_dict): # make sure reset is called before using self.fynamics() self.set_state(data_dict) return self.step(data_dict['action'])[0] self.fdynamics = fdynamics def _set_reward_api(self): """ def _step(self, a): reward = 1.0 self.do_simulation(a, self.frame_skip) ob = self._get_obs() notdone = np.isfinite(ob).all() and (np.abs(ob[1]) <= .2) done = not notdone self.do_simulation(action, self.frame_skip) ob = self._get_obs() x, _, y = self.model.data.site_xpos[0] dist_penalty = 0.01 * x ** 2 + (y - 2) ** 2 v1, v2 = self.model.data.qvel[1:3] vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2 alive_bonus = 10 r = (alive_bonus - dist_penalty - vel_penalty)[0] done = bool(y <= 1) return ob, r, done, {} reward: @xpos_penalty: x ** 2 @ypos_penalty: (y - 2) ** 2 pendulum: (slide, hinge) qpos: 2 (0, 1) qvel: 2 (2, 3) double_pendulum: (slide, hinge, hinge) qpos: 3 (0, 1, 2) qvel: 3 (3, 4, 5) site_pose: 2 (6, 7) """ # step 1, set the zero-order reward function assert self._env_name in self.PENDULUM """ xpos_ob_pos = \ {'gym_pendulum': 0, 'gym_doublePendulum': 6}[self._env_name] ypos_ob_pos = \ {'gym_pendulum': 1, 'gym_doublePendulum': 7}[self._env_name] ypos_target = \ {'gym_pendulum': 0.0, 'gym_doublePendulum': 2}[self._env_name] xpos_coeff = \ {'gym_pendulum': 0.0, 'gym_doublePendulum': 0.01}[self._env_name] """ xpos_ob_pos = 0 ypos_ob_pos = 1 ypos_target = 0.0 xpos_coeff = 0.0 def reward(data_dict): # xpos penalty xpos = data_dict['start_state'][xpos_ob_pos] xpos_reward = -(xpos ** 2) * xpos_coeff # ypos penalty ypos = data_dict['start_state'][ypos_ob_pos] ypos_reward = -(ypos - ypos_target) ** 2 return xpos_reward + ypos_reward self.reward = reward def reward_derivative(data_dict, target): num_data = len(data_dict['start_state']) if target == 'state': derivative_data = np.zeros( [num_data, self._env_info['ob_size']], dtype=np.float ) # the xpos reward part derivative_data[:, xpos_ob_pos] += - 2.0 * xpos_coeff * \ (data_dict['start_state'][:, xpos_ob_pos]) # the ypos reward part derivative_data[:, ypos_ob_pos] += - 2.0 * \ (data_dict['start_state'][:, ypos_ob_pos] - ypos_target) elif target == 'action': derivative_data = np.zeros( [num_data, self._env_info['action_size']], dtype=np.float ) elif target == 'state-state': derivative_data = np.zeros( [num_data, self._env_info['ob_size'], self._env_info['ob_size']], dtype=np.float ) # the xpos reward derivative_data[:, xpos_ob_pos, xpos_ob_pos] += \ - 2.0 * xpos_coeff # the ypos reward derivative_data[:, ypos_ob_pos, ypos_ob_pos] += \ - 2.0 elif target == 'action-state': derivative_data = np.zeros( [num_data, self._env_info['action_size'], self._env_info['ob_size']], dtype=np.float ) 
elif target == 'state-action': derivative_data = np.zeros( [num_data, self._env_info['ob_size'], self._env_info['action_size']], dtype=np.float ) elif target == 'action-action': derivative_data = np.zeros( [num_data, self._env_info['action_size'], self._env_info['action_size']], dtype=np.float ) else: assert False, logger.error('Invalid target {}'.format(target)) return derivative_data self.reward_derivative = reward_derivative def render(self, *args, **kwargs): return if __name__ == '__main__': # test_env_name = ['gym_doublePendulum'] test_env_name = ['gym_invertedPendulum'] for env_name in test_env_name: test_env = env(env_name, 1234, {}) api_env = env(env_name, 1234, {}) api_env.reset() ob, reward, _, _ = test_env.reset() for _ in range(100): action = np.random.uniform(-1, 1, test_env._env.action_space.shape) new_ob, reward, _, _ = test_env.step(action) # test the reward api reward_from_api = \ api_env.reward({'start_state': ob, 'action': action}) reward_error = np.sum(np.abs(reward_from_api - reward)) # test the dynamics api newob_from_api = \ api_env.fdynamics({'start_state': ob, 'action': action}) ob_error = np.sum(np.abs(newob_from_api - new_ob)) ob = new_ob print('reward error: {}, dynamics error: {}'.format( reward_error, ob_error) )
2.359375
2
ACME/visdom/__init__.py
mauriziokovacic/ACME
3
941
from .bar import * from .create_session import * from .image import * from .line import * from .mesh import * from .pie import * from .text import * from .VisdomFigure import * from .VisdomScene import *
1.09375
1
web/pingpongpiweb.py
andrewdyersmith/pingpongpi
0
942
<gh_stars>0 # Ping Pong Pi web UI running on flask. # Uses zmq to speak to daemon controlling screen. from flask import Flask, render_template, appcontext_tearing_down, request from multiprocessing import Process, Queue from multiprocessing.connection import Client import atexit import time import zmq app = Flask(__name__) @app.route('/') def index(): return render_template('index.html') MODE="mode" @app.route('/mode/<name>', methods=['POST']) def mode(name): text = request.args.get("val", default="", type=str) message_queue.put([MODE,name,text]) return "\"OK\"" message_queue = Queue() message_process = None def message_loop(message_queue): print("Starting message loop") context = zmq.Context() while True: try: socket = context.socket(zmq.REQ) socket.connect("tcp://localhost:5555") print("Connected to daemon") while True: msg = message_queue.get() print("Sending ", msg) socket.send_json(msg) socket.recv() except Exception as ex: print(ex) time.sleep(5) def stop_message_loop(): print("Terminating") if message_process: message_process.terminate() atexit.register(stop_message_loop) @app.before_first_request def setup_ipc(): global message_process message_process = Process(target=message_loop, args=(message_queue,)) message_process.start() if __name__ == '__main__': app.run(debug=True, host='0.0.0.0')
2.5625
3
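The Flask process above only sends [MODE, name, text] messages through a ZeroMQ REQ socket; a matching daemon-side consumer is sketched below. The REP socket, address and JSON framing follow directly from message_loop(); the actual display handling is a placeholder.

import zmq

def daemon_loop():
    context = zmq.Context()
    socket = context.socket(zmq.REP)           # pairs with the REQ socket in message_loop()
    socket.bind("tcp://*:5555")                # the web UI connects to tcp://localhost:5555
    while True:
        mode, name, text = socket.recv_json()  # e.g. ["mode", "clock", ""]
        # ...update the LED matrix / screen state here (placeholder)...
        socket.send_string("OK")               # unblocks the REQ side's recv()

if __name__ == '__main__':
    daemon_loop()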
watcher/tests/decision_engine/strategy/strategies/test_base.py
ajaytikoo/watcher
64
943
# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from watcher.common import exception from watcher.decision_engine.datasources import manager from watcher.decision_engine.model import model_root from watcher.decision_engine.strategy import strategies from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state class TestBaseStrategy(base.TestCase): def setUp(self): super(TestBaseStrategy, self).setUp() # fake cluster self.fake_c_cluster = faker_cluster_state.FakerModelCollector() p_c_model = mock.patch.object( strategies.BaseStrategy, "compute_model", new_callable=mock.PropertyMock) self.m_c_model = p_c_model.start() self.addCleanup(p_c_model.stop) p_audit_scope = mock.patch.object( strategies.BaseStrategy, "audit_scope", new_callable=mock.PropertyMock) self.m_audit_scope = p_audit_scope.start() self.addCleanup(p_audit_scope.stop) self.m_audit_scope.return_value = mock.Mock() self.m_c_model.return_value = model_root.ModelRoot() self.strategy = strategies.DummyStrategy(config=mock.Mock()) class TestBaseStrategyDatasource(TestBaseStrategy): def setUp(self): super(TestBaseStrategyDatasource, self).setUp() self.strategy = strategies.DummyStrategy( config=mock.Mock(datasources=None)) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference(self, m_conf, m_manager): """Test if the global preference is used""" m_conf.watcher_datasources.datasources = \ ['gnocchi', 'monasca', 'ceilometer'] # Make sure we access the property and not the underlying function. m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference_reverse(self, m_conf, m_manager): """Test if the global preference is used with another order""" m_conf.watcher_datasources.datasources = \ ['ceilometer', 'monasca', 'gnocchi'] # Make sure we access the property and not the underlying function. 
m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_strategy_preference_override(self, m_conf, m_manager): """Test if the global preference can be overridden""" datasources = mock.Mock(datasources=['ceilometer']) self.strategy = strategies.DummyStrategy( config=datasources) m_conf.watcher_datasources.datasources = \ ['ceilometer', 'monasca', 'gnocchi'] # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=datasources, osc=None) class TestBaseStrategyException(TestBaseStrategy): def setUp(self): super(TestBaseStrategyException, self).setUp() def test_exception_model(self): self.m_c_model.return_value = None self.assertRaises( exception.ClusterStateNotDefined, self.strategy.execute) def test_exception_stale_cdm(self): self.fake_c_cluster.set_cluster_data_model_as_stale() self.m_c_model.return_value = self.fake_c_cluster.cluster_data_model self.assertRaises( # TODO(Dantali0n) This should return ClusterStale, # improve set_cluster_data_model_as_stale(). exception.ClusterStateNotDefined, self.strategy.execute)
1.710938
2
main.py
BenG49/sudoku
0
944
<filename>main.py from sudoku import Sudoku def main(): s = Sudoku.parse( ''' ------------- | |2 | | | | 6 |4 3| | | 5| 7 | ------------- | 7 | 2|8 | |51 | 4|9 | | 9| 3| | ------------- | | 9| | | 2| | 98| | 83|1 |2 | ------------- ''' ) print(s) print(s.solve()) if __name__ == '__main__': main()
3
3
test/test_sampler.py
pfnet-research/autogbt-alt
83
945
<filename>test/test_sampler.py<gh_stars>10-100 import numpy as np import pandas as pd from autogbt.sampler import MajorityUnderSampler def _test_sample(y): sampler = MajorityUnderSampler() idx = sampler.sample(y, 40000, 3.0) assert len(idx) == 40000 assert y[idx].sum() == 10000 def test_sample_with_series(): y = pd.Series(np.concatenate([np.ones((10000)), np.zeros((100000))])) y = y.sample(frac=1.0) _test_sample(y) def test_sample_with_ndarray(): y = np.concatenate([np.ones((10000)), np.zeros((100000))]) _test_sample(y) def test_sample_for_regression(): y = np.concatenate([ 2*np.ones((10000)), 1*np.ones((10000)), 0*np.ones((10000)), ]) sampler = MajorityUnderSampler() idx = sampler.sample(y, 0.1, 3.0) assert len(idx) == 3000
2.5625
3
experiment/diabetes/accuracy_info.py
leandro-santiago/bloomwisard
2
946
import numpy as np import sys from timeit import default_timer as timer sys.path.append("../../") from core import wnn from encoding import thermometer from encoding import util #Load Diabetes data base_path = "../../dataset/diabetes/" #2/3 Test bits_encoding = 20 train_data, train_label, test_data, test_label, data_min, data_max = util.load_3data(base_path) ths = [] for i in range(len(data_max)): ths.append(thermometer.Thermometer(data_min[i], data_max[i], bits_encoding)) train_bin = [] test_bin = [] i = 0 for data in train_data: train_bin.append(np.array([], dtype=bool)) t = 0 for v in data: binarr = ths[t].binarize(v) train_bin[i] = np.append(train_bin[i], binarr) t += 1 i += 1 i = 0 for data in test_data: test_bin.append(np.array([], dtype=bool)) t = 0 for v in data: binarr = ths[t].binarize(v) test_bin[i] = np.append(test_bin[i], binarr) t += 1 i += 1 #print test_label #Wisard num_classes = 2 tuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30] acc_list = [] test_length = len(test_label) entry_size = len(train_bin[0]) #print entry_size for t in tuple_list: wisard = wnn.Wisard(entry_size, t, num_classes) wisard.train(train_bin, train_label) rank_result = wisard.rank(test_bin) num_hits = 0 for i in range(test_length): if rank_result[i] == test_label[i]: num_hits += 1 acc_list.append(float(num_hits)/float(test_length)) #Bloom Wisard btuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 40, 56] bacc_list = [] #capacity = len(train_bin) capacity = 10 print capacity for t in btuple_list: bwisard = wnn.BloomWisard(entry_size, t, num_classes, capacity) bwisard.train(train_bin, train_label) rank_result = bwisard.rank(test_bin) num_hits = 0 for i in range(test_length): if rank_result[i] == test_label[i]: num_hits += 1 bacc_list.append(float(num_hits)/float(test_length)) print "Tuples=", tuple_list print "Wisard Accuracy=", acc_list print "Tuples=", btuple_list print "BloomWisard Accuracy=",bacc_list
2.203125
2
PyGame/pygame1/tutorial1/startercode.py
hoppfull/Legacy-Python
0
947
<reponame>hoppfull/Legacy-Python from pygamehelper import * from pygame import * from pygame.locals import * from vec2d import * from random import uniform import numpy as np class Starter(PygameHelper): def __init__(self): self.w, self.h = 800, 600 PygameHelper.__init__(self, size=(self.w, self.h), fill=((0,0,0))) def update(self): pass def keyUp(self, key): pass def mouseUp(self, button, pos): pass def mouseMotion(self, buttons, pos, rel): pass def draw(self): self.screen.fill((np.random.random()*255, np.random.random()*255, np.random.random()*255)) s = Starter() s.mainLoop(40)
2.8125
3
main.py
thewhiteninja/twitch-recorder
0
948
import glob import os import sys import utils from recorder import StreamRec OUTDIR = "" def parse_args(a): global OUTDIR i = 1 while i < len(a): if a[i] in ["-h", "--help", "/?"]: usage() if a[i] in ["-d", "--dir"]: OUTDIR = a[i + 1] i += 1 i += 1 def usage(): print("Record your favorite Twitch streams!") print("Check an example of .stream file in data/ to see how to add a stream to record") print() print("Usage: %s [Options]" % (os.path.basename(sys.argv[0]))) print() print("Options :") print(" -d, --dir : Output directory") print(" -h, --help : Help") sys.exit(1) def load_streams(): all_inst = [] stream_files = glob.glob('data/**/*.stream', recursive=True) for stream_file in stream_files: inst = StreamRec(stream_file, OUTDIR) all_inst.append(inst) for inst in all_inst: inst.start() for inst in all_inst: inst.join() def main(): utils.welcome() parse_args(sys.argv) utils.make_directory(OUTDIR) load_streams() if __name__ == '__main__': main()
2.921875
3
karbor-1.3.0/karbor/policies/protectables.py
scottwedge/OpenStack-Stein
0
949
# Copyright (c) 2017 Huawei Technologies Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from karbor.policies import base GET_POLICY = 'protectable:get' GET_ALL_POLICY = 'protectable:get_all' INSTANCES_GET_POLICY = 'protectable:instance_get' INSTANCES_GET_ALL_POLICY = 'protectable:instance_get_all' protectables_policies = [ policy.DocumentedRuleDefault( name=GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, description='Show a protectable type.', operations=[ { 'method': 'GET', 'path': '/protectables/{protectable_type}' } ]), policy.DocumentedRuleDefault( name=GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, description='List protectable types.', operations=[ { 'method': 'GET', 'path': '/protectables' } ]), policy.DocumentedRuleDefault( name=INSTANCES_GET_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, description='Show a protectable instance.', operations=[ { 'method': 'GET', 'path': '/protectables/{protectable_type}/' 'instances/{resource_id}' } ]), policy.DocumentedRuleDefault( name=INSTANCES_GET_ALL_POLICY, check_str=base.RULE_ADMIN_OR_OWNER, description='List protectable instances.', operations=[ { 'method': 'GET', 'path': '/protectables/{protectable_type}/instances' } ]), ] def list_rules(): return protectables_policies
1.765625
2
router.example.py
unyo/uhpackage
0
950
#!/usr/bin/python # -*- coding: utf-8 -*- # routers are dictionaries of URL routing parameters. # # For each request, the effective router is: # the built-in default base router (shown below), # updated by the BASE router in routes.py routers, # updated by the app-specific router in routes.py routers (if any), # updated by the app-specific router from applcations/app/routes.py routers (if any) # # # Router members: # # default_application: default application name # applications: list of all recognized applications, or 'ALL' to use all currently installed applications # Names in applications are always treated as an application names when they appear first in an incoming URL. # Set applications to None to disable the removal of application names from outgoing URLs. # domains: optional dict mapping domain names to application names # The domain name can include a port number: domain.com:8080 # The application name can include a controller: appx/ctlrx # path_prefix: a path fragment that is prefixed to all outgoing URLs and stripped from all incoming URLs # # Note: default_application, applications, domains & path_prefix are permitted only in the BASE router, # and domain makes sense only in an application-specific router. # The remaining members can appear in the BASE router (as defaults for all applications) # or in application-specific routers. # # default_controller: name of default controller # default_function: name of default function (all controllers) # controllers: list of valid controllers in selected app # or "DEFAULT" to use all controllers in the selected app plus 'static' # or None to disable controller-name removal. # Names in controllers are always treated as controller names when they appear in an incoming URL after # the (optional) application and language names. # languages: list of all supported languages # Names in controllers are always treated as language names when they appear in an incoming URL after # the (optional) application name. # default_language # The language code (for example: en, it-it) optionally appears in the URL following # the application (which may be omitted). For incoming URLs, the code is copied to # request.language; for outgoing URLs it is taken from request.language. # If languages=None, language support is disabled. # The default_language, if any, is omitted from the URL. # root_static: list of static files accessed from root # (mapped to the current application's static/ directory) # Each application has its own root-static files. # domain: the domain that maps to this application (alternative to using domains in the BASE router) # map_hyphen: If True (default), hyphens in incoming /a/c/f fields are converted to underscores, # and back to hyphens in outgoing URLs. Language, args and the query string are not affected. # map_static: By default, the default application is not stripped from static URLs. Set map_static=True # to override this policy. # acfe_match: regex for valid application, controller, function, extension /a/c/f.e # file_match: regex for valid file (used for static file names) # args_match: regex for valid args # This validation provides a measure of security. # If it is changed, the application perform its own validation. 
# # # The built-in default router supplies default values (undefined members are None): # # default_router = dict( # default_application = 'init', # applications = 'ALL', # default_controller = 'default', # controllers = 'DEFAULT', # default_function = 'index', # default_language = None, # languages = None, # root_static = ['favicon.ico', 'robots.txt'], # domains = None, # map_hyphen = True, # acfe_match = r'\w+$', # legal app/ctlr/fcn/ext # file_match = r'(\w+[-=./]?)+$', # legal file (path) name # args_match = r'([\w@ -]+[=.]?)+$', # legal arg in args # ) # # See rewrite.map_url_in() and rewrite.map_url_out() for implementation details. # This simple router set overrides only the default application name, # but provides full rewrite functionality. routers = dict( # base router BASE = dict( default_application = 'welcome', ), # 'admin' application router admin = dict( controllers = [], # don't remove controller names from admin URLs map_hyphen = False, # don't map hyphens to underscores ), ) # Error-handling redirects all HTTP errors (status codes >= 400) to a specified # path. If you wish to use error-handling redirects, uncomment the tuple # below. You can customize responses by adding a tuple entry with the first # value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are # routed. ) and the value as a path to redirect the user to. You may also use # '*' as a wildcard. # # The error handling page is also passed the error code and ticket as # variables. Traceback information will be stored in the ticket. # # routes_onerror = [ # (r'init/400', r'/init/default/login') # ,(r'init/*', r'/init/static/fail.html') # ,(r'*/404', r'/init/static/cantfind.html') # ,(r'*/*', r'/init/error/index') # ] # specify action in charge of error handling # # error_handler = dict(application='error', # controller='default', # function='index') # In the event that the error-handling page itself returns an error, web2py will # fall back to its old static responses. You can customize them here. # ErrorMessageTicket takes a string format dictionary containing (only) the # "ticket" key. # error_message = '<html><body><h1>Invalid request</h1></body></html>' # error_message_ticket = '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body></html>' def __routes_doctest(): ''' Dummy function for doctesting routes.py. Use filter_url() to test incoming or outgoing routes; filter_err() for error redirection. 
filter_url() accepts overrides for method and remote host: filter_url(url, method='get', remote='0.0.0.0', out=False) filter_err() accepts overrides for application and ticket: filter_err(status, application='app', ticket='tkt') >>> import os >>> import gluon.main >>> from gluon.rewrite import load, filter_url, filter_err, get_effective_router >>> load(routes=os.path.basename(__file__)) >>> filter_url('http://domain.com/abc', app=True) 'welcome' >>> filter_url('http://domain.com/welcome', app=True) 'welcome' >>> os.path.relpath(filter_url('http://domain.com/favicon.ico')) 'applications/welcome/static/favicon.ico' >>> filter_url('http://domain.com/abc') '/welcome/default/abc' >>> filter_url('http://domain.com/index/abc') "/welcome/default/index ['abc']" >>> filter_url('http://domain.com/default/abc.css') '/welcome/default/abc.css' >>> filter_url('http://domain.com/default/index/abc') "/welcome/default/index ['abc']" >>> filter_url('http://domain.com/default/index/a bc') "/welcome/default/index ['a bc']" >>> filter_url('http://domain.com/admin/bad!ctl') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid controller] >>> filter_url('http://domain.com/admin/ctl/bad!fcn') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid function] >>> filter_url('http://domain.com/admin/ctl/fcn.bad!ext') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid extension] >>> filter_url('http://domain.com/admin/ctl/fcn/bad!arg') Traceback (most recent call last): ... HTTP: 400 BAD REQUEST [invalid arg <bad!arg>] >>> filter_url('https://domain.com/app/ctr/fcn', out=True) '/app/ctr/fcn' >>> filter_url('https://domain.com/welcome/ctr/fcn', out=True) '/ctr/fcn' >>> filter_url('https://domain.com/welcome/default/fcn', out=True) '/fcn' >>> filter_url('https://domain.com/welcome/default/index', out=True) '/' >>> filter_url('https://domain.com/welcome/appadmin/index', out=True) '/appadmin' >>> filter_url('http://domain.com/welcome/default/fcn?query', out=True) '/fcn?query' >>> filter_url('http://domain.com/welcome/default/fcn#anchor', out=True) '/fcn#anchor' >>> filter_url('http://domain.com/welcome/default/fcn?query#anchor', out=True) '/fcn?query#anchor' >>> filter_url('http://domain.com/appadmin/fcn-1') '/welcome/appadmin/fcn_1' >>> filter_url('http://domain.com/welcome/appadmin/fcn_1', out=True) '/appadmin/fcn-1' >>> filter_url('http://domain.com/examples/appadmin/fcn-1') '/examples/appadmin/fcn_1' >>> filter_url('http://domain.com/examples/appadmin/fcn_1', out=True) '/examples/appadmin/fcn-1' >>> filter_url('http://domain.com/app/static/filename-with_underscore', out=True) '/app/static/filename-with_underscore' >>> os.path.relpath(filter_url('http://domain.com/admin/static/filename-with_underscore')) 'applications/admin/static/filename-with_underscore' >>> filter_err(200) 200 >>> filter_err(399) 399 >>> filter_err(400) 400 ''' pass if __name__ == '__main__': import doctest doctest.testmod()
2.359375
2
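To complement the routers dict in the record above, a hedged example of an application-specific router that uses only members documented there (languages, default_language, map_hyphen); the application name and language codes are invented for illustration.

# Hypothetical app-specific router built from members documented in the record above.
routers = dict(
    BASE = dict(
        default_application = 'myapp',    # invented application name
    ),
    myapp = dict(
        default_controller = 'default',
        default_function = 'index',
        languages = ['en', 'it-it'],      # /myapp/it-it/... sets request.language
        default_language = 'en',          # the default language is omitted from URLs
        map_hyphen = True,                # hyphens in /a/c/f map to underscores
    ),
)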
notebooks/2018.11.09 Meeting.py
costrouc/uarray
0
951
<reponame>costrouc/uarray #%% from uarray.core import * #%% s = Scalar(Int(10)) #%% @operation def Always(a: T) -> CCallableUnary[T, CContent]: ... #%% register(Call(Always(w("a")), w("idx")), lambda a, idx: a) #%% a_ten = Always(s) #%% s = Sequence(Int(10), a_ten)
2.015625
2
var/spack/repos/builtin/packages/py-black/package.py
dwstreetNNL/spack
0
952
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyBlack(PythonPackage): """Black is the uncompromising Python code formatter. By using it, you agree to cede control over minutiae of hand-formatting. In return, Black gives you speed, determinism, and freedom from pycodestyle nagging about formatting. """ homepage = "https://github.com/psf/black" url = "https://pypi.io/packages/source/b/black/black-20.8b1.tar.gz" version('20.8b1', sha256='1c02557aa099101b9d21496f8a914e9ed2222ef70336404eeeac8edba836fbea') version('19.3b0', sha256='68950ffd4d9169716bcb8719a56c07a2f4485354fec061cdd5910aa07369731c') version('18.9b0', sha256='e030a9a28f542debc08acceb273f228ac422798e5215ba2a791a6ddeaaca22a5') variant('d', default=False, description='enable blackd HTTP server') depends_on('[email protected]:') # Needs setuptools at runtime so that `import pkg_resources` succeeds # See #8843 and #8689 for examples of setuptools added as a runtime dep depends_on('py-setuptools', type=('build', 'run')) # Translated from black's setup.py: # https://github.com/ambv/black/blob/master/setup.py depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b1:', type=('build', 'run')) depends_on('[email protected]:', when='@:20.8b0', type=('build', 'run')) depends_on('py-appdirs', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b1:', type=('build', 'run')) depends_on('[email protected]:', when='@19.10b0:', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run')) depends_on('[email protected]:0.999', when='@19.10b0:', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b0:^python@:3.6', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run')) depends_on('[email protected]:', when='@20.8b0:', type=('build', 'run')) depends_on('[email protected]:', when='+d', type=('build', 'run')) depends_on('py-aiohttp-cors', when='+d', type=('build', 'run')) @property def import_modules(self): modules = ['blib2to3', 'blib2to3.pgen2', 'black'] if '+d' in self.spec: modules.append('blackd') return modules
1.804688
2
store/adminshop/templatetags/admin_extras.py
vallemrv/my_store_test
0
953
<filename>store/adminshop/templatetags/admin_extras.py # -*- coding: utf-8 -*- # @Author: <NAME> <valle> # @Date: 27-Aug-2017 # @Email: <EMAIL> # @Filename: admin_extras.py # @Last modified by: valle # @Last modified time: 02-Feb-2018 # @License: Apache license vesion 2.0 from django import template from django.db.models import Q try: from django.core.urlresolvers import reverse except ImportError: from django.urls import reverse from adminshop.models import Testeo, Compras, Presupuesto import json import sys register = template.Library() @register.filter(name='get_nombre_cliente') def get_nombre_cliente(f): return f.get_nombre_cliente() @register.filter(name='enviado') def enviado(f): return "No" if not f.enviado else "Si" @register.filter(name='get_user') def get_user(f): return f.get_user() @register.filter(name='get_ns_imei') def get_ns_imei(f): return f.get_ns_imei() @register.filter(name='get_producto_pk') def get_producto_pk(f): return f.get_producto_pk() @register.filter(name='addcss') def addcss(field, css): return field.as_widget(attrs={"class":css}) @register.filter(name='reparacion') def reparacion(p): try: pres = Presupuesto.objects.filter(producto__pk=p.id)[0] return pres.notas_cliente except: return "" @register.filter(name='num_pres') def num_pres(p): try: pres = Presupuesto.objects.filter(producto__pk=p.id)[0] return pres.pk except: return -1 @register.filter(name='precio_venta') def precio_venta(p): precio = 0 if p.precio_venta == None else p.precio_venta return "{0:.2f} €".format(precio) @register.filter(name='precio_usado') def precio_usado(p): return "{0:.2f} €".format(p.modelo.precio_usado * p.tipo.incremento) @register.filter(name='document_show') def document_show(p): compras = Compras.objects.filter(producto__id=p.pk) if len(compras) > 0: compra = compras[0] else: compra = Compras() return p.estado in ["ST", "VD", "OL", "VT"] @register.filter(name='document_href') def document_href(p): if p.estado in ["ST", "VT", "OL"]: return reverse("get_document_by_id", args=[p.pk]) elif p.estado in ["RP", "OK", "PD"]: return reverse("get_presupuesto_pdf", args=[p.pk]) elif p.estado == "VD": return reverse("get_all_document", args=[p.pk]) else: return "#" @register.filter(name='have_sign') def have_sign(p): compras = Compras.objects.filter(producto__id=p.pk) compra = Compras() if len(compras) > 0: compra = compras[0] return p.estado in ["ST", "VD", "OL", "VT"] and compra.firma == "" @register.filter(name='editable') def editable(p): return p.estado in ["ST", "OL", "VT"] @register.simple_tag(name='get_estado_value') def get_estado_value(test_id, p_id, estado): testeos = Testeo.objects.filter(Q(descripcion__pk=test_id) & Q(producto__pk=p_id)) send = "" if len(testeos) > 0 and testeos[0].estado == estado: send = "selected" return send @register.filter(name='addattrs') def addattrs(field, args): attr = {} try: args_parse = args.replace("'", '"') attr = json.loads(args_parse) except Exception as error: print(error) return field.as_widget(attrs=attr) @register.filter('klass') def klass(ob): return ob.field.widget.__class__.__name__ @register.filter('display') def display(form, value): return dict(form.field.choices)[value] @register.filter('modelo') def modelo(p): if p.modelo != None: return str(p.modelo) else: return p.detalle @register.filter('marca') def marca(p): if p.modelo != None: return str(p.modelo.marca) else: return ""
1.9375
2
doc/tutorial/using_gpu_solution_1.py
abdalazizrashid/Theano-PyMC
0
954
<gh_stars>0 #!/usr/bin/env python # Aesara tutorial # Solution to Exercise in section 'Using the GPU' # 1. Raw results import numpy as np import aesara import aesara.tensor as tt aesara.config.floatX = "float32" rng = np.random N = 400 feats = 784 D = ( rng.randn(N, feats).astype(aesara.config.floatX), rng.randint(size=N, low=0, high=2).astype(aesara.config.floatX), ) training_steps = 10000 # Declare Aesara symbolic variables x = aesara.shared(D[0], name="x") y = aesara.shared(D[1], name="y") w = aesara.shared(rng.randn(feats).astype(aesara.config.floatX), name="w") b = aesara.shared(np.asarray(0.0, dtype=aesara.config.floatX), name="b") x.tag.test_value = D[0] y.tag.test_value = D[1] # print "Initial model:" # print w.get_value(), b.get_value() # Construct Aesara expression graph p_1 = 1 / (1 + tt.exp(-tt.dot(x, w) - b)) # Probability of having a one prediction = p_1 > 0.5 # The prediction that is done: 0 or 1 xent = -y * tt.log(p_1) - (1 - y) * tt.log(1 - p_1) # Cross-entropy cost = tt.cast(xent.mean(), "float32") + 0.01 * (w ** 2).sum() # The cost to optimize gw, gb = tt.grad(cost, [w, b]) # Compile expressions to functions train = aesara.function( inputs=[], outputs=[prediction, xent], updates=[(w, w - 0.01 * gw), (b, b - 0.01 * gb)], name="train", ) predict = aesara.function(inputs=[], outputs=prediction, name="predict") if any( [ n.op.__class__.__name__ in ["Gemv", "CGemv", "Gemm", "CGemm"] for n in train.maker.fgraph.toposort() ] ): print("Used the cpu") elif any( [ n.op.__class__.__name__ in ["GpuGemm", "GpuGemv"] for n in train.maker.fgraph.toposort() ] ): print("Used the gpu") else: print("ERROR, not able to tell if aesara used the cpu or the gpu") print(train.maker.fgraph.toposort()) for i in range(training_steps): pred, err = train() # print "Final model:" # print w.get_value(), b.get_value() print("target values for D") print(D[1]) print("prediction on D") print(predict()) """ # 2. Profiling # 2.1 Profiling for CPU computations # In your terminal, type: $ THEANO_FLAGS=profile=True,device=cpu python using_gpu_solution_1.py # You'll see first the output of the script: Used the cpu target values for D prediction on D # Followed by the output of profiling.. You'll see profiling results for each function # in the script, followed by a summary for all functions. # We'll show here only the summary: Results were produced using an Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz Function profiling ================== Message: Sum of all(2) printed profiles at exit excluding Scan op profile. Time in 10001 calls to Function.__call__: 1.300452e+00s Time in Function.fn.__call__: 1.215823e+00s (93.492%) Time in thunks: 1.157602e+00s (89.015%) Total compile time: 8.922548e-01s Number of Apply nodes: 17 Aesara Optimizer time: 6.270301e-01s Aesara validate time: 5.993605e-03s Aesara Linker time (includes C, CUDA code generation/compiling): 2.949309e-02s Import time 3.543139e-03s Time in all call to aesara.grad() 1.848292e-02s Time since aesara import 2.864s Class --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name> 64.5% 64.5% 0.747s 3.73e-05s C 20001 3 aesara.tensor.blas_c.CGemv 33.1% 97.7% 0.384s 4.79e-06s C 80001 9 aesara.tensor.elemwise.Elemwise 1.0% 98.6% 0.011s 1.14e-06s C 10000 1 aesara.tensor.elemwise.Sum 0.7% 99.4% 0.009s 2.85e-07s C 30001 4 aesara.tensor.elemwise.DimShuffle 0.3% 99.7% 0.004s 3.64e-07s C 10001 2 aesara.tensor.basic.AllocEmpty 0.3% 100.0% 0.004s 1.78e-07s C 20001 3 aesara.compile.ops.Shape_i ... 
(remaining 0 Classes account for 0.00%(0.00s) of the runtime) Ops --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name> 64.5% 64.5% 0.747s 3.73e-05s C 20001 3 CGemv{inplace} 18.7% 83.2% 0.217s 2.17e-05s C 10000 1 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)] 8.9% 92.1% 0.103s 1.03e-05s C 10000 1 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)] 4.3% 96.4% 0.050s 4.98e-06s C 10000 1 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}} 1.0% 97.4% 0.011s 1.14e-06s C 10000 1 Sum{acc_dtype=float64} 0.5% 97.9% 0.006s 2.83e-07s C 20001 3 InplaceDimShuffle{x} 0.4% 98.3% 0.004s 4.22e-07s C 10000 1 Elemwise{sub,no_inplace} 0.3% 98.6% 0.004s 3.70e-07s C 10000 1 Elemwise{neg,no_inplace} 0.3% 98.9% 0.004s 3.64e-07s C 10001 2 AllocEmpty{dtype='float32'} 0.3% 99.2% 0.004s 1.78e-07s C 20001 3 Shape_i{0} 0.2% 99.5% 0.003s 2.88e-07s C 10000 1 InplaceDimShuffle{1,0} 0.2% 99.7% 0.003s 2.65e-07s C 10000 1 Elemwise{Composite{((-i0) - i1)}}[(0, 0)] 0.2% 99.9% 0.002s 1.98e-07s C 10000 1 Elemwise{Cast{float32}} 0.1% 100.0% 0.002s 1.54e-07s C 10000 1 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)] 0.0% 100.0% 0.000s 4.77e-06s C 1 1 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}} ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime) Apply ------ <% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name> 34.0% 34.0% 0.394s 3.94e-05s 10000 7 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 30.5% 64.5% 0.353s 3.53e-05s 10000 15 CGemv{inplace}(w, TensorConstant{-0.00999999977648}, x.T, Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0, TensorConstant{0.999800026417}) 18.7% 83.2% 0.217s 2.17e-05s 10000 12 Elemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[(0, 4)](y, Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, Elemwise{sub,no_inplace}.0, Elemwise{neg,no_inplace}.0) 8.9% 92.1% 0.103s 1.03e-05s 10000 13 Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)](Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0, TensorConstant{(1,) of -1.0}, y, Elemwise{Cast{float32}}.0, Elemwise{sub,no_inplace}.0) 4.3% 96.4% 0.050s 4.98e-06s 10000 11 Elemwise{Composite{GT(scalar_sigmoid(i0), i1)}}(Elemwise{neg,no_inplace}.0, TensorConstant{(1,) of 0.5}) 1.0% 97.4% 0.011s 1.14e-06s 10000 14 Sum{acc_dtype=float64}(Elemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((scalar_sigmoid((-i0)) * i1 * i4) / i3))}}[(0, 0)].0) 0.4% 97.8% 0.004s 4.22e-07s 10000 4 Elemwise{sub,no_inplace}(TensorConstant{(1,) of 1.0}, y) 0.3% 98.1% 0.004s 3.76e-07s 10000 0 InplaceDimShuffle{x}(b) 0.3% 98.4% 0.004s 3.70e-07s 10000 10 Elemwise{neg,no_inplace}(Elemwise{Composite{((-i0) - i1)}}[(0, 0)].0) 0.3% 98.7% 0.004s 3.64e-07s 10000 5 AllocEmpty{dtype='float32'}(Shape_i{0}.0) 0.2% 99.0% 0.003s 2.88e-07s 10000 2 InplaceDimShuffle{1,0}(x) 0.2% 99.2% 0.003s 2.65e-07s 10000 9 Elemwise{Composite{((-i0) - i1)}}[(0, 0)](CGemv{inplace}.0, InplaceDimShuffle{x}.0) 0.2% 99.4% 0.002s 2.21e-07s 10000 1 Shape_i{0}(x) 0.2% 99.6% 0.002s 1.98e-07s 10000 8 Elemwise{Cast{float32}}(InplaceDimShuffle{x}.0) 0.2% 99.7% 0.002s 1.90e-07s 10000 6 InplaceDimShuffle{x}(Shape_i{0}.0) 0.1% 99.9% 0.002s 1.54e-07s 10000 16 Elemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)](b, TensorConstant{0.00999999977648}, 
Sum{acc_dtype=float64}.0) 0.1% 100.0% 0.001s 1.34e-07s 10000 3 Shape_i{0}(y) 0.0% 100.0% 0.000s 3.89e-05s 1 3 CGemv{inplace}(AllocEmpty{dtype='float32'}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 0.0% 100.0% 0.000s 4.77e-06s 1 4 Elemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}(CGemv{inplace}.0, InplaceDimShuffle{x}.0, TensorConstant{(1,) of 0.5}) 0.0% 100.0% 0.000s 1.19e-06s 1 0 InplaceDimShuffle{x}(b) ... (remaining 2 Apply instances account for 0.00%(0.00s) of the runtime) # 2.2 Profiling for GPU computations # In your terminal, type: $ CUDA_LAUNCH_BLOCKING=1 THEANO_FLAGS=profile=True,device=cuda python using_gpu_solution_1.py # You'll see first the output of the script: Used the gpu target values for D prediction on D Results were produced using a GeForce GTX TITAN X # Profiling summary for all functions: Function profiling ================== Message: Sum of all(2) printed profiles at exit excluding Scan op profile. Time in 10001 calls to Function.__call__: 4.181247e+00s Time in Function.fn.__call__: 4.081113e+00s (97.605%) Time in thunks: 3.915566e+00s (93.646%) Total compile time: 9.256095e+00s Number of Apply nodes: 21 Aesara Optimizer time: 9.996419e-01s Aesara validate time: 6.523132e-03s Aesara Linker time (includes C, CUDA code generation/compiling): 8.239602e+00s Import time 4.228115e-03s Time in all call to aesara.grad() 3.286195e-02s Time since aesara import 15.415s Class --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name> 59.5% 59.5% 2.329s 1.16e-04s C 20001 3 aesara.sandbox.gpuarray.blas.GpuGemv 29.8% 89.3% 1.166s 1.30e-05s C 90001 10 aesara.sandbox.gpuarray.elemwise.GpuElemwise 4.1% 93.4% 0.162s 8.10e-06s C 20001 3 aesara.sandbox.gpuarray.basic_ops.HostFromGpu 3.3% 96.7% 0.131s 1.31e-05s C 10000 1 aesara.sandbox.gpuarray.elemwise.GpuCAReduceCuda 1.6% 98.3% 0.061s 6.10e-06s C 10000 1 aesara.sandbox.gpuarray.basic_ops.GpuFromHost 0.8% 99.1% 0.033s 1.09e-06s C 30001 4 aesara.sandbox.gpuarray.elemwise.GpuDimShuffle 0.7% 99.8% 0.026s 2.59e-06s C 10001 2 aesara.sandbox.gpuarray.basic_ops.GpuAllocEmpty 0.2% 100.0% 0.008s 3.95e-07s C 20001 3 aesara.compile.ops.Shape_i ... 
(remaining 0 Classes account for 0.00%(0.00s) of the runtime) Ops --- <% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name> 59.5% 59.5% 2.329s 1.16e-04s C 20001 3 GpuGemv{inplace=True} 4.1% 63.6% 0.162s 8.10e-06s C 20001 3 HostFromGpu(gpuarray) 4.0% 67.6% 0.157s 1.57e-05s C 10000 1 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray> 3.8% 71.4% 0.149s 1.49e-05s C 10000 1 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray> 3.7% 75.1% 0.144s 1.44e-05s C 10000 1 GpuElemwise{sub,no_inplace} 3.6% 78.7% 0.141s 1.41e-05s C 10000 1 GpuElemwise{gt,no_inplace} 3.4% 82.1% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Cast{float32}}[]<gpuarray> 3.4% 85.5% 0.133s 1.33e-05s C 10000 1 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray> 3.3% 88.8% 0.131s 1.31e-05s C 10000 1 GpuCAReduceCuda{add} 2.9% 91.7% 0.112s 1.12e-05s C 10000 1 GpuElemwise{neg,no_inplace} 2.6% 94.3% 0.102s 1.02e-05s C 10000 1 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray> 2.5% 96.7% 0.096s 9.63e-06s C 10000 1 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray> 1.6% 98.3% 0.061s 6.10e-06s C 10000 1 GpuFromHost<None> 0.7% 99.0% 0.026s 2.59e-06s C 10001 2 GpuAllocEmpty{dtype='float32', context_name=None} 0.5% 99.5% 0.021s 1.06e-06s C 20001 3 InplaceGpuDimShuffle{x} 0.3% 99.8% 0.011s 1.14e-06s C 10000 1 InplaceGpuDimShuffle{1,0} 0.2% 100.0% 0.008s 3.95e-07s C 20001 3 Shape_i{0} 0.0% 100.0% 0.000s 2.00e-05s C 1 1 GpuElemwise{Composite{GT(scalar_sigmoid((-((-i0) - i1))), i2)}}[]<gpuarray> ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime) Apply ------ <% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name> 55.0% 55.0% 2.154s 2.15e-04s 10000 7 GpuGemv{inplace=True}(GpuAllocEmpty{dtype='float32', context_name=None}.0, TensorConstant{1.0}, x, w, TensorConstant{0.0}) 4.5% 59.5% 0.176s 1.76e-05s 10000 18 GpuGemv{inplace=True}(w, TensorConstant{-0.00999999977648}, InplaceGpuDimShuffle{1,0}.0, GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0, TensorConstant{0.999800026417}) 4.0% 63.5% 0.157s 1.57e-05s 10000 12 GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>(y, GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, GpuElemwise{sub,no_inplace}.0, GpuElemwise{neg,no_inplace}.0) 3.8% 67.3% 0.149s 1.49e-05s 10000 15 GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>(GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[-1.]}, y, GpuElemwise{Cast{float32}}[]<gpuarray>.0, GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuElemwise{sub,no_inplace}.0) 3.7% 71.0% 0.144s 1.44e-05s 10000 4 GpuElemwise{sub,no_inplace}(GpuArrayConstant{[ 1.]}, y) 3.6% 74.6% 0.141s 1.41e-05s 10000 16 GpuElemwise{gt,no_inplace}(GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>.0, GpuArrayConstant{[ 0.5]}) 3.4% 78.0% 0.133s 1.33e-05s 10000 10 GpuElemwise{Cast{float32}}[]<gpuarray>(InplaceGpuDimShuffle{x}.0) 3.4% 81.4% 0.133s 1.33e-05s 10000 9 GpuElemwise{Composite{((-i0) - i1)}}[(0, 0)]<gpuarray>(GpuGemv{inplace=True}.0, InplaceGpuDimShuffle{x}.0) 3.3% 84.7% 0.131s 1.31e-05s 10000 17 GpuCAReduceCuda{add}(GpuElemwise{Composite{(((scalar_sigmoid(i0) * i1 * i2) / i3) - ((i4 * i1 * i5) / i3))}}[(0, 0)]<gpuarray>.0) 2.9% 87.5% 0.112s 1.12e-05s 10000 11 GpuElemwise{neg,no_inplace}(GpuElemwise{Composite{((-i0) - i1)}}[(0, 
0)]<gpuarray>.0) 2.6% 90.1% 0.102s 1.02e-05s 10000 20 GpuElemwise{Composite{(i0 - (i1 * i2))}}[(0, 0)]<gpuarray>(b, GpuArrayConstant{0.00999999977648}, GpuCAReduceCuda{add}.0) 2.5% 92.6% 0.096s 9.63e-06s 10000 13 GpuElemwise{ScalarSigmoid}[(0, 0)]<gpuarray>(GpuElemwise{neg,no_inplace}.0) 2.3% 94.9% 0.090s 9.04e-06s 10000 19 HostFromGpu(gpuarray)(GpuElemwise{gt,no_inplace}.0) 1.8% 96.7% 0.072s 7.16e-06s 10000 14 HostFromGpu(gpuarray)(GpuElemwise{Composite{((i0 * scalar_softplus(i1)) - (i2 * i3 * scalar_softplus(i4)))}}[]<gpuarray>.0) 1.6% 98.3% 0.061s 6.10e-06s 10000 6 GpuFromHost<None>(Shape_i{0}.0) 0.7% 99.0% 0.026s 2.59e-06s 10000 5 GpuAllocEmpty{dtype='float32', context_name=None}(Shape_i{0}.0) 0.3% 99.3% 0.013s 1.33e-06s 10000 0 InplaceGpuDimShuffle{x}(b) 0.3% 99.6% 0.011s 1.14e-06s 10000 2 InplaceGpuDimShuffle{1,0}(x) 0.2% 99.8% 0.008s 7.94e-07s 10000 8 InplaceGpuDimShuffle{x}(GpuFromHost<None>.0) 0.1% 99.9% 0.005s 5.27e-07s 10000 1 Shape_i{0}(x) ... (remaining 7 Apply instances account for 0.07%(0.00s) of the runtime) # 3. Conclusions Examine and compare 'Ops' summaries for CPU and GPU. Usually GPU ops 'GpuFromHost' and 'HostFromGpu' by themselves consume a large amount of extra time, but by making as few as possible data transfers between GPU and CPU, you can minimize their overhead. Notice that each of the GPU ops consumes more time than its CPU counterpart. This is because the ops operate on small inputs; if you increase the input data size (e.g. set N = 4000), you will see a gain from using the GPU. """
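# --- Added illustration (not part of the original exercise) ---
# A minimal sketch of the conclusion above: keeping data in shared variables
# moves it to the device once, so repeated calls avoid the HostFromGpu /
# GpuFromHost transfer ops that otherwise dominate small workloads.
# Names below (data, f_transfer, f_resident) are illustrative only.
import numpy as np
import aesara
import aesara.tensor as tt

data = np.random.randn(4000, 784).astype("float32")

# Variant 1: the matrix is an explicit input, so on a GPU backend it is
# copied from the host on every call.
m = tt.matrix("m")
f_transfer = aesara.function([m], tt.exp(m).sum())
f_transfer(data)

# Variant 2: the matrix lives on the device as a shared variable; only the
# scalar result travels back per call.
m_shared = aesara.shared(data, name="m_shared")
f_resident = aesara.function([], tt.exp(m_shared).sum())
f_resident()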
2.46875
2
chainercv/transforms/bbox/translate_bbox.py
souravsingh/chainercv
1
955
def translate_bbox(bbox, y_offset=0, x_offset=0):
    """Translate bounding boxes.

    This method is mainly used together with image transforms, such as
    padding and cropping, which translates the left top point of the image
    from coordinate :math:`(0, 0)` to coordinate
    :math:`(y, x) = (y_{offset}, x_{offset})`.

    The bounding boxes are expected to be packed into a two dimensional
    tensor of shape :math:`(R, 4)`, where :math:`R` is the number of
    bounding boxes in the image. The second axis represents attributes of
    the bounding box. They are :math:`(y_{min}, x_{min}, y_{max}, x_{max})`,
    where the four attributes are coordinates of the top left and the
    bottom right vertices.

    Args:
        bbox (~numpy.ndarray): Bounding boxes to be transformed. The shape is
            :math:`(R, 4)`. :math:`R` is the number of bounding boxes.
        y_offset (int or float): The offset along y axis.
        x_offset (int or float): The offset along x axis.

    Returns:
        ~numpy.ndarray:
        Bounding boxes translated according to the given offsets.

    """
    out_bbox = bbox.copy()
    out_bbox[:, :2] += (y_offset, x_offset)
    out_bbox[:, 2:] += (y_offset, x_offset)

    return out_bbox
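# --- Added usage sketch (not part of the original chainercv file) ---
# Translating one box by (y_offset, x_offset) shifts all four coordinates of
# the (y_min, x_min, y_max, x_max) tuple; the input array is left unchanged.
if __name__ == '__main__':
    import numpy as np

    bbox = np.array([[10., 20., 30., 40.]])
    shifted = translate_bbox(bbox, y_offset=5, x_offset=-5)
    print(shifted)  # [[15. 15. 35. 35.]]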
3.671875
4
behave/features/environment.py
ministryofjustice/cla-end-to-end-tests
1
956
<gh_stars>1-10
import os
from configparser import ConfigParser

from helper.helper_web import get_browser


def before_all(context):
    config = ConfigParser()
    print((os.path.join(os.getcwd(), 'setup.cfg')))
    my_file = (os.path.join(os.getcwd(), 'setup.cfg'))
    config.read(my_file)

    # Reading the browser type from the configuration file
    helper_func = get_browser(config.get('Environment', 'Browser'))
    context.helperfunc = helper_func


def after_all(context):
    context.helperfunc.close()
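# --- Added illustration (assumption, not from the original repository) ---
# The setup.cfg contents are not shown in this file; before_all() only needs
# an [Environment] section with a Browser key, roughly like the sketch below
# (the browser name 'chrome' is an illustrative guess).
if __name__ == '__main__':
    from configparser import ConfigParser

    example = ConfigParser()
    example.read_string("[Environment]\nBrowser = chrome\n")
    print(example.get('Environment', 'Browser'))  # chrome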
2.3125
2
recipe_organizer/gui/recipe_list/recipe_source.py
j-sommer/recipe-organizer
0
957
<filename>recipe_organizer/gui/recipe_list/recipe_source.py
from pathlib import Path
from tkinter import Frame, Label

from recipe_organizer.events.event import Event, EventType
from recipe_organizer.events.event_observer import EventObserver
from recipe_organizer.events.event_publisher import EventPublisher
from recipe_organizer.gui.interfaces.widget_container import WidgetContainer
from recipe_organizer.gui.recipe_summary.recipe_summary import RecipeSummary
from recipe_organizer.recipe.recipe import Recipe


class RecipeSource(Frame, WidgetContainer, EventObserver):
    _MAX_COLUMN_COUNT = 6

    _label_source_directory: Label
    _recipe_summaries: [RecipeSummary] = []
    _row_index = 0

    def __init__(self, parent):
        Frame.__init__(self, parent)
        self.define_widgets()
        self.define_layout()

        EventPublisher.add(self)

    def define_widgets(self) -> None:
        self._label_source_directory = Label(self, text="-")

    def define_layout(self) -> None:
        self._label_source_directory.grid(row=self.__get_row_index())

    def notify(self, event: Event) -> None:
        if event.event_type == EventType.SOURCE_SET:
            self._label_source_directory.configure(text=event.payload.name)
            self.__load_recipes(event.payload)

    def __get_row_index(self) -> int:
        current_index = self._row_index
        self._row_index += 1
        return current_index

    def __load_recipes(self, directory: Path):
        recipes: [Recipe] = []
        file_paths = directory.glob("**/*.json")

        for file_path in file_paths:
            with open(file_path, "r", encoding="utf-8") as file:
                json_data = file.read()

                try:
                    recipe = Recipe.from_json(json_data)
                except KeyError:
                    pass
                else:
                    recipes.append(recipe)

        self.__create_list(recipes)

    def __create_list(self, recipes: [Recipe]):
        current_row_index = self.__get_row_index()

        for index, recipe in enumerate(recipes):
            if index % self._MAX_COLUMN_COUNT == 0:
                current_row_index = self.__get_row_index()

            recipe_summary = RecipeSummary(self, recipe)
            recipe_summary.grid(row=current_row_index,
                                column=index % self._MAX_COLUMN_COUNT,
                                padx=16,
                                pady=10)
            self.columnconfigure(index, minsize=200)

            self._recipe_summaries.append(recipe_summary)
2.390625
2
var/spack/repos/builtin/packages/spot/package.py
xiki-tempula/spack
1
958
<filename>var/spack/repos/builtin/packages/spot/package.py
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Spot(AutotoolsPackage):
    """Spot is a C++11 library for omega-automata manipulation and model checking."""

    homepage = "https://spot.lrde.epita.fr/"
    url = "http://www.lrde.epita.fr/dload/spot/spot-1.99.3.tar.gz"

    version('1.99.3', sha256='86964af559994af4451a8dca663a9e1db6e869ed60e747ab60ce72dddc31b61b')
    version('1.2.6', sha256='360678c75f6741f697e8e56cdbc9937f104eb723a839c3629f0dc5dc6de11bfc')

    variant('python', default=True, description='Enable python API')

    depends_on("[email protected]:", when='@1.99.5: +python')
    depends_on("[email protected]:", when='@1.99: +python')
    depends_on("python@2:", when='+python')
    depends_on('boost', when='@:1.2.6')
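# --- Added usage note (not part of the original package file) ---
# With this recipe on Spack's builtin repository path, the package would
# typically be installed with a spec such as:
#
#   $ spack install spot@1.99.3+python
#
# where '+python' enables the variant declared above and '~python' would
# disable it.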
1.273438
1
0.py
itspuneet/itspuneet
0
959
<reponame>itspuneet/itspuneet
k=0
while k!=1:
    print(k)
    k+=1
3.109375
3
jss_figures_replication_script.py
Cole-vJ/AdvEMDpy
0
960
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) 
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Seres with Added Noise', 50)) x_hs, y, z = hs_ouputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', Linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', Linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', Linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6c time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, LineWidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, 
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, LineWidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, 
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = {0, 50, 100, 150} y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) 
plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF 2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_ouputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_ouputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max() 
figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht.png') plt.show() # Carbon Dioxide Concentration Example CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51) plt.plot(CO2_data['month'], CO2_data['decimal date']) plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35)) plt.ylabel('Parts per million') plt.xlabel('Time (years)') plt.savefig('jss_figures/CO2_concentration.png') plt.show() signal = CO2_data['decimal date'] signal = np.asarray(signal) time = CO2_data['month'] time = np.asarray(time) # compare other packages Carbon Dioxide - top pyemd = pyemd0215() py_emd = pyemd(signal) IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert') print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_pyemd.png') plt.show() emd_sift = emd040.sift.sift(signal) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift[:, :1], 12, 'hilbert') print(f'emd annual frequency error: {np.round(sum(np.abs(IF - np.ones_like(IF)))[0], 3)}') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using emd 0.3.3', 45)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(time, np.ones_like(time), 'k--', label=textwrap.fill('Annual cycle', 10)) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert_emd.png') plt.show() # compare other packages Carbon Dioxide - 
bottom knots = np.linspace(time[0], time[-1], 200) emd_example = AdvEMDpy.EMD(time=time, time_series=signal) imfs, hts, ifs, _, _, _, _ = \ emd_example.empirical_mode_decomposition(knots=knots, knot_time=time, verbose=False) print(f'AdvEMDpy annual frequency error: {np.round(sum(np.abs(ifs[1, :] / (2 * np.pi) - np.ones_like(ifs[1, :]))), 3)}') fig, axs = plt.subplots(2, 2) plt.subplots_adjust(hspace=0.5) axs[0, 0].plot(time, signal) axs[0, 1].plot(time, signal) axs[0, 1].plot(time, imfs[0, :], label='Smoothed') axs[0, 1].legend(loc='lower right') axs[1, 0].plot(time, imfs[1, :]) axs[1, 1].plot(time, imfs[2, :]) axis = 0 for ax in axs.flat: if axis == 0: ax.set(ylabel=R'C0$_2$ concentration') if axis == 1: pass if axis == 2: ax.set(ylabel=R'C0$_2$ concentration') ax.set(xlabel='Time (years)') if axis == 3: ax.set(xlabel='Time (years)') axis += 1 plt.gcf().subplots_adjust(bottom=0.15) axs[0, 0].set_title(r'Original CO$_2$ Concentration') axs[0, 1].set_title('Smoothed CO$_2$ Concentration') axs[1, 0].set_title('IMF 1') axs[1, 1].set_title('Residual') plt.gcf().subplots_adjust(bottom=0.15) plt.savefig('jss_figures/CO2_EMD.png') plt.show() hs_ouputs = hilbert_spectrum(time, imfs, hts, ifs, max_frequency=10, which_imfs=[1], plot=False) x_hs, y, z = hs_ouputs y = y / (2 * np.pi) z_min, z_max = 0, np.abs(z).max() fig, ax = plt.subplots() figure_size = plt.gcf().get_size_inches() factor = 0.7 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.set_title(textwrap.fill(r'Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using AdvEMDpy', 40)) plt.ylabel('Frequency (year$^{-1}$)') plt.xlabel('Time (years)') plt.plot(x_hs[0, :], np.ones_like(x_hs[0, :]), 'k--', label=textwrap.fill('Annual cycle', 10)) ax.axis([x_hs.min(), x_hs.max(), y.min(), y.max()]) box_0 = ax.get_position() ax.set_position([box_0.x0 + 0.0125, box_0.y0 + 0.075, box_0.width * 0.8, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/CO2_Hilbert.png') plt.show()
2.015625
2
GPT-distributed.py
wenhuchen/LogicNLG
141
961
<filename>GPT-distributed.py<gh_stars>100-1000
import argparse
import logging
import torch
import torch.nn.functional as F
import numpy as np
from torch import nn
from torch.autograd import Variable
from transformers import GPT2Config
from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertTokenizer
from DataLoader import *
from Model import BERTGen
from utils import sample_sequence
import torch.optim as optim
import math
import sys
import pandas
import os
import numpy
import nltk
from torch.utils.tensorboard import SummaryWriter
import warnings
from tqdm import tqdm, trange
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data import DataLoader as DL
import torch
from torch.utils.data.distributed import DistributedSampler

warnings.filterwarnings("ignore", category=UserWarning)

device = torch.device('cuda')


def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default='gpt2', type=str)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument("--top_p", type=float, default=0.9)
    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
    parser.add_argument('--do_train', default=False, action="store_true", help="whether to train or test the model")
    parser.add_argument('--do_rl', default=False, action="store_true", help="whether to train or test the model")
    parser.add_argument('--do_val', default=False, action="store_true", help="whether to train or test the model")
    parser.add_argument('--do_test', default=False, action="store_true", help="whether to compute the BLEU scores on test split")
    parser.add_argument('--do_test_challenge', default=False, action="store_true", help="whether to compute the BLEU scores on challenge split")
    parser.add_argument('--do_ppl', default=False, action="store_true", help="whether to compute perplexity of the model")
    parser.add_argument('--do_verify', default=False, action="store_true", help="whether compute the adv-acc score on test split")
    parser.add_argument('--do_verify_challenge', default=False, action="store_true", help="whether compute the adv-acc score on challenge split")
    parser.add_argument('--epoch', default=10, type=int, help="whether to train or test the model")
    parser.add_argument('--batch_size', default=6, type=int, help="whether to train or test the model")
    parser.add_argument('--local_rank', default=-1, type=int, help="whether to train or test the model")
    parser.add_argument('--learning_rate', default=2e-6, type=float, help="whether to train or test the model")
    parser.add_argument('--dataset', default='table', type=str, help="whether to train or test the model")
    parser.add_argument('--every', default=50, type=int, help="whether to train or test the model")
    parser.add_argument('--load_from', default='', type=str, help="whether to train or test the model")
    parser.add_argument('--id', default='models', type=str, help="specify the id of the experiment")
    parser.add_argument('--max_len', default=800, type=int, help="whether to train or test the model")
    parser.add_argument('--dim', default=768, type=int, help="whether to train or test the model")
    parser.add_argument('--layers', default=3, type=int, help="whether to train or test the model")
    parser.add_argument('--head', default=4, type=int, help="whether to train or test the model")
    parser.add_argument("--modelpath", type=str, default="bert-base-uncased",
                        help="For distributed training: local_rank")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=5, help="accumulation steps for gradient")
    parser.add_argument('--decode_first_K', type=int, default=10000, help="For debugging purpose")
    args = parser.parse_args()

    if args.local_rank == -1:
        device = torch.device("cuda")
        args.n_gpu = 1
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()

    tokenizer = GPT2Tokenizer.from_pretrained(args.model)
    model = GPT2LMHeadModel.from_pretrained(args.model)
    #model = nn.DataParallel(model)
    model.to(args.device)

    if args.local_rank == 0:
        torch.distributed.barrier()

    criterion = nn.CrossEntropyLoss(reduction='none', ignore_index=-1)

    if args.do_train:
        if args.local_rank in [-1, 0]:
            if not os.path.exists(args.id):
                os.mkdir(args.id)
            tb_writer = SummaryWriter(log_dir='tensorboard/GPT2-{}'.format(args.model))

        dataset = GPTTableDataset2('data/train_lm_preprocessed.json', tokenizer, args.max_len)

        if args.local_rank == -1:
            sampler = RandomSampler(dataset)
        else:
            sampler = DistributedSampler(dataset)

        train_dataloader = DL(dataset, sampler=sampler, batch_size=args.batch_size, num_workers=0)

        model.train()
        optimizer = optim.Adam(model.parameters(), args.learning_rate)

        avg_loss = 0
        global_step = 0

        if args.local_rank != -1:
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
        else:
            model = torch.nn.DataParallel(model)

        for epoch_idx in trange(0, args.epoch, desc='Epoch', disable=args.local_rank not in [-1, 0]):
            #for idx in range(0, dataset.train_len()):
            for idx, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
                batch = tuple(Variable(t).to(device) for t in batch)
                trg_inp, trg_out, mask, caption = batch
                inputs = torch.cat([caption, trg_inp], 1)

                model.zero_grad()
                optimizer.zero_grad()

                logits = model(inputs)[0]
                logits = logits[:, -trg_out.shape[1]:, :].contiguous()

                loss = criterion(logits.view(-1, logits.shape[-1]), trg_out.view(-1))
                loss = loss * mask.view(-1)
                loss = loss.sum() / mask.sum()

                avg_loss += loss.item()
                loss.backward()
                optimizer.step()
                global_step += 1

                if args.local_rank in [-1, 0] and idx % args.every == 0 and idx > 0:
                    tb_writer.add_scalar("perplexity", math.exp(avg_loss / args.every), global_step)

                    fake_inputs = caption
                    gt_inputs = trg_out.cpu().data.numpy()

                    #samples = model.sample(fake_inputs, tabfeat, caption, highlight_idx, bert)
                    samples = sample_sequence(model, 30, fake_inputs, [])
                    samples = samples[:, caption.shape[1]:]
                    samples = samples.cpu().data.numpy()

                    for s, gt in zip(samples, gt_inputs):
                        text = tokenizer.decode(s, clean_up_tokenization_spaces=True)
                        text = text[: text.find(tokenizer.eos_token)]
                        print("PREDICTION |||||| ", text)
                        text = tokenizer.decode(gt, clean_up_tokenization_spaces=True)
                        text = text[: text.find(tokenizer.eos_token)]
                        print("GROUNDTRUTH |||||| ", text)
                        break

                    avg_loss = 0

            if args.local_rank in [-1, 0]:
                if args.model == 'gpt2':
                    torch.save(model.state_dict(), '{}/GPT_ep{}.pt'.format(args.id, epoch_idx))
                else:
                    torch.save(model.state_dict(), '{}/GPT_medium_ep{}.pt'.format(args.id, epoch_idx))

        if args.local_rank in [-1, 0]:
            tb_writer.close()
2.40625
2
bentoml/saved_bundle/loader.py
niits/BentoML
3,451
962
<gh_stars>1000+
# Copyright 2019 Atalaya Tech, Inc.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import os
import sys
import tarfile
import logging
import tempfile
import shutil

from functools import wraps
from contextlib import contextmanager
from urllib.parse import urlparse
from typing import TYPE_CHECKING
from pathlib import PureWindowsPath, PurePosixPath

from bentoml.utils.s3 import is_s3_url
from bentoml.utils.gcs import is_gcs_url
from bentoml.exceptions import BentoMLException
from bentoml.saved_bundle.config import SavedBundleConfig
from bentoml.saved_bundle.pip_pkg import ZIPIMPORT_DIR

if TYPE_CHECKING:
    from bentoml.yatai.proto.repository_pb2 import BentoServiceMetadata

logger = logging.getLogger(__name__)


def _is_http_url(bundle_path) -> bool:
    try:
        return urlparse(bundle_path).scheme in ["http", "https"]
    except ValueError:
        return False


def _is_remote_path(bundle_path) -> bool:
    return isinstance(bundle_path, str) and (
        is_s3_url(bundle_path) or is_gcs_url(bundle_path) or _is_http_url(bundle_path)
    )


@contextmanager
def _resolve_remote_bundle_path(bundle_path):
    if is_s3_url(bundle_path):
        import boto3

        parsed_url = urlparse(bundle_path)
        bucket_name = parsed_url.netloc
        object_name = parsed_url.path.lstrip('/')

        s3 = boto3.client('s3')
        fileobj = io.BytesIO()
        s3.download_fileobj(bucket_name, object_name, fileobj)
        fileobj.seek(0, 0)
    elif is_gcs_url(bundle_path):
        try:
            from google.cloud import storage
        except ImportError:
            raise BentoMLException(
                '"google-cloud-storage" package is required. You can install it with '
                'pip: "pip install google-cloud-storage"'
            )

        gcs = storage.Client()
        fileobj = io.BytesIO()
        gcs.download_blob_to_file(bundle_path, fileobj)
        fileobj.seek(0, 0)
    elif _is_http_url(bundle_path):
        import requests

        response = requests.get(bundle_path)
        if response.status_code != 200:
            raise BentoMLException(
                f"Error retrieving BentoService bundle. "
                f"{response.status_code}: {response.text}"
            )
        fileobj = io.BytesIO()
        fileobj.write(response.content)
        fileobj.seek(0, 0)
    else:
        raise BentoMLException(f"Saved bundle path: '{bundle_path}' is not supported")

    with tarfile.open(mode="r:gz", fileobj=fileobj) as tar:
        with tempfile.TemporaryDirectory() as tmpdir:
            filename = tar.getmembers()[0].name
            tar.extractall(path=tmpdir)
            yield os.path.join(tmpdir, filename)


def resolve_remote_bundle(func):
    """Decorate a function to handle remote bundles."""

    @wraps(func)
    def wrapper(bundle_path, *args):
        if _is_remote_path(bundle_path):
            with _resolve_remote_bundle_path(bundle_path) as local_bundle_path:
                return func(local_bundle_path, *args)

        return func(bundle_path, *args)

    return wrapper


@resolve_remote_bundle
def load_saved_bundle_config(bundle_path) -> "SavedBundleConfig":
    try:
        return SavedBundleConfig.load(os.path.join(bundle_path, "bentoml.yml"))
    except FileNotFoundError:
        raise BentoMLException(
            "BentoML can't locate config file 'bentoml.yml'"
            " in saved bundle in path: {}".format(bundle_path)
        )


def load_bento_service_metadata(bundle_path: str) -> "BentoServiceMetadata":
    return load_saved_bundle_config(bundle_path).get_bento_service_metadata_pb()


def _find_module_file(bundle_path, service_name, module_file):
    # Simply join full path when module_file is just a file name,
    # e.g. module_file=="iris_classifier.py"
    module_file_path = os.path.join(bundle_path, service_name, module_file)
    if not os.path.isfile(module_file_path):
        # Try loading without service_name prefix, for loading from a installed PyPi
        module_file_path = os.path.join(bundle_path, module_file)

    # When module_file is located in sub directory
    # e.g. module_file=="foo/bar/iris_classifier.py"
    # This needs to handle the path differences between posix and windows platform:
    if not os.path.isfile(module_file_path):
        if sys.platform == "win32":
            # Try load a saved bundle created from posix platform on windows
            module_file_path = os.path.join(
                bundle_path, service_name, str(PurePosixPath(module_file))
            )
            if not os.path.isfile(module_file_path):
                module_file_path = os.path.join(
                    bundle_path, str(PurePosixPath(module_file))
                )
        else:
            # Try load a saved bundle created from windows platform on posix
            module_file_path = os.path.join(
                bundle_path, service_name, PureWindowsPath(module_file).as_posix()
            )
            if not os.path.isfile(module_file_path):
                module_file_path = os.path.join(
                    bundle_path, PureWindowsPath(module_file).as_posix()
                )

    if not os.path.isfile(module_file_path):
        raise BentoMLException(
            "Can not locate module_file {} in saved bundle {}".format(
                module_file, bundle_path
            )
        )

    return module_file_path


@resolve_remote_bundle
def load_bento_service_class(bundle_path):
    """
    Load a BentoService class from saved bundle in given path

    :param bundle_path: A path to Bento files generated from BentoService#save,
        #save_to_dir, or the path to pip installed BentoService directory
    :return: BentoService class
    """
    config = load_saved_bundle_config(bundle_path)
    metadata = config["metadata"]

    # Find and load target module containing BentoService class from given path
    module_file_path = _find_module_file(
        bundle_path, metadata["service_name"], metadata["module_file"]
    )

    # Prepend bundle_path to sys.path for loading extra python dependencies
    sys.path.insert(0, bundle_path)
    sys.path.insert(0, os.path.join(bundle_path, metadata["service_name"]))
    # Include zipimport modules
    zipimport_dir = os.path.join(bundle_path, metadata["service_name"], ZIPIMPORT_DIR)
    if os.path.exists(zipimport_dir):
        for p in os.listdir(zipimport_dir):
            logger.debug('adding %s to sys.path', p)
            sys.path.insert(0, os.path.join(zipimport_dir, p))

    module_name = metadata["module_name"]
    if module_name in sys.modules:
        logger.warning(
            "Module `%s` already loaded, using existing imported module.", module_name
        )
        module = sys.modules[module_name]
    elif sys.version_info >= (3, 5):
        import importlib.util

        spec = importlib.util.spec_from_file_location(module_name, module_file_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    elif sys.version_info >= (3, 3):
        from importlib.machinery import SourceFileLoader

        # pylint:disable=deprecated-method
        module = SourceFileLoader(module_name, module_file_path).load_module(
            module_name
        )
        # pylint:enable=deprecated-method
    else:
        raise BentoMLException("BentoML requires Python 3.4 and above")

    # Remove bundle_path from sys.path to avoid import naming conflicts
    sys.path.remove(bundle_path)

    model_service_class = module.__getattribute__(metadata["service_name"])
    # Set _bento_service_bundle_path, where BentoService will load its artifacts
    model_service_class._bento_service_bundle_path = bundle_path
    # Set cls._version, service instance can access it via svc.version
    model_service_class._bento_service_bundle_version = metadata["service_version"]

    if (
        model_service_class._env
        and model_service_class._env._requirements_txt_file is not None
    ):
        # Load `requirement.txt` from bundle directory instead of the user-provided
        # file path, which may only available during the bundle save process
        model_service_class._env._requirements_txt_file = os.path.join(
            bundle_path, "requirements.txt"
        )

    return model_service_class


@resolve_remote_bundle
def safe_retrieve(bundle_path: str, target_dir: str):
    """Safely retrieve bento service to local path

    Args:
        bundle_path (:obj:`str`):
            The path that contains saved BentoService bundle, supporting
            both local file path and s3 path
        target_dir (:obj:`str`):
            Where the service contents should end up.

    Returns:
        :obj:`str`: location of safe local path
    """
    shutil.copytree(bundle_path, target_dir)


@resolve_remote_bundle
def load_from_dir(bundle_path):
    """Load bento service from local file path or s3 path

    Args:
        bundle_path (str): The path that contains saved BentoService bundle,
            supporting both local file path and s3 path

    Returns:
        bentoml.service.BentoService: a loaded BentoService instance
    """
    svc_cls = load_bento_service_class(bundle_path)
    return svc_cls()


@resolve_remote_bundle
def load_bento_service_api(bundle_path, api_name=None):
    bento_service = load_from_dir(bundle_path)

    return bento_service.get_inference_api(api_name)
1.898438
2
migrations/versions/0158_remove_rate_limit_default.py
cds-snc/notifier-api
41
963
""" Revision ID: 0158_remove_rate_limit_default Revises: 0157_add_rate_limit_to_service Create Date: 2018-01-09 14:33:08.313893 """ import sqlalchemy as sa from alembic import op revision = "0158_remove_rate_limit_default" down_revision = "0157_add_rate_limit_to_service" def upgrade(): op.execute("ALTER TABLE services ALTER rate_limit DROP DEFAULT") op.execute("ALTER TABLE services_history ALTER rate_limit DROP DEFAULT") def downgrade(): op.execute("ALTER TABLE services ALTER rate_limit SET DEFAULT '3000'") op.execute("ALTER TABLE services_history ALTER rate_limit SET DEFAULT '3000'")
1.460938
1
gen-post.py
younghk/younghk.netlify.com
0
964
<gh_stars>0
import os
import errno
from datetime import datetime

print("Generating A New Post\n")

post_name = input('Input Post Name: ')

date_time = datetime.now()
date_time_dir = date_time.strftime("%Y-%m-%d")
date_time_post = date_time.strftime("%Y-%m-%d %H:%M:%S")

p_name = post_name.replace(" ","-")
p_name = p_name.replace("[","")
p_name = p_name.replace("]","")
p_name = p_name.lower()

f_name = date_time_dir+"---"+p_name

dir = "./src/pages/articles/"+f_name+"/"
f_dir = dir+f_name+".md"

try:
    if not(os.path.isdir(dir)):
        os.makedirs(os.path.join(dir))
except OSError as e:
    if e.errno != errno.EEXIST:
        print("Failed to create directory!!!!!")
        raise

print("Generating post : ",f_dir)

with open(f_dir, 'w') as f:
    f.write('---')
    f.write('\n')
    f.write('draft: true')
    f.write('\n')
    f.write('title: \"'+post_name+'\"')
    f.write('\n')
    f.write('date: \"'+date_time_post+'\"')
    f.write('\n')
    f.write('layout: post')
    f.write('\n')
    f.write('path: \"/posts/'+p_name+'/\"')
    f.write('\n')
    f.write('category: \"\"')
    f.write('\n')
    f.write('tags: ')
    f.write('\n')
    f.write('description: ""')
    f.write('\n')
    f.write('---')
    f.write('\n')

print("Done :)")
3.0625
3
rmgpy/reactionTest.py
Lyle-zhang/RMG-Py
0
965
#!/usr/bin/env python # encoding: utf-8 -*- """ This module contains unit tests of the rmgpy.reaction module. """ import numpy import unittest from external.wip import work_in_progress from rmgpy.species import Species, TransitionState from rmgpy.reaction import Reaction from rmgpy.statmech.translation import Translation, IdealGasTranslation from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor from rmgpy.statmech.vibration import Vibration, HarmonicOscillator from rmgpy.statmech.torsion import Torsion, HinderedRotor from rmgpy.statmech.conformer import Conformer from rmgpy.kinetics import Arrhenius from rmgpy.thermo import Wilhoit import rmgpy.constants as constants ################################################################################ class PseudoSpecies: """ Can be used in place of a :class:`rmg.species.Species` for isomorphism checks. PseudoSpecies('a') is isomorphic with PseudoSpecies('A') but nothing else. """ def __init__(self, label): self.label = label def __repr__(self): return "PseudoSpecies('{0}')".format(self.label) def __str__(self): return self.label def isIsomorphic(self, other): return self.label.lower() == other.label.lower() class TestReactionIsomorphism(unittest.TestCase): """ Contains unit tests of the isomorphism testing of the Reaction class. """ def makeReaction(self,reaction_string): """" Make a Reaction (containing PseudoSpecies) of from a string like 'Ab=CD' """ reactants, products = reaction_string.split('=') reactants = [PseudoSpecies(i) for i in reactants] products = [PseudoSpecies(i) for i in products] return Reaction(reactants=reactants, products=products) def test1to1(self): r1 = self.makeReaction('A=B') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB'))) def test1to2(self): r1 = self.makeReaction('A=BC') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c'))) def test2to2(self): r1 = self.makeReaction('AB=CD') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde'))) def test2to3(self): r1 = self.makeReaction('AB=CDE') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde'))) class TestReaction(unittest.TestCase): """ Contains unit tests of the Reaction class. 
""" def setUp(self): """ A method that is called prior to each unit test in this class. """ ethylene = Species( label = 'C2H4', conformer = Conformer( E0 = (44.7127, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (28.0313, 'amu'), ), NonlinearRotor( inertia = ( [3.41526, 16.6498, 20.065], 'amu*angstrom^2', ), symmetry = 4, ), HarmonicOscillator( frequencies = ( [828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54], 'cm^-1', ), ), ], spinMultiplicity = 1, opticalIsomers = 1, ), ) hydrogen = Species( label = 'H', conformer = Conformer( E0 = (211.794, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (1.00783, 'amu'), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) ethyl = Species( label = 'C2H5', conformer = Conformer( E0 = (111.603, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [4.8709, 22.2353, 23.9925], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73], 'cm^-1', ), ), HinderedRotor( inertia = (1.11481, 'amu*angstrom^2'), symmetry = 6, barrier = (0.244029, 'kJ/mol'), semiclassical = None, ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) TS = TransitionState( label = 'TS', conformer = Conformer( E0 = (266.694, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [6.78512, 22.1437, 22.2114], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88], 'cm^-1', ), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), frequency = (-750.232, 'cm^-1'), ) self.reaction = Reaction( reactants = [hydrogen, ethylene], products = [ethyl], kinetics = Arrhenius( A = (501366000.0, 'cm^3/(mol*s)'), n = 1.637, Ea = (4.32508, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2500, 'K'), ), transitionState = TS, ) # CC(=O)O[O] acetylperoxy = Species( label='acetylperoxy', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")), ) # C[C]=O acetyl = Species( label='acetyl', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")), ) # [O][O] oxygen = Species( label='oxygen', thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")), ) self.reaction2 = Reaction( reactants=[acetyl, oxygen], products=[acetylperoxy], kinetics = Arrhenius( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, Ea = (0.0, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ), ) def testIsIsomerization(self): """ Test the Reaction.isIsomerization() method. 
""" isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertTrue(isomerization.isIsomerization()) self.assertFalse(association.isIsomerization()) self.assertFalse(dissociation.isIsomerization()) self.assertFalse(bimolecular.isIsomerization()) def testIsAssociation(self): """ Test the Reaction.isAssociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isAssociation()) self.assertTrue(association.isAssociation()) self.assertFalse(dissociation.isAssociation()) self.assertFalse(bimolecular.isAssociation()) def testIsDissociation(self): """ Test the Reaction.isDissociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isDissociation()) self.assertFalse(association.isDissociation()) self.assertTrue(dissociation.isDissociation()) self.assertFalse(bimolecular.isDissociation()) def testHasTemplate(self): """ Test the Reaction.hasTemplate() method. """ reactants = self.reaction.reactants[:] products = self.reaction.products[:] self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants = self.reaction2.reactants[:] products = self.reaction2.products[:] self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) def testEnthalpyOfReaction(self): """ Test the Reaction.getEnthalpyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']] Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2) def testEntropyOfReaction(self): """ Test the Reaction.getEntropyOfReaction() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']] Slist = self.reaction2.getEntropiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Slist[i], Slist0[i], 2) def testFreeEnergyOfReaction(self): """ Test the Reaction.getFreeEnergyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']] Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2) def testEquilibriumConstantKa(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']] Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka') for i in range(len(Tlist)): self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4) def testEquilibriumConstantKc(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']] Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc') for i in range(len(Tlist)): self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4) def testEquilibriumConstantKp(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']] Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp') for i in range(len(Tlist)): self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4) def testStoichiometricCoefficient(self): """ Test the Reaction.getStoichiometricCoefficient() method. """ for reactant in self.reaction.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1) for product in self.reaction.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1) for reactant in self.reaction2.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0) for product in self.reaction2.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0) def testRateCoefficient(self): """ Test the Reaction.getRateCoefficient() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 for T in Tlist: self.assertAlmostEqual(self.reaction.getRateCoefficient(T, P) / self.reaction.kinetics.getRateCoefficient(T), 1.0, 6) def testGenerateReverseRateCoefficient(self): """ Test the Reaction.generateReverseRateCoefficient() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) P = 1e5 reverseKinetics = self.reaction2.generateReverseRateCoefficient() for T in Tlist: kr0 = self.reaction2.getRateCoefficient(T, P) / self.reaction2.getEquilibriumConstant(T) kr = reverseKinetics.getRateCoefficient(T) self.assertAlmostEqual(kr0 / kr, 1.0, 0) def testGenerateReverseRateCoefficientArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Arrhenius format. """ original_kinetics = Arrhenius( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, Ea = (0.0, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(original_kinetics.Tmin.value_si, original_kinetics.Tmax.value_si, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) @work_in_progress def testGenerateReverseRateCoefficientArrheniusEP(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the ArrheniusEP format. """ from rmgpy.kinetics import ArrheniusEP original_kinetics = ArrheniusEP( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, alpha = 0.5, E0 = (41.84, 'kJ/mol'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(original_kinetics.Tmin, original_kinetics.Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientPDepArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the PDepArrhenius format. 
""" from rmgpy.kinetics import PDepArrhenius arrhenius0 = Arrhenius( A = (1.0e6,"s^-1"), n = 1.0, Ea = (10.0,"kJ/mol"), T0 = (300.0,"K"), Tmin = (300.0,"K"), Tmax = (2000.0,"K"), comment = """This data is completely made up""", ) arrhenius1 = Arrhenius( A = (1.0e12,"s^-1"), n = 1.0, Ea = (20.0,"kJ/mol"), T0 = (300.0,"K"), Tmin = (300.0,"K"), Tmax = (2000.0,"K"), comment = """This data is completely made up""", ) pressures = numpy.array([0.1, 10.0]) arrhenius = [arrhenius0, arrhenius1] Tmin = 300.0 Tmax = 2000.0 Pmin = 0.1 Pmax = 10.0 comment = """This data is completely made up""" original_kinetics = PDepArrhenius( pressures = (pressures,"bar"), arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientMultiArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the MultiArrhenius format. """ from rmgpy.kinetics import MultiArrhenius pressures = numpy.array([0.1, 10.0]) Tmin = 300.0 Tmax = 2000.0 Pmin = 0.1 Pmax = 10.0 comment = """This data is completely made up""" arrhenius = [ Arrhenius( A = (9.3e-14,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (1.4e-9,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ] original_kinetics = MultiArrhenius( arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientMultiPDepArrhenius(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the MultiPDepArrhenius format. """ from rmgpy.kinetics import PDepArrhenius, MultiPDepArrhenius Tmin = 350. Tmax = 1500. 
Pmin = 1e-1 Pmax = 1e1 pressures = numpy.array([1e-1,1e1]) comment = 'CH3 + C2H6 <=> CH4 + C2H5 (Baulch 2005)' arrhenius = [ PDepArrhenius( pressures = (pressures,"bar"), arrhenius = [ Arrhenius( A = (9.3e-16,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (9.3e-14,"cm^3/(molecule*s)"), n = 0.0, Ea = (4740*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ], Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ), PDepArrhenius( pressures = (pressures,"bar"), arrhenius = [ Arrhenius( A = (1.4e-11,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), Arrhenius( A = (1.4e-9,"cm^3/(molecule*s)"), n = 0.0, Ea = (11200*constants.R*0.001,"kJ/mol"), T0 = (1,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), comment = comment, ), ], Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ), ] original_kinetics = MultiPDepArrhenius( arrhenius = arrhenius, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), comment = comment, ) self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientThirdBody(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the ThirdBody format. """ from rmgpy.kinetics import ThirdBody arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" thirdBody = ThirdBody( arrheniusLow = arrheniusLow, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = thirdBody self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientLindemann(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Lindemann format. 
""" from rmgpy.kinetics import Lindemann arrheniusHigh = Arrhenius( A = (1.39e+16,"cm^3/(mol*s)"), n = -0.534, Ea = (2.243,"kJ/mol"), T0 = (1,"K"), ) arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" lindemann = Lindemann( arrheniusHigh = arrheniusHigh, arrheniusLow = arrheniusLow, Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = lindemann self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testGenerateReverseRateCoefficientTroe(self): """ Test the Reaction.generateReverseRateCoefficient() method works for the Troe format. """ from rmgpy.kinetics import Troe arrheniusHigh = Arrhenius( A = (1.39e+16,"cm^3/(mol*s)"), n = -0.534, Ea = (2.243,"kJ/mol"), T0 = (1,"K"), ) arrheniusLow = Arrhenius( A = (2.62e+33,"cm^6/(mol^2*s)"), n = -4.76, Ea = (10.21,"kJ/mol"), T0 = (1,"K"), ) alpha = 0.783 T3 = 74 T1 = 2941 T2 = 6964 efficiencies = {"C": 3, "C(=O)=O": 2, "CC": 3, "O": 6, "[Ar]": 0.7, "[C]=O": 1.5, "[H][H]": 2} Tmin = 300. Tmax = 2000. Pmin = 0.01 Pmax = 100. comment = """H + CH3 -> CH4""" troe = Troe( arrheniusHigh = arrheniusHigh, arrheniusLow = arrheniusLow, alpha = alpha, T3 = (T3,"K"), T1 = (T1,"K"), T2 = (T2,"K"), Tmin = (Tmin,"K"), Tmax = (Tmax,"K"), Pmin = (Pmin,"bar"), Pmax = (Pmax,"bar"), efficiencies = efficiencies, comment = comment, ) original_kinetics = troe self.reaction2.kinetics = original_kinetics reverseKinetics = self.reaction2.generateReverseRateCoefficient() self.reaction2.kinetics = reverseKinetics # reverse reactants, products to ensure Keq is correctly computed self.reaction2.reactants, self.reaction2.products = self.reaction2.products, self.reaction2.reactants reversereverseKinetics = self.reaction2.generateReverseRateCoefficient() # check that reverting the reverse yields the original Tlist = numpy.arange(Tmin, Tmax, 200.0, numpy.float64) P = 1e5 for T in Tlist: korig = original_kinetics.getRateCoefficient(T, P) krevrev = reversereverseKinetics.getRateCoefficient(T, P) self.assertAlmostEqual(korig / krevrev, 1.0, 0) def testTSTCalculation(self): """ A test of the transition state theory k(T) calculation function, using the reaction H + C2H4 -> C2H5. 
""" Tlist = 1000.0/numpy.arange(0.4, 3.35, 0.01) klist = numpy.array([self.reaction.calculateTSTRateCoefficient(T) for T in Tlist]) arrhenius = Arrhenius().fitToData(Tlist, klist, kunits='m^3/(mol*s)') klist2 = numpy.array([arrhenius.getRateCoefficient(T) for T in Tlist]) # Check that the correct Arrhenius parameters are returned self.assertAlmostEqual(arrhenius.A.value_si, 2265.2488, delta=1e-2) self.assertAlmostEqual(arrhenius.n.value_si, 1.45419, delta=1e-4) self.assertAlmostEqual(arrhenius.Ea.value_si, 6645.24, delta=1e-2) # Check that the fit is satisfactory (defined here as always within 5%) for i in range(len(Tlist)): self.assertAlmostEqual(klist[i], klist2[i], delta=5e-2 * klist[i]) def testPickle(self): """ Test that a Reaction object can be successfully pickled and unpickled with no loss of information. """ import cPickle reaction = cPickle.loads(cPickle.dumps(self.reaction,-1)) self.assertEqual(len(self.reaction.reactants), len(reaction.reactants)) self.assertEqual(len(self.reaction.products), len(reaction.products)) for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants): self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2) self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units) for product0, product in zip(self.reaction.products, reaction.products): self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2) self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units) self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2) self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units) self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2) self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units) self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6) self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment) self.assertEqual(self.reaction.duplicate, reaction.duplicate) self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) def testOutput(self): """ Test that a Reaction object can be successfully reconstructed from its repr() output with no loss of information. 
""" exec('reaction = %r' % (self.reaction)) self.assertEqual(len(self.reaction.reactants), len(reaction.reactants)) self.assertEqual(len(self.reaction.products), len(reaction.products)) for reactant0, reactant in zip(self.reaction.reactants, reaction.reactants): self.assertAlmostEqual(reactant0.conformer.E0.value_si / 1e6, reactant.conformer.E0.value_si / 1e6, 2) self.assertEqual(reactant0.conformer.E0.units, reactant.conformer.E0.units) for product0, product in zip(self.reaction.products, reaction.products): self.assertAlmostEqual(product0.conformer.E0.value_si / 1e6, product.conformer.E0.value_si / 1e6, 2) self.assertEqual(product0.conformer.E0.units, product.conformer.E0.units) self.assertAlmostEqual(self.reaction.transitionState.conformer.E0.value_si / 1e6, reaction.transitionState.conformer.E0.value_si / 1e6, 2) self.assertEqual(self.reaction.transitionState.conformer.E0.units, reaction.transitionState.conformer.E0.units) self.assertAlmostEqual(self.reaction.transitionState.frequency.value_si, reaction.transitionState.frequency.value_si, 2) self.assertEqual(self.reaction.transitionState.frequency.units, reaction.transitionState.frequency.units) self.assertAlmostEqual(self.reaction.kinetics.A.value_si, reaction.kinetics.A.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.n.value_si, reaction.kinetics.n.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.T0.value_si, reaction.kinetics.T0.value_si, delta=1e-6) self.assertAlmostEqual(self.reaction.kinetics.Ea.value_si, reaction.kinetics.Ea.value_si, delta=1e-6) self.assertEqual(self.reaction.kinetics.comment, reaction.kinetics.comment) self.assertEqual(self.reaction.duplicate, reaction.duplicate) self.assertEqual(self.reaction.degeneracy, reaction.degeneracy) ################################################################################ if __name__ == '__main__': unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
2.328125
2
backend/core/api_urls.py
albeiks/omaralbeik.com
10
966
<filename>backend/core/api_urls.py
from django.conf.urls import url, include

from core.routers import OptionalTrailingSlashRouter
from blog import views as blogViews
from snippets import views as snippetsViews
from projects import views as projectsViews
from tags import views as tagsViews
from contents import views as contentsViews
from contact import views as contactViews

router = OptionalTrailingSlashRouter()

router.register(r"blog", blogViews.PostViewSet)
router.register(r"snippets", snippetsViews.SnippetViewSet)
router.register(r"languages", snippetsViews.ProgrammingLanguageViewSet)
router.register(r"projects", projectsViews.ProjectViewSet)
router.register(r"tags", tagsViews.TagViewSet)
router.register(r"contents", contentsViews.ContentViewSet)
router.register(r"contact", contactViews.MessageViewSet)

# List of url patterns for the api subdomain
urlpatterns = [
    url(r"^v2/", include(router.urls)),
]
2.078125
2
python/video_ADG.py
alexberndt/mobile-AGV-optimization
2
967
""" closed-loop MILP solved to determine optimal ordering defined by ADG """ import sys import yaml import time import matplotlib.colors as mcolors import matplotlib import matplotlib.pyplot as plt import random import logging import time import networkx as nx import csv import statistics as stat import os import sys from mip import Model, ProgressLog, xsum, maximize, minimize, BINARY, CONTINUOUS, Constr, ConstrList sys.path.insert(1, "functions/") from planners import * from visualizers import * from milp_formulation import * from robot import * from adg import * from adg_node import * from process_results import * logger = logging.getLogger(__name__) logging.basicConfig(format='%(name)s - %(levelname)s :: %(message)s', level=logging.INFO) def main(): """ --------------------------- INPUTS --------------------------------- """ show_visual = False show_ADG = True #not show_visual run_MILP = True #False #True save_file = False sim_timeout = 500 # define prediction and control horizons: H_prediction >= H_control H_prediction = np.NaN # integer value for forward node lookup H_control = 5 random_seed = 0 mu = 0.5 robust_param = 0.0 delay_amount = 5 delayed_robot_cnt = 2 w = 1.4 # sub-optimality bound: w = 1.0 -> CBS, else ECBS! fldr = "nuernberg_small" # auto_gen_01_nuernberg | auto_gen_00_large | auto_gen_02_simple | manual_03_maxplus random.seed(random_seed) np.random.seed(random_seed) """ -------------------------------------------------------------------- """ # start initial pwd = os.path.dirname(os.path.abspath(__file__)) logger.info(pwd) map_file = pwd + "/data/" + fldr + "/csv_map_yaml.yaml" robot_file = pwd + "/data/" + fldr + "/csv_robots_yaml.yaml" robot_file_tmp = pwd + "/data/tmp/robots.yaml" start_time = time.time() plans = run_CBS(map_file, robot_file, w=w) # if w > 1.0, run_CBS uses ECBS! 
logger.info(" with sub-optimality w={}".format(w)) logger.info(" plan statistics: {} \n".format(plans["statistics"])) logger.debug(plans["schedule"]) # show factory map # show_factory_map(map_file, robot_file, True) # plt.show() map_gen_robot_count = 10 map_gen_seedval = "NaN" try: map_gen_robot_count = int(sys.argv[1]) map_gen_seedval = int(sys.argv[2]) H_control = int(sys.argv[3]) robust_param = int(sys.argv[4]) random.seed(map_gen_seedval) # map_gen_seedval np.random.seed(map_gen_seedval) # map_gen_seedval except: print(" no valid inputs given, ignoring ...") # determine ADG, reverse ADG and dependency groups ADG, robot_plan, goal_positions = determine_ADG(plans, show_graph=False) nodes_all, edges_type_1, dependency_groups = analyze_ADG(ADG, plans, show_graph=False) ADG_reverse = ADG.reverse(copy=False) # initialize simulation robots = [] solve_time = [] robots_done = [] time_to_goal = {} colors = plt.cm.rainbow( np.arange(len(robot_plan))/len(robot_plan) ) for robot_id in robot_plan: plan = robot_plan[robot_id] logger.debug("Robot {} - plan: {} \t \t positions: {}".format(robot_id, plan["nodes"], plan["positions"])) new_robot = Robot(robot_id, plan, colors[robot_id], goal_positions[robot_id]) robots.append(new_robot) robots_done.append(False) time_to_goal[robot_id] = 0 if show_visual: visualizer = Visualizer(map_file, robots) # initialize optimization MIP object m_opt m_opt = Model('MILP_sequence', solver='CBC') # print(m_opt.max_nodes) pl_opt = ProgressLog() # pl_opt.settings = "objective_value" # print("pl_opt.settings: {}".format(pl_opt.settings)) # print("pl_opt.log: {}".format(pl_opt.log)) # pl_opt.instance = m_opt.name # print("pl_opt.instance: {}".format(pl_opt.instance)) ADG_fig = plt.figure(figsize=(12,8)) plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) metadata = dict(title='Movie Test', artist='Matplotlib', comment='Movie support!') writer = FFMpegWriter(fps=2, metadata=metadata) with writer.saving(ADG_fig, "ADG_video.mp4", 500): # run a simulation in time k = 0 robot_IDs_to_delay = [] while (not all(robots_done)) and (k < sim_timeout): print("pl_opt.log: {}".format(pl_opt.log)) m_opt.clear() # show current robot status logger.info("-------------------- @ time step k = {} --------------------".format(k)) for robot in robots: node_info = ADG.node[robot.current_node]["data"] logger.debug(" - Robot {} # {} @ {} => status: {}".format(robot.robot_ID, node_info.ID, node_info.s_loc, robot.status)) # solve MILP for the advanced ADG to potentially adjust ordering res, solve_t = solve_MILP(robots, dependency_groups, ADG, ADG_reverse, H_control, H_prediction, m_opt, pl_opt, run=run_MILP, uncertainty_bound=robust_param) solve_time.append(solve_t) if not (res is None or res == "OptimizationStatus.OPTIMAL"): ValueError("Optimization NOT optimal") # ADG after MILP if show_ADG: # draw_ADG(ADG, robots, "ADG after MILP ADG | k = {}".format(k), writer=writer) # plt.show() # check for cycles try: nx.find_cycle(ADG, orientation="original") logger.warning("Cycle detected!!") raise Exception("ADG has a cycle => deadlock! something is wrong with optimization") except nx.NetworkXNoCycle: logger.debug("no cycle detected in ADG => no deadlock. 
good!") pass if (k % delay_amount) == 0: robot_IDs = np.arange(map_gen_robot_count) robot_IDs_to_delay = np.random.choice(map_gen_robot_count, size=delayed_robot_cnt, replace=False) logger.info("delaying robots (ID): {}".format(robot_IDs_to_delay)) # Advance robots if possible (dependencies have been met) for robot in robots: # check if all dependencies have been met, to advance to next node node_info = ADG.node[robot.current_node]["data"] node_dependencies_list = list(ADG_reverse.neighbors(robot.current_node)) all_dependencies_completed = True for dependency in node_dependencies_list: if (ADG.node[dependency]["data"].status != Status.FINISHED): all_dependencies_completed = False # if all dependencies are completed, the robot can advance! # delay_amount = np.random.poisson(mu) # same sample every time if all_dependencies_completed and k > 0: # (robot.robot_ID == 2 or k > 5) if (not (robot.robot_ID in robot_IDs_to_delay)): # or (k < 10 or k > 20)): # or (robot.robot_ID == 3 or k > 8): ADG.node[robot.current_node]["data"].status = Status.FINISHED robot.advance() if not robot.is_done(): time_to_goal[robot.robot_ID] += 1 else: robots_done[robot.robot_ID] = True if show_visual: visualizer.redraw(robots, pause_length=0.1) # return 0 k += 1 # end of while loop total_time = 0 for idx, t in time_to_goal.items(): total_time += t logger.info("Total time to complete missions: {}".format(total_time)) logger.info("horizon = {}".format(H_control)) logger.info("") logger.info("Computation time:") logger.info(" - max: {}".format(max(solve_time))) logger.info(" - avg: {}".format(stat.mean(solve_time))) # create data to save to YAML file simulation_results = {} simulation_results["parameters"] = {} simulation_results["parameters"]["H_control"] = H_control simulation_results["parameters"]["random seed"] = random_seed simulation_results["parameters"]["ECBS w"] = w simulation_results["parameters"]["mu"] = mu simulation_results["parameters"]["robust param"] = robust_param simulation_results["parameters"]["delay amount"] = delay_amount simulation_results["map details"] = {} simulation_results["map details"]["robot_count"] = map_gen_robot_count simulation_results["map details"]["seed val"] = map_gen_seedval simulation_results["results"] = {} simulation_results["results"]["comp time"] = {} simulation_results["results"]["comp time"]["solve_time"] = [solve_time] simulation_results["results"]["comp time"]["max"] = max(solve_time) simulation_results["results"]["comp time"]["avg"] = stat.mean(solve_time) simulation_results["results"]["total time"] = total_time logger.info(simulation_results) file_name = pwd + "/results/robust_" +str(delayed_robot_cnt) + "x" + str(delay_amount) + "/res_robots_" + str(map_gen_robot_count) + "_horizon_" + str(H_control) + "_mapseed_" + str(map_gen_seedval) + "_robustparam_" + str(robust_param) + ".yaml" if save_file: save_to_yaml(simulation_results, file_name) if __name__ == "__main__": main()
2.25
2
tests/model/test_guest.py
bcurnow/rfid-security-svc
0
968
<reponame>bcurnow/rfid-security-svc import pytest from unittest.mock import patch import rfidsecuritysvc.model.guest as model from rfidsecuritysvc.model.color import Color from rfidsecuritysvc.model.guest import Guest from rfidsecuritysvc.model.sound import Sound from rfidsecuritysvc.exception import SoundNotFoundError def test_Guest(assert_model, default_sound, default_color): assert_model(_model(1, 'first', 'last', default_sound, default_color), Guest(1, 'first', 'last', default_sound, default_color)) @patch('rfidsecuritysvc.model.guest.table') def test_get(table): table.get.return_value = _default().test_to_row() assert model.get(1) == _default() table.get.assert_called_once_with(1) @patch('rfidsecuritysvc.model.guest.table') def test_get_notfound(table): table.get.return_value = None assert model.get(1) is None table.get.assert_called_once_with(1) @patch('rfidsecuritysvc.model.guest.table') def test_list(table): table.list.return_value = [ _default().test_to_row(), _default(2).test_to_row(), ] models = model.list() table.list.assert_called_once() assert models == [_default(), _default(2)] @patch('rfidsecuritysvc.model.guest.table') def test_list_noresults(table): table.list.return_value = [] models = model.list() table.list.assert_called_once() assert models == [] @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create(table, sound, default_sound): sound.get.return_value = default_sound table.create.return_value = None assert model.create('first', 'last', default_sound.id, 0xABCDEF) is None sound.get.assert_called_once_with(default_sound.id) table.create.assert_called_once_with('first', 'last', default_sound.id, 0xABCDEF) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create_SoundNotFoundError(table, sound, default_sound): sound.get.return_value = None with pytest.raises(SoundNotFoundError): model.create('first', 'last', default_sound.id, 0xABCDEF) sound.get.assert_called_once_with(default_sound.id) table.create.assert_not_called() @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_create_no_prefs(table, sound, default_sound): table.create.return_value = None assert model.create('first', 'last', None, None) is None sound.get.assert_not_called() table.create.assert_called_once_with('first', 'last', None, None) @patch('rfidsecuritysvc.model.guest.table') def test_delete(table): table.delete.return_value = 1 assert model.delete(1) == 1 table.delete.assert_called_with(1) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update(table, sound, default_sound): sound.get.return_value = default_sound table.update.return_value = 1 assert model.update(1, 'first', 'last', default_sound.id, 0xABCDEF) == 1 sound.get.assert_called_once_with(default_sound.id) table.update.assert_called_once_with(1, 'first', 'last', default_sound.id, 0xABCDEF) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update_no_prefs(table, sound, default_sound): table.update.return_value = 1 assert model.update(1, 'first', 'last', None, None) == 1 sound.get.assert_not_called() table.update.assert_called_once_with(1, 'first', 'last', None, None) @patch('rfidsecuritysvc.model.guest.sound_model') @patch('rfidsecuritysvc.model.guest.table') def test_update_SoundNotFoundError(table, sound, default_sound): table.update.return_value = 1 sound.get.return_value = None with 
pytest.raises(SoundNotFoundError): model.update(1, 'first', 'last', default_sound.id, 0xABCDEF) sound.get.assert_called_once_with(default_sound.id) table.update.assert_not_called() def test__model_no_color(creatable_guest): row = creatable_guest.test_to_row() row['color'] = None g = model.__model(row) assert g.color is None def test__model_no_sound(creatable_guest): row = creatable_guest.test_to_row() row['sound'] = None g = model.__model(row) assert g.sound is None def _default(index=1): return _model(index, f'first {index}', f'last {index}', Sound(index, f'sound_name {index}', '2021-09-25 23:13:25'), Color(0xABCDEF)) def _model(id, first_name, last_name, sound, color): return Guest(id, first_name, last_name, sound, color)
2.4375
2
ice/consoles.py
reavessm/Ice
578
969
<reponame>reavessm/Ice<gh_stars>100-1000
# encoding: utf-8

import os

import roms


def console_roms_directory(configuration, console):
    """
    If the user has specified a custom ROMs directory in consoles.txt then
    return that. Otherwise, append the shortname of the console to the default
    ROMs directory given by config.txt.
    """
    if console.custom_roms_directory:
        return console.custom_roms_directory
    return os.path.join(roms.roms_directory(configuration), console.shortname)


def path_is_rom(console, path):
    """
    This function determines if a given path is actually a valid ROM file.
    If a list of extensions is supplied for this console, we check if the path
    has a valid extension.
    If no extensions are defined for this console, we just accept any file.
    """
    if console.extensions == "":
        return True
    # Normalize the extension based on the things we validly ignore.
    # Aka capitalization, whitespace, and leading dots
    normalize = lambda ext: ext.lower().strip().lstrip('.')
    (name, ext) = os.path.splitext(path)
    valid_extensions = console.extensions.split(',')
    return normalize(ext) in map(normalize, valid_extensions)
3.4375
3
clue/c3.py
dumpmemory/roformer-v2
44
970
<filename>clue/c3.py #! -*- coding:utf-8 -*- # CLUE评测 # c3多项选择阅读理解 # 思路:每个选项分别与问题、篇章拼接后打分排序 import json import numpy as np from snippets import * from bert4keras.backend import keras from bert4keras.snippets import sequence_padding, DataGenerator from bert4keras.snippets import open from bert4keras.snippets import truncate_sequences from tqdm import tqdm # 基本参数 num_classes = 4 maxlen = 512 batch_size = 4 epochs = 10 def load_data(filename): """加载数据 格式:[(篇章, 问题, 选项, 答案id)] """ D = [] with open(filename) as f: data = json.load(f) for d in data: p = u'||'.join(d[0]) for qa in d[1]: q = qa['question'] while len(qa['choice']) < num_classes: qa['choice'].append(u'无效答案') c = qa['choice'][:num_classes] if 'answer' in qa: a = qa['choice'].index(qa['answer']) else: a = 0 D.append((p, q, c, a)) return D # 加载数据集 train_data = load_data(data_path + 'c3/m-train.json') train_data += load_data(data_path + 'c3/d-train.json') valid_data = load_data(data_path + 'c3/m-dev.json') valid_data += load_data(data_path + 'c3/d-dev.json') class data_generator(DataGenerator): """数据生成器 """ def __iter__(self, random=False): batch_token_ids, batch_segment_ids, batch_labels = [], [], [] for is_end, (p, q, cs, a) in self.sample(random): for c in cs: p_ids = tokenizer.encode(p)[0] q_ids = tokenizer.encode(q)[0][1:] c_ids = tokenizer.encode(c)[0][1:] truncate_sequences(maxlen, -2, c_ids, q_ids, p_ids) token_ids = p_ids + q_ids + c_ids batch_token_ids.append(token_ids) batch_segment_ids.append([0] * len(token_ids)) batch_labels.append([a]) if len(batch_token_ids) == self.batch_size * num_classes or is_end: batch_token_ids = sequence_padding(batch_token_ids) batch_segment_ids = sequence_padding(batch_segment_ids) batch_labels = sequence_padding(batch_labels) yield [batch_token_ids, batch_segment_ids], batch_labels batch_token_ids, batch_segment_ids, batch_labels = [], [], [] # 转换数据集 train_generator = data_generator(train_data, batch_size) valid_generator = data_generator(valid_data, batch_size) def multichoice_crossentropy(y_true, y_pred): """多项选择的交叉熵 """ y_true = K.cast(y_true, 'int32')[::num_classes] y_pred = K.reshape(y_pred, (-1, num_classes)) return K.mean( K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True) ) def multichoice_accuracy(y_true, y_pred): """多项选择的准确率 """ y_true = K.cast(y_true, 'int32')[::num_classes, 0] y_pred = K.reshape(y_pred, (-1, num_classes)) y_pred = K.cast(K.argmax(y_pred, axis=1), 'int32') return K.mean(K.cast(K.equal(y_true, y_pred), K.floatx())) # 构建模型 output = base.model.output output = keras.layers.Lambda(lambda x: x[:, 0])(output) output = keras.layers.Dense(units=1, kernel_initializer=base.initializer)(output) model = keras.models.Model(base.model.input, output) model.summary() model.compile( loss=multichoice_crossentropy, optimizer=optimizer4, metrics=[multichoice_accuracy] ) class Evaluator(keras.callbacks.Callback): """保存验证集acc最好的模型 """ def __init__(self): self.best_val_acc = 0. def on_epoch_end(self, epoch, logs=None): val_acc = self.evaluate(valid_generator) if val_acc > self.best_val_acc: self.best_val_acc = val_acc model.save_weights('weights/c3.weights') print( u'val_acc: %.5f, best_val_acc: %.5f\n' % (val_acc, self.best_val_acc) ) def evaluate(self, data): total, right = 0., 0. 
for x_true, y_true in data: y_pred = model.predict(x_true).reshape((-1, num_classes)) y_pred = y_pred.argmax(axis=1) y_true = y_true[::num_classes, 0] total += len(y_true) right += (y_true == y_pred).sum() return right / total def test_predict(in_file, out_file): """输出测试结果到文件 结果文件可以提交到 https://www.cluebenchmarks.com 评测。 """ test_data = load_data(in_file) test_generator = data_generator(test_data, batch_size) results = [] for x_true, _ in tqdm(test_generator, ncols=0): y_pred = model.predict(x_true).reshape((-1, num_classes)) y_pred = y_pred.argmax(axis=1) results.extend(y_pred) fw = open(out_file, 'w') with open(in_file) as fr: data = json.load(fr) i = 0 for d in data: for qa in d[1]: l = json.dumps({'id': str(qa['id']), 'label': str(results[i])}) fw.write(l + '\n') i += 1 fw.close() if __name__ == '__main__': evaluator = Evaluator() model.fit_generator( train_generator.forfit(), steps_per_epoch=len(train_generator), epochs=epochs, callbacks=[evaluator] ) model.load_weights('weights/c3.weights') test_predict( in_file=data_path + 'c3/test1.0.json', out_file='results/c310_predict.json' ) test_predict( in_file=data_path + 'c3/test1.1.json', out_file='results/c311_predict.json' ) else: model.load_weights('weights/c3.weights')
2.65625
3
tools/perf/contrib/oop_raster/oop_raster.py
zipated/src
2,151
971
<filename>tools/perf/contrib/oop_raster/oop_raster.py
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from benchmarks import smoothness,thread_times
import page_sets
from telemetry import benchmark

# pylint: disable=protected-access


def CustomizeBrowserOptionsForOopRasterization(options):
  """Enables flags needed for out of process rasterization."""
  options.AppendExtraBrowserArgs('--force-gpu-rasterization')
  options.AppendExtraBrowserArgs('--enable-oop-rasterization')


@benchmark.Owner(emails=['<EMAIL>'])
class SmoothnessOopRasterizationTop25(smoothness._Smoothness):
  """Measures rendering statistics for the top 25 with oop rasterization.
  """
  tag = 'oop_rasterization'
  page_set = page_sets.Top25SmoothPageSet

  def SetExtraBrowserOptions(self, options):
    CustomizeBrowserOptionsForOopRasterization(options)

  @classmethod
  def Name(cls):
    return 'smoothness.oop_rasterization.top_25_smooth'


@benchmark.Owner(emails=['<EMAIL>'])
class ThreadTimesOopRasterKeyMobile(thread_times._ThreadTimes):
  """Measure timeline metrics for key mobile pages while using out of
  process raster."""
  tag = 'oop_rasterization'
  page_set = page_sets.KeyMobileSitesSmoothPageSet
  options = {'story_tag_filter': 'fastpath'}

  def SetExtraBrowserOptions(self, options):
    super(ThreadTimesOopRasterKeyMobile, self).SetExtraBrowserOptions(options)
    CustomizeBrowserOptionsForOopRasterization(options)

  @classmethod
  def Name(cls):
    return 'thread_times.oop_rasterization.key_mobile'
2
2
logpy/util.py
mrocklin/logpy
1
972
<reponame>mrocklin/logpy<filename>logpy/util.py
import itertools as it

from toolz.compatibility import range, map, iteritems


def hashable(x):
    try:
        hash(x)
        return True
    except TypeError:
        return False


def transitive_get(key, d):
    """ Transitive dict.get

    >>> from logpy.util import transitive_get
    >>> d = {1: 2, 2: 3, 3: 4}
    >>> d.get(1)
    2
    >>> transitive_get(1, d)
    4
    """
    while hashable(key) and key in d:
        key = d[key]
    return key


def deep_transitive_get(key, d):
    """ Transitive get that propagates within tuples

    >>> from logpy.util import transitive_get, deep_transitive_get
    >>> d = {1: (2, 3), 2: 12, 3: 13}
    >>> transitive_get(1, d)
    (2, 3)
    >>> deep_transitive_get(1, d)
    (12, 13)
    """
    key = transitive_get(key, d)
    if isinstance(key, tuple):
        return tuple(map(lambda k: deep_transitive_get(k, d), key))
    else:
        return key


def dicthash(d):
    return hash(frozenset(d.items()))


def multihash(x):
    try:
        return hash(x)
    except TypeError:
        if isinstance(x, (list, tuple, set, frozenset)):
            return hash(tuple(map(multihash, x)))
        if type(x) is dict:
            return hash(frozenset(map(multihash, x.items())))
        if type(x) is slice:
            return hash((x.start, x.stop, x.step))
        raise TypeError('Hashing not covered for ' + str(x))


def unique(seq, key=lambda x: x):
    seen = set()
    for item in seq:
        try:
            if key(item) not in seen:
                seen.add(key(item))
                yield item
        except TypeError:   # item probably isn't hashable
            yield item      # Just return it and hope for the best


def interleave(seqs, pass_exceptions=()):
    iters = map(iter, seqs)
    while iters:
        newiters = []
        for itr in iters:
            try:
                yield next(itr)
                newiters.append(itr)
            except (StopIteration,) + tuple(pass_exceptions):
                pass
        iters = newiters


def take(n, seq):
    if n is None:
        return seq
    if n == 0:
        return tuple(seq)
    return tuple(it.islice(seq, 0, n))


def evalt(t):
    """ Evaluate tuple if unevaluated

    >>> from logpy.util import evalt
    >>> add = lambda x, y: x + y
    >>> evalt((add, 2, 3))
    5
    >>> evalt(add(2, 3))
    5
    """
    if isinstance(t, tuple) and len(t) >= 1 and callable(t[0]):
        return t[0](*t[1:])
    else:
        return t


def intersection(*seqs):
    return (item for item in seqs[0]
            if all(item in seq for seq in seqs[1:]))


def groupsizes(total, len):
    """ Groups of length len that add up to total

    >>> from logpy.util import groupsizes
    >>> tuple(groupsizes(4, 2))
    ((1, 3), (2, 2), (3, 1))
    """
    if len == 1:
        yield (total,)
    else:
        for i in range(1, total - len + 1 + 1):
            for perm in groupsizes(total - i, len - 1):
                yield (i,) + perm


def raises(err, lamda):
    try:
        lamda()
        raise Exception("Did not raise %s" % err)
    except err:
        pass


def pprint(g):
    """ Pretty print a tree of goals """
    if callable(g) and hasattr(g, '__name__'):
        return g.__name__
    if isinstance(g, type):
        return g.__name__
    if isinstance(g, tuple):
        return "(" + ', '.join(map(pprint, g)) + ")"
    return str(g)


def index(tup, ind):
    """ Fancy indexing with tuples """
    return tuple(tup[i] for i in ind)
2.6875
3
index.py
rinocloud/rinobot-plugin-shift
0
973
import rinobot_plugin as bot
import numpy as np


def main():
    # lets get our parameters and data
    filepath = bot.filepath()
    data = bot.loadfile(filepath)

    # now comes the custom plugin logic
    shift = bot.get_arg('shift', type=float, required=True)
    index = bot.index_from_args(data)
    data[index] = data[index] + shift
    outname = bot.no_extension() + '-shift-%s.txt' % shift

    # then we set up the output
    outpath = bot.output_filepath(outname)
    np.savetxt(outpath, data)


if __name__ == "__main__":
    main()
2.34375
2
gluon/contrib/memcache/__init__.py
arsfeld/fog-web2py
0
974
from gluon.contrib.memcache.memcache import Client
import time

"""
example of usage:

cache.memcache=MemcacheClient(request,[127.0.0.1:11211],debug=true)
"""

import cPickle as pickle
import thread

locker = thread.allocate_lock()


def MemcacheClient(*a, **b):
    locker.acquire()
    if not hasattr(MemcacheClient, '__mc_instance'):
        MemcacheClient.__mc_instance = _MemcacheClient(*a, **b)
    locker.release()
    return MemcacheClient.__mc_instance


class _MemcacheClient(Client):

    def __init__(self, request, servers, debug=0, pickleProtocol=0,
                 pickler=pickle.Pickler, unpickler=pickle.Unpickler,
                 pload=None, pid=None):
        self.request=request
        Client.__init__(self,servers,debug,pickleProtocol,
                        pickler,unpickler,pload,pid)

    def __call__(self,key,f,time_expire=300):
        #key=self.__keyFormat__(key)
        value=None
        obj=self.get(key)
        if obj:
            value=obj
        elif f is None:
            if obj:
                self.delete(key)
        else:
            value=f()
            self.set(key,value,time_expire)
        return value

    def increment(self,key,value=1,time_expire=300):
        newKey=self.__keyFormat__(key)
        obj=self.get(newKey)
        if obj:
            return Client.incr(self,newKey,value)
        else:
            self.set(newKey,value,time_expire)
            return value

    def set(self,key,value,time_expire=300):
        newKey = self.__keyFormat__(key)
        return Client.set(self,newKey,value,time_expire)

    def get(self,key):
        newKey = self.__keyFormat__(key)
        return Client.get(self,newKey)

    def delete(self,key):
        newKey = self.__keyFormat__(key)
        return Client.delete(self,newKey)

    def __keyFormat__(self,key):
        return '%s/%s' % (self.request.application,key.replace(' ','_'))
2.671875
3
test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
aas-core-works/aas-core-csharp-codegen
0
975
<filename>test_data/parse/unexpected/symbol_table/inheritance_from_non_class/meta_model.py
class Some_enum(Enum):
    some_literal = "some_literal"


class Something(Some_enum):
    pass


class Reference:
    pass


__book_url__ = "dummy"
__book_version__ = "dummy"

associate_ref_with(Reference)
2.078125
2
deepcut/deepcut.py
wannaphong/deepcut
17
976
#!/usr/bin/env python # encoding: utf-8 import numbers import os import re import sys from itertools import chain import numpy as np import scipy.sparse as sp import six import pickle from .model import get_convo_nn2 from .stop_words import THAI_STOP_WORDS from .utils import CHAR_TYPES_MAP, CHARS_MAP, create_feature_array MODULE_PATH = os.path.dirname(__file__) WEIGHT_PATH = os.path.join(MODULE_PATH, 'weight', 'cnn_without_ne_ab.h5') TOKENIZER = None def tokenize(text, custom_dict=None): """ Tokenize given Thai text string Input ===== text: str, Thai text string custom_dict: str (or list), path to customized dictionary file It allows the function not to tokenize given dictionary wrongly. The file should contain custom words separated by line. Alternatively, you can provide list of custom words too. Output ====== tokens: list, list of tokenized words Example ======= >> deepcut.tokenize('ตัดคำได้ดีมาก') >> ['ตัดคำ','ได้','ดี','มาก'] """ global TOKENIZER if not TOKENIZER: TOKENIZER = DeepcutTokenizer() return TOKENIZER.tokenize(text, custom_dict=custom_dict) def _custom_dict(word, text, word_end): word_length = len(word) initial_loc = 0 while True: try: start_char = re.search(word, text).start() first_char = start_char + initial_loc last_char = first_char + word_length - 1 initial_loc += start_char + word_length text = text[start_char + word_length:] word_end[first_char:last_char] = (word_length - 1) * [0] word_end[last_char] = 1 except: break return word_end def _document_frequency(X): """ Count the number of non-zero values for each feature in sparse X. """ if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) return np.diff(sp.csc_matrix(X, copy=False).indptr) def _check_stop_list(stop): """ Check stop words list ref: https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/feature_extraction/text.py#L87-L95 """ if stop == "thai": return THAI_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) elif stop is None: return None # assume it's a collection return frozenset(stop) def load_model(file_path): """ Load saved pickle file of DeepcutTokenizer Parameters ========== file_path: str, path to saved model from ``save_model`` method in DeepcutTokenizer """ tokenizer = pickle.load(open(file_path, 'rb')) tokenizer.model = get_convo_nn2() tokenizer.model = tokenizer.model.load_weights(WEIGHT_PATH) return tokenizer class DeepcutTokenizer(object): """ Class for tokenizing given Thai text documents using deepcut library Parameters ========== ngram_range : tuple, tuple for ngram range for vocabulary, (1, 1) for unigram and (1, 2) for bigram stop_words : list or set, list or set of stop words to be removed if None, max_df can be set to value [0.7, 1.0) to automatically remove vocabulary. 
If using "thai", this will use list of pre-populated stop words max_features : int or None, if provided, only consider number of vocabulary ordered by term frequencies max_df : float in range [0.0, 1.0] or int, default=1.0 ignore terms that have a document frequency higher than the given threshold min_df : float in range [0.0, 1.0] or int, default=1 ignore terms that have a document frequency lower than the given threshold dtype : type, optional Example ======= raw_documents = ['ฉันอยากกินข้าวของฉัน', 'ฉันอยากกินไก่', 'อยากนอนอย่างสงบ'] tokenizer = DeepcutTokenizer(ngram_range=(1, 1)) X = tokenizer.fit_tranform(raw_documents) # document-term matrix in sparse CSR format >> X.todense() >> [[0, 0, 1, 0, 1, 0, 2, 1], [0, 1, 1, 0, 1, 0, 1, 0], [1, 0, 0, 1, 1, 1, 0, 0]] >> tokenizer.vocabulary_ >> {'นอน': 0, 'ไก่': 1, 'กิน': 2, 'อย่าง': 3, 'อยาก': 4, 'สงบ': 5, 'ฉัน': 6, 'ข้าว': 7} """ def __init__(self, ngram_range=(1, 1), stop_words=None, max_df=1.0, min_df=1, max_features=None, dtype=np.dtype('float64')): self.model = get_convo_nn2() self.model.load_weights(WEIGHT_PATH) self.vocabulary_ = {} self.ngram_range = ngram_range self.dtype = dtype self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df or min_df") self.max_features = max_features self.stop_words = _check_stop_list(stop_words) def _word_ngrams(self, tokens): """ Turn tokens into a tokens of n-grams ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L124-L153 """ # handle stop words if self.stop_words is not None: tokens = [w for w in tokens if w not in self.stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens if min_n == 1: # no need to do any slicing for unigrams # just iterate through the original tokens tokens = list(original_tokens) min_n += 1 else: tokens = [] n_original_tokens = len(original_tokens) # bind method outside of loop to reduce overhead tokens_append = tokens.append space_join = " ".join for n in range(min_n, min(max_n + 1, n_original_tokens + 1)): for i in range(n_original_tokens - n + 1): tokens_append(space_join(original_tokens[i: i + n])) return tokens def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. ref: https://github.com/scikit-learn/scikit-learn/blob/ef5cb84a/sklearn/feature_extraction/text.py#L734-L773 """ if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: tfs = np.asarray(X.sum(axis=0)).ravel() mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(vocabulary.items()): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. 
Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def transform(self, raw_documents, new_document=False): """ raw_documents: list, list of new documents to be transformed new_document: bool, if True, assume seeing documents and build a new self.vobabulary_, if False, use the previous self.vocabulary_ """ n_doc = len(raw_documents) tokenized_documents = [] for doc in raw_documents: tokens = tokenize(doc) # method in this file tokens = self._word_ngrams(tokens) tokenized_documents.append(tokens) if new_document: self.vocabulary_ = {v: k for k, v in enumerate(set(chain.from_iterable(tokenized_documents)))} values, row_indices, col_indices = [], [], [] for r, tokens in enumerate(tokenized_documents): tokens = self._word_ngrams(tokens) feature = {} for token in tokens: word_index = self.vocabulary_.get(token) if word_index is not None: if word_index not in feature.keys(): feature[word_index] = 1 else: feature[word_index] += 1 for c, v in feature.items(): values.append(v) row_indices.append(r) col_indices.append(c) # document-term matrix in CSR format X = sp.csr_matrix((values, (row_indices, col_indices)), shape=(n_doc, len(self.vocabulary_)), dtype=self.dtype) # truncate vocabulary by max_df and min_df if new_document: max_df = self.max_df min_df = self.min_df max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, _ = self._limit_features(X, self.vocabulary_, max_doc_count, min_doc_count, self.max_features) return X def fit_tranform(self, raw_documents): """ Transform given list of raw_documents to document-term matrix in sparse CSR format (see scipy) """ X = self.transform(raw_documents, new_document=True) return X def tokenize(self, text, custom_dict=None): n_pad = 21 if not text: return [''] # case of empty string if isinstance(text, str) and sys.version_info.major == 2: text = text.decode('utf-8') x_char, x_type = create_feature_array(text, n_pad=n_pad) word_end = [] # Fix thread-related issue in Keras + TensorFlow + Flask async environment # ref: https://github.com/keras-team/keras/issues/2397 y_predict = self.model.predict([x_char, x_type]) c = [i[0] for i in y_predict.tolist()] return list(zip(list(text),c)) def save_model(self, file_path): """ Save tokenizer to pickle format """ self.model = None # set model to None to successfully save the model with open(file_path, 'wb') as f: pickle.dump(self, f)
2.5
2
spconv/utils/__init__.py
djiajunustc/spconv
0
977
# Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from cumm import tensorview as tv from contextlib import AbstractContextManager from spconv.cppconstants import CPU_ONLY_BUILD from spconv.core_cc.csrc.utils.boxops import BoxOps from spconv.core_cc.csrc.sparse.all.ops_cpu1d import Point2VoxelCPU as Point2VoxelCPU1d from spconv.core_cc.csrc.sparse.all.ops_cpu2d import Point2VoxelCPU as Point2VoxelCPU2d from spconv.core_cc.csrc.sparse.all.ops_cpu3d import Point2VoxelCPU as Point2VoxelCPU3d from spconv.core_cc.csrc.sparse.all.ops_cpu4d import Point2VoxelCPU as Point2VoxelCPU4d if not CPU_ONLY_BUILD: from spconv.core_cc.csrc.sparse.all.ops1d import Point2Voxel as Point2VoxelGPU1d from spconv.core_cc.csrc.sparse.all.ops2d import Point2Voxel as Point2VoxelGPU2d from spconv.core_cc.csrc.sparse.all.ops3d import Point2Voxel as Point2VoxelGPU3d from spconv.core_cc.csrc.sparse.all.ops4d import Point2Voxel as Point2VoxelGPU4d class nullcontext(AbstractContextManager): """Context manager that does no additional processing. Used as a stand-in for a normal context manager, when a particular block of code is only sometimes used with a normal context manager: cm = optional_cm if condition else nullcontext() with cm: # Perform operation, using optional_cm if condition is True """ def __init__(self, enter_result=None): self.enter_result = enter_result def __enter__(self): return self.enter_result def __exit__(self, *excinfo): pass def rbbox_iou(box_corners: np.ndarray, qbox_corners: np.ndarray, standup_iou: np.ndarray, standup_thresh: float): if not BoxOps.has_boost(): raise NotImplementedError( "this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild." ) N = box_corners.shape[0] K = qbox_corners.shape[0] overlap = np.zeros((N, K), dtype=box_corners.dtype) BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners), tv.from_numpy(standup_iou), tv.from_numpy(overlap), standup_thresh, False) return overlap def rbbox_intersection(box_corners: np.ndarray, qbox_corners: np.ndarray, standup_iou: np.ndarray, standup_thresh: float): if not BoxOps.has_boost(): raise NotImplementedError( "this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild." ) N = box_corners.shape[0] K = qbox_corners.shape[0] overlap = np.zeros((N, K), dtype=box_corners.dtype) BoxOps.rbbox_iou(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners), tv.from_numpy(standup_iou), tv.from_numpy(overlap), standup_thresh, True) return overlap def rbbox_iou_loss(box_corners: np.ndarray, qbox_corners: np.ndarray): if not BoxOps.has_boost(): raise NotImplementedError( "this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild." 
) N = box_corners.shape[0] overlap = np.zeros((N, ), dtype=box_corners.dtype) BoxOps.rbbox_iou_aligned(tv.from_numpy(box_corners), tv.from_numpy(qbox_corners), tv.from_numpy(overlap), False) return overlap def non_max_suppression_cpu(boxes: np.ndarray, order: np.ndarray, thresh: float, eps: float = 0.0): return BoxOps.non_max_suppression_cpu(tv.from_numpy(boxes), tv.from_numpy(order), thresh, eps) def rotate_non_max_suppression_cpu(boxes: np.ndarray, order: np.ndarray, standup_iou: np.ndarray, thresh: float): if not BoxOps.has_boost(): raise NotImplementedError( "this op require spconv built with boost, download boost, export BOOST_ROOT and rebuild." ) return BoxOps.rotate_non_max_suppression_cpu(tv.from_numpy(boxes), tv.from_numpy(order), tv.from_numpy(standup_iou), thresh)
1.320313
1
build/android/gyp/lint.py
justremotephone/android_external_chromium_org
2
978
<gh_stars>1-10 #!/usr/bin/env python # # Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Runs Android's lint tool.""" import optparse import os import sys from xml.dom import minidom from util import build_utils _SRC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) def _RunLint(lint_path, config_path, processed_config_path, manifest_path, result_path, product_dir, src_dirs, classes_dir): def _RelativizePath(path): """Returns relative path to top-level src dir. Args: path: A path relative to cwd. """ return os.path.relpath(os.path.abspath(path), _SRC_ROOT) def _ProcessConfigFile(): if not build_utils.IsTimeStale(processed_config_path, [config_path]): return with open(config_path, 'rb') as f: content = f.read().replace( 'PRODUCT_DIR', _RelativizePath(product_dir)) with open(processed_config_path, 'wb') as f: f.write(content) def _ProcessResultFile(): with open(result_path, 'rb') as f: content = f.read().replace( _RelativizePath(product_dir), 'PRODUCT_DIR') with open(result_path, 'wb') as f: f.write(content) def _ParseAndShowResultFile(): dom = minidom.parse(result_path) issues = dom.getElementsByTagName('issue') print >> sys.stderr for issue in issues: issue_id = issue.attributes['id'].value severity = issue.attributes['severity'].value message = issue.attributes['message'].value location_elem = issue.getElementsByTagName('location')[0] path = location_elem.attributes['file'].value line = location_elem.getAttribute('line') if line: error = '%s:%s %s: %s [%s]' % (path, line, severity, message, issue_id) else: # Issues in class files don't have a line number. error = '%s %s: %s [%s]' % (path, severity, message, issue_id) print >> sys.stderr, error for attr in ['errorLine1', 'errorLine2']: error_line = issue.getAttribute(attr) if error_line: print >> sys.stderr, error_line return len(issues) _ProcessConfigFile() cmd = [ lint_path, '-Werror', '--exitcode', '--showall', '--config', _RelativizePath(processed_config_path), '--classpath', _RelativizePath(classes_dir), '--xml', _RelativizePath(result_path), ] for src in src_dirs: cmd.extend(['--sources', _RelativizePath(src)]) cmd.append(_RelativizePath(os.path.join(manifest_path, os.pardir))) if os.path.exists(result_path): os.remove(result_path) try: build_utils.CheckOutput(cmd, cwd=_SRC_ROOT) except build_utils.CalledProcessError: # There is a problem with lint usage if not os.path.exists(result_path): raise # There are actual lint issues else: num_issues = _ParseAndShowResultFile() _ProcessResultFile() msg = ('\nLint found %d new issues.\n' ' - For full explanation refer to %s\n' ' - Wanna suppress these issues?\n' ' 1. Read comment in %s\n' ' 2. Run "python %s %s"\n' % (num_issues, _RelativizePath(result_path), _RelativizePath(config_path), _RelativizePath(os.path.join(_SRC_ROOT, 'build', 'android', 'lint', 'suppress.py')), _RelativizePath(result_path))) print >> sys.stderr, msg # Lint errors do not fail the build. 
return 0 return 0 def main(): parser = optparse.OptionParser() parser.add_option('--lint-path', help='Path to lint executable.') parser.add_option('--config-path', help='Path to lint suppressions file.') parser.add_option('--processed-config-path', help='Path to processed lint suppressions file.') parser.add_option('--manifest-path', help='Path to AndroidManifest.xml') parser.add_option('--result-path', help='Path to XML lint result file.') parser.add_option('--product-dir', help='Path to product dir.') parser.add_option('--src-dirs', help='Directories containing java files.') parser.add_option('--classes-dir', help='Directory containing class files.') parser.add_option('--stamp', help='Path to touch on success.') parser.add_option('--enable', action='store_true', help='Run lint instead of just touching stamp.') options, _ = parser.parse_args() build_utils.CheckOptions( options, parser, required=['lint_path', 'config_path', 'processed_config_path', 'manifest_path', 'result_path', 'product_dir', 'src_dirs', 'classes_dir']) src_dirs = build_utils.ParseGypList(options.src_dirs) rc = 0 if options.enable: rc = _RunLint(options.lint_path, options.config_path, options.processed_config_path, options.manifest_path, options.result_path, options.product_dir, src_dirs, options.classes_dir) if options.stamp and not rc: build_utils.Touch(options.stamp) return rc if __name__ == '__main__': sys.exit(main())
2.1875
2
clif/pybind11/generator.py
snu5mumr1k/clif
0
979
<filename>clif/pybind11/generator.py # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates pybind11 bindings code.""" from typing import Dict, Generator, List, Text, Set from clif.protos import ast_pb2 from clif.pybind11 import classes from clif.pybind11 import enums from clif.pybind11 import function from clif.pybind11 import function_lib from clif.pybind11 import type_casters from clif.pybind11 import utils I = utils.I class ModuleGenerator(object): """A class that generates pybind11 bindings code from CLIF ast.""" def __init__(self, ast: ast_pb2.AST, module_name: str, header_path: str, include_paths: List[str]): self._ast = ast self._module_name = module_name self._header_path = header_path self._include_paths = include_paths self._unique_classes = {} def generate_header(self, ast: ast_pb2.AST) -> Generator[str, None, None]: """Generates pybind11 bindings code from CLIF ast.""" includes = set() for decl in ast.decls: includes.add(decl.cpp_file) self._collect_class_cpp_names(decl) yield '#include "third_party/pybind11/include/pybind11/smart_holder.h"' for include in includes: yield f'#include "{include}"' yield '\n' for cpp_name in self._unique_classes: yield f'PYBIND11_SMART_HOLDER_TYPE_CASTERS({cpp_name})' yield '\n' for cpp_name, py_name in self._unique_classes.items(): yield f'// CLIF use `{cpp_name}` as {py_name}' def generate_from(self, ast: ast_pb2.AST): """Generates pybind11 bindings code from CLIF ast. Args: ast: CLIF ast protobuf. Yields: Generated pybind11 bindings code. """ yield from self._generate_headlines() # Find and keep track of virtual functions. 
python_override_class_names = {} for decl in ast.decls: yield from self._generate_python_override_class_names( python_override_class_names, decl) self._collect_class_cpp_names(decl) yield from type_casters.generate_from(ast, self._include_paths) yield f'PYBIND11_MODULE({self._module_name}, m) {{' yield from self._generate_import_modules(ast) yield I+('m.doc() = "CLIF-generated pybind11-based module for ' f'{ast.source}";') yield I + 'py::google::ImportStatusModule();' for decl in ast.decls: if decl.decltype == ast_pb2.Decl.Type.FUNC: for s in function.generate_from('m', decl.func, None): yield I + s elif decl.decltype == ast_pb2.Decl.Type.CONST: yield from self._generate_const_variables(decl.const) elif decl.decltype == ast_pb2.Decl.Type.CLASS: yield from classes.generate_from( decl.class_, 'm', python_override_class_names.get(decl.class_.name.cpp_name, '')) elif decl.decltype == ast_pb2.Decl.Type.ENUM: yield from enums.generate_from('m', decl.enum) yield '' yield '}' def _generate_import_modules(self, ast: ast_pb2.AST) -> Generator[str, None, None]: for include in ast.pybind11_includes: # Converts `full/project/path/cheader_pybind11_clif.h` to # `full.project.path.cheader_pybind11` names = include.split('/') names.insert(0, 'google3') names[-1] = names[-1][:-len('_clif.h')] module = '.'.join(names) yield f'py::module_::import("{module}");' def _generate_headlines(self): """Generates #includes and headers.""" includes = set() for decl in self._ast.decls: includes.add(decl.cpp_file) if decl.decltype == ast_pb2.Decl.Type.CONST: self._generate_const_variables_headers(decl.const, includes) for include in self._ast.pybind11_includes: includes.add(include) for include in self._ast.usertype_includes: includes.add(include) yield '#include "third_party/pybind11/include/pybind11/complex.h"' yield '#include "third_party/pybind11/include/pybind11/functional.h"' yield '#include "third_party/pybind11/include/pybind11/operators.h"' yield '#include "third_party/pybind11/include/pybind11/smart_holder.h"' yield '// potential future optimization: generate this line only as needed.' 
yield '#include "third_party/pybind11/include/pybind11/stl.h"' yield '' yield '#include "clif/pybind11/runtime.h"' yield '#include "clif/pybind11/type_casters.h"' yield '' for include in includes: yield f'#include "{include}"' yield f'#include "{self._header_path}"' yield '' yield 'namespace py = pybind11;' yield '' def _generate_const_variables_headers(self, const_decl: ast_pb2.ConstDecl, includes: Set[str]): if const_decl.type.lang_type == 'complex': includes.add('third_party/pybind11/include/pybind11/complex.h') if (const_decl.type.lang_type.startswith('list<') or const_decl.type.lang_type.startswith('dict<') or const_decl.type.lang_type.startswith('set<')): includes.add('third_party/pybind11/include/pybind11/stl.h') def _generate_const_variables(self, const_decl: ast_pb2.ConstDecl): """Generates variables.""" lang_type = const_decl.type.lang_type if (lang_type in {'int', 'float', 'double', 'bool', 'str'} or lang_type.startswith('tuple<')): const_def = I + (f'm.attr("{const_decl.name.native}") = ' f'{const_decl.name.cpp_name};') else: const_def = I + (f'm.attr("{const_decl.name.native}") = ' f'py::cast({const_decl.name.cpp_name});') yield const_def def _generate_python_override_class_names( self, python_override_class_names: Dict[Text, Text], decl: ast_pb2.Decl, trampoline_name_suffix: str = '_trampoline', self_life_support: str = 'py::trampoline_self_life_support'): """Generates Python overrides classes dictionary for virtual functions.""" if decl.decltype == ast_pb2.Decl.Type.CLASS: virtual_members = [] for member in decl.class_.members: if member.decltype == ast_pb2.Decl.Type.FUNC and member.func.virtual: virtual_members.append(member) if not virtual_members: return python_override_class_name = ( f'{decl.class_.name.native}_{trampoline_name_suffix}') assert decl.class_.name.cpp_name not in python_override_class_names python_override_class_names[ decl.class_.name.cpp_name] = python_override_class_name yield (f'struct {python_override_class_name} : ' f'{decl.class_.name.cpp_name}, {self_life_support} {{') yield I + ( f'using {decl.class_.name.cpp_name}::{decl.class_.name.native};') for member in virtual_members: yield from self._generate_virtual_function( decl.class_.name.native, member.func) if python_override_class_name: yield '};' def _generate_virtual_function(self, class_name: str, func_decl: ast_pb2.FuncDecl): """Generates virtual function overrides calling Python methods.""" return_type = '' if func_decl.cpp_void_return: return_type = 'void' elif func_decl.returns: for v in func_decl.returns: if v.HasField('cpp_exact_type'): return_type = v.cpp_exact_type params = ', '.join([f'{p.name.cpp_name}' for p in func_decl.params]) params_list_with_types = [] for p in func_decl.params: params_list_with_types.append( f'{function_lib.generate_param_type(p)} {p.name.cpp_name}') params_str_with_types = ', '.join(params_list_with_types) cpp_const = '' if func_decl.cpp_const_method: cpp_const = ' const' yield I + (f'{return_type} ' f'{func_decl.name.native}({params_str_with_types}) ' f'{cpp_const} override {{') if func_decl.is_pure_virtual: pybind11_override = 'PYBIND11_OVERRIDE_PURE' else: pybind11_override = 'PYBIND11_OVERRIDE' yield I + I + f'{pybind11_override}(' yield I + I + I + f'{return_type},' yield I + I + I + f'{class_name},' yield I + I + I + f'{func_decl.name.native},' yield I + I + I + f'{params}' yield I + I + ');' yield I + '}' def _collect_class_cpp_names(self, decl: ast_pb2.Decl, parent_name: str = '') -> None: """Adds every class name to a set. 
Only to be used in this context.""" if decl.decltype == ast_pb2.Decl.Type.CLASS: full_native_name = decl.class_.name.native if parent_name: full_native_name = '.'.join([parent_name, decl.class_.name.native]) self._unique_classes[decl.class_.name.cpp_name] = full_native_name for member in decl.class_.members: self._collect_class_cpp_names(member, full_native_name) def write_to(channel, lines): """Writes the generated code to files.""" for s in lines: channel.write(s) channel.write('\n')
2.125
2
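A minimal, hedged driver sketch for the generator file above; the empty AST message and the output file names are illustration-only assumptions (in real use the AST comes from the CLIF frontend):

from clif.protos import ast_pb2
from clif.pybind11.generator import ModuleGenerator, write_to

parsed_ast = ast_pb2.AST()   # placeholder; normally produced by the CLIF parser
gen = ModuleGenerator(parsed_ast, module_name='example_module',
                      header_path='example_module_clif.h', include_paths=[])
with open('example_module_clif.h', 'w') as header:
    write_to(header, gen.generate_header(parsed_ast))
with open('example_module.cc', 'w') as source:
    write_to(source, gen.generate_from(parsed_ast))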
advent/model/discriminator.py
ChristopheGraveline064/ADVENT
1
980
from torch import nn


def get_fc_discriminator(num_classes, ndf=64):
    return nn.Sequential(
        nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=2, padding=1),
    )


# def get_fe_discriminator(num_classes, ndf=64):  # 256-128-64-32-16
#     return nn.Sequential(
#         nn.Conv2d(num_classes, ndf * 4, kernel_size=4, stride=2, padding=1),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf * 4, ndf * 2, kernel_size=4, stride=2, padding=1),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf * 2, ndf, kernel_size=2, stride=2, padding=0),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
#         # nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf, 1, kernel_size=2, stride=2, padding=0),
#     )

# def get_fe_discriminator(num_classes, ndf=64):
#     return nn.Sequential(
#         nn.Conv2d(num_classes, ndf, kernel_size=4, stride=2, padding=1),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
#         nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
#         # nn.LeakyReLU(negative_slope=0.2, inplace=True),
#         nn.Conv2d(ndf * 4, 1, kernel_size=1, stride=1, padding=0),
#     )


def get_fe_discriminator(num_classes, ndf=64):  # H/8,H/8,(1024 -> 256 -> 128 -> 64 -> 1)
    return nn.Sequential(
        nn.Conv2d(num_classes, ndf * 4, kernel_size=1, stride=1, padding=0),
        # x=self.dropout(x)
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf * 4, ndf * 2, kernel_size=1, stride=1, padding=0),
        # x=self.dropout(x)
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf * 2, ndf, kernel_size=1, stride=1, padding=0),
        # x=self.dropout(x)
        nn.LeakyReLU(negative_slope=0.2, inplace=True),
        # nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
        # nn.LeakyReLU(negative_slope=0.2, inplace=True),
        nn.Conv2d(ndf, 1, kernel_size=1, stride=1, padding=0),
    )
2.5
2
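A hedged usage sketch for the discriminators above; the 19-class setting and tensor shapes are illustrative, and PyTorch plus the ADVENT package are assumed to be importable:

import torch
from advent.model.discriminator import get_fc_discriminator

d_main = get_fc_discriminator(num_classes=19)   # patch-level discriminator over prediction maps
dummy = torch.randn(2, 19, 64, 64)              # (batch, num_classes, H, W), random stand-in input
scores = d_main(dummy)
print(scores.shape)                             # five stride-2 convs -> roughly (2, 1, H/32, W/32)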
spyder/dependencies.py
aglotero/spyder
2
981
<reponame>aglotero/spyder
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)

"""Module checking Spyder runtime dependencies"""

import os

# Local imports
from spyder.utils import programs


class Dependency(object):
    """Spyder's dependency

    version may start with =, >=, > or < to specify the exact requirement;
    multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')"""

    OK = 'OK'
    NOK = 'NOK'

    def __init__(self, modname, features, required_version,
                 installed_version=None, optional=False):
        self.modname = modname
        self.features = features
        self.required_version = required_version
        self.optional = optional
        if installed_version is None:
            try:
                self.installed_version = programs.get_module_version(modname)
            except:
                # NOTE: Don't add any exception type here!
                # Modules can fail to import in several ways besides
                # ImportError
                self.installed_version = None
        else:
            self.installed_version = installed_version

    def check(self):
        """Check if dependency is installed"""
        return programs.is_module_installed(self.modname,
                                             self.required_version,
                                             self.installed_version)

    def get_installed_version(self):
        """Return dependency status (string)"""
        if self.check():
            return '%s (%s)' % (self.installed_version, self.OK)
        else:
            return '%s (%s)' % (self.installed_version, self.NOK)

    def get_status(self):
        """Return dependency status (string)"""
        if self.check():
            return self.OK
        else:
            return self.NOK


DEPENDENCIES = []


def add(modname, features, required_version, installed_version=None,
        optional=False):
    """Add Spyder dependency"""
    global DEPENDENCIES
    for dependency in DEPENDENCIES:
        if dependency.modname == modname:
            raise ValueError("Dependency has already been registered: %s"
                             % modname)
    DEPENDENCIES += [Dependency(modname, features, required_version,
                                installed_version, optional)]


def check(modname):
    """Check if required dependency is installed"""
    for dependency in DEPENDENCIES:
        if dependency.modname == modname:
            return dependency.check()
    else:
        raise RuntimeError("Unknown dependency %s" % modname)


def status(deps=DEPENDENCIES, linesep=os.linesep):
    """Return a status of dependencies"""
    maxwidth = 0
    col1 = []
    col2 = []
    for dependency in deps:
        title1 = dependency.modname
        title1 += ' ' + dependency.required_version
        col1.append(title1)
        maxwidth = max([maxwidth, len(title1)])
        col2.append(dependency.get_installed_version())
    text = ""
    for index in range(len(deps)):
        text += col1[index].ljust(maxwidth) + ': ' + col2[index] + linesep
    return text[:-1]


def missing_dependencies():
    """Return the status of missing dependencies (if any)"""
    missing_deps = []
    for dependency in DEPENDENCIES:
        if not dependency.check() and not dependency.optional:
            missing_deps.append(dependency)
    if missing_deps:
        return status(deps=missing_deps, linesep='<br>')
    else:
        return ""
2.53125
3
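A hedged sketch of how this registry is driven in practice; the module name and version specifier are examples only, and results depend on the probing done by spyder.utils.programs:

from spyder import dependencies

dependencies.add("numpy", "Array and matrix computations", ">=1.7")
print(dependencies.check("numpy"))           # True/False depending on the environment
print(dependencies.status())                 # aligned "name required_version: installed (OK/NOK)" table
print(dependencies.missing_dependencies())   # <br>-separated summary, empty string if nothing is missing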
setup.py
jasperhyp/Chemprop4SE
1
982
import os from setuptools import find_packages, setup # Load version number __version__ = None src_dir = os.path.abspath(os.path.dirname(__file__)) version_file = os.path.join(src_dir, 'chemprop', '_version.py') with open(version_file, encoding='utf-8') as fd: exec(fd.read()) # Load README with open('README.md', encoding='utf-8') as f: long_description = f.read() setup( name='chemprop', version=__version__, author='<NAME>, <NAME>, <NAME>, <NAME>, <NAME>', author_email='<EMAIL>', description='Molecular Property Prediction with Message Passing Neural Networks', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/chemprop/chemprop', download_url=f'https://github.com/chemprop/chemprop/v_{__version__}.tar.gz', project_urls={ 'Documentation': 'https://chemprop.readthedocs.io/en/latest/', 'Source': 'https://github.com/chemprop/chemprop', 'PyPi': 'https://pypi.org/project/chemprop/', 'Demo': 'http://chemprop.csail.mit.edu/', }, license='MIT', packages=find_packages(), package_data={'chemprop': ['py.typed']}, entry_points={ 'console_scripts': [ 'chemprop_train=chemprop.train:chemprop_train', 'chemprop_predict=chemprop.train:chemprop_predict', 'chemprop_fingerprint=chemprop.train:chemprop_fingerprint', 'chemprop_hyperopt=chemprop.hyperparameter_optimization:chemprop_hyperopt', 'chemprop_interpret=chemprop.interpret:chemprop_interpret', 'chemprop_web=chemprop.web.run:chemprop_web', 'sklearn_train=chemprop.sklearn_train:sklearn_train', 'sklearn_predict=chemprop.sklearn_predict:sklearn_predict', ] }, install_requires=[ 'flask>=1.1.2', 'hyperopt>=0.2.3', 'matplotlib>=3.1.3', 'numpy>=1.18.1', 'pandas>=1.0.3', 'pandas-flavor>=0.2.0', 'scikit-learn>=0.22.2.post1', 'scipy>=1.4.1', 'sphinx>=3.1.2', 'tensorboardX>=2.0', 'torch>=1.5.1', 'tqdm>=4.45.0', 'typed-argument-parser>=1.6.1' ], extras_require={ 'test': [ 'pytest>=6.2.2', 'parameterized>=0.8.1' ] }, python_requires='>=3.6', classifiers=[ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent' ], keywords=[ 'chemistry', 'machine learning', 'property prediction', 'message passing neural network', 'graph neural network' ] )
1.46875
1
mars/tensor/indexing/slice.py
HarshCasper/mars
2
983
# Copyright 1999-2020 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ... import opcodes as OperandDef from ...serialize import KeyField, ListField from ..operands import TensorHasInput, TensorOperandMixin from ..array_utils import get_array_module from ..core import TensorOrder class TensorSlice(TensorHasInput, TensorOperandMixin): _op_type_ = OperandDef.SLICE _input = KeyField('input') _slices = ListField('slices') def __init__(self, slices=None, dtype=None, sparse=False, **kw): super().__init__(_slices=slices, _dtype=dtype, _sparse=sparse, **kw) @property def slices(self): return self._slices def _set_inputs(self, inputs): super()._set_inputs(inputs) self._input = self._inputs[0] def _get_order(self, kw, i): order = kw.pop('order', None) if order is None: inp = self.input if inp is None or inp.order == TensorOrder.C_ORDER: return TensorOrder.C_ORDER for shape, slc in zip(inp.shape, self._slices): if slc is None: continue s = slc.indices(shape) if s[0] == 0 and s[1] == shape and s[2] == 1: continue else: return TensorOrder.C_ORDER return inp.order return order[i] if isinstance(order, (list, tuple)) else order @classmethod def execute(cls, ctx, op): inp = ctx[op.inputs[0].key] if op.input.ndim == 0 and not hasattr(inp, 'shape'): # scalar, but organize it into an array inp = get_array_module(inp).array(inp) x = inp[tuple(op.slices)] out = op.outputs[0] ctx[out.key] = x.astype(x.dtype, order=out.order.value, copy=False)
1.851563
2
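For orientation, a hedged example of the user-facing API this operand serves; exact operand selection during graph building and tiling may differ by indexing pattern, and a default local session is assumed:

import mars.tensor as mt

t = mt.arange(12).reshape(3, 4)
s = t[1:, ::2]        # basic slicing; lowered internally to slice operands such as TensorSlice
print(s.execute())    # expected [[ 4,  6], [ 8, 10]]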
ftplugin/python/python/pyflakes/pyflakes/checker.py
leewckk/vim.configuration
0
984
""" Main module. Implement the central Checker class. Also, it models the Bindings and Scopes. """ import __future__ import doctest import os import sys PY2 = sys.version_info < (3, 0) PY32 = sys.version_info < (3, 3) # Python 2.5 to 3.2 PY33 = sys.version_info < (3, 4) # Python 2.5 to 3.3 PY34 = sys.version_info < (3, 5) # Python 2.5 to 3.4 try: sys.pypy_version_info PYPY = True except AttributeError: PYPY = False builtin_vars = dir(__import__('__builtin__' if PY2 else 'builtins')) try: import ast except ImportError: # Python 2.5 import _ast as ast if 'decorator_list' not in ast.ClassDef._fields: # Patch the missing attribute 'decorator_list' ast.ClassDef.decorator_list = () ast.FunctionDef.decorator_list = property(lambda s: s.decorators) from pyflakes import messages if PY2: def getNodeType(node_class): # workaround str.upper() which is locale-dependent return str(unicode(node_class.__name__).upper()) else: def getNodeType(node_class): return node_class.__name__.upper() # Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally) if PY32: def getAlternatives(n): if isinstance(n, (ast.If, ast.TryFinally)): return [n.body] if isinstance(n, ast.TryExcept): return [n.body + n.orelse] + [[hdl] for hdl in n.handlers] else: def getAlternatives(n): if isinstance(n, ast.If): return [n.body] if isinstance(n, ast.Try): return [n.body + n.orelse] + [[hdl] for hdl in n.handlers] if PY34: LOOP_TYPES = (ast.While, ast.For) else: LOOP_TYPES = (ast.While, ast.For, ast.AsyncFor) class _FieldsOrder(dict): """Fix order of AST node fields.""" def _get_fields(self, node_class): # handle iter before target, and generators before element fields = node_class._fields if 'iter' in fields: key_first = 'iter'.find elif 'generators' in fields: key_first = 'generators'.find else: key_first = 'value'.find return tuple(sorted(fields, key=key_first, reverse=True)) def __missing__(self, node_class): self[node_class] = fields = self._get_fields(node_class) return fields def counter(items): """ Simplest required implementation of collections.Counter. Required as 2.6 does not have Counter in collections. """ results = {} for item in items: results[item] = results.get(item, 0) + 1 return results def iter_child_nodes(node, omit=None, _fields_order=_FieldsOrder()): """ Yield all direct child nodes of *node*, that is, all fields that are nodes and all items of fields that are lists of nodes. """ for name in _fields_order[node.__class__]: if name == omit: continue field = getattr(node, name, None) if isinstance(field, ast.AST): yield field elif isinstance(field, list): for item in field: yield item def convert_to_value(item): if isinstance(item, ast.Str): return item.s elif hasattr(ast, 'Bytes') and isinstance(item, ast.Bytes): return item.s elif isinstance(item, ast.Tuple): return tuple(convert_to_value(i) for i in item.elts) elif isinstance(item, ast.Num): return item.n elif isinstance(item, ast.Name): result = VariableKey(item=item) constants_lookup = { 'True': True, 'False': False, 'None': None, } return constants_lookup.get( result.name, result, ) elif (not PY33) and isinstance(item, ast.NameConstant): # None, True, False are nameconstants in python3, but names in 2 return item.value else: return UnhandledKeyType() class Binding(object): """ Represents the binding of a value to a name. The checker uses this to keep track of which names have been bound and which names have not. See L{Assignment} for a special type of binding that is checked with stricter rules. 
@ivar used: pair of (L{Scope}, node) indicating the scope and the node that this binding was last used. """ def __init__(self, name, source): self.name = name self.source = source self.used = False def __str__(self): return self.name def __repr__(self): return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__, self.name, self.source.lineno, id(self)) def redefines(self, other): return isinstance(other, Definition) and self.name == other.name class Definition(Binding): """ A binding that defines a function or a class. """ class UnhandledKeyType(object): """ A dictionary key of a type that we cannot or do not check for duplicates. """ class VariableKey(object): """ A dictionary key which is a variable. @ivar item: The variable AST object. """ def __init__(self, item): self.name = item.id def __eq__(self, compare): return ( compare.__class__ == self.__class__ and compare.name == self.name ) def __hash__(self): return hash(self.name) class Importation(Definition): """ A binding created by an import statement. @ivar fullName: The complete name given to the import statement, possibly including multiple dotted components. @type fullName: C{str} """ def __init__(self, name, source, full_name=None): self.fullName = full_name or name self.redefined = [] super(Importation, self).__init__(name, source) def redefines(self, other): if isinstance(other, SubmoduleImportation): # See note in SubmoduleImportation about RedefinedWhileUnused return self.fullName == other.fullName return isinstance(other, Definition) and self.name == other.name def _has_alias(self): """Return whether importation needs an as clause.""" return not self.fullName.split('.')[-1] == self.name @property def source_statement(self): """Generate a source statement equivalent to the import.""" if self._has_alias(): return 'import %s as %s' % (self.fullName, self.name) else: return 'import %s' % self.fullName def __str__(self): """Return import full name with alias.""" if self._has_alias(): return self.fullName + ' as ' + self.name else: return self.fullName class SubmoduleImportation(Importation): """ A binding created by a submodule import statement. A submodule import is a special case where the root module is implicitly imported, without an 'as' clause, and the submodule is also imported. Python does not restrict which attributes of the root module may be used. This class is only used when the submodule import is without an 'as' clause. pyflakes handles this case by registering the root module name in the scope, allowing any attribute of the root module to be accessed. RedefinedWhileUnused is suppressed in `redefines` unless the submodule name is also the same, to avoid false positives. """ def __init__(self, name, source): # A dot should only appear in the name when it is a submodule import assert '.' in name and (not source or isinstance(source, ast.Import)) package_name = name.split('.')[0] super(SubmoduleImportation, self).__init__(package_name, source) self.fullName = name def redefines(self, other): if isinstance(other, Importation): return self.fullName == other.fullName return super(SubmoduleImportation, self).redefines(other) def __str__(self): return self.fullName @property def source_statement(self): return 'import ' + self.fullName class ImportationFrom(Importation): def __init__(self, name, source, module, real_name=None): self.module = module self.real_name = real_name or name if module.endswith('.'): full_name = module + self.real_name else: full_name = module + '.' 
+ self.real_name super(ImportationFrom, self).__init__(name, source, full_name) def __str__(self): """Return import full name with alias.""" if self.real_name != self.name: return self.fullName + ' as ' + self.name else: return self.fullName @property def source_statement(self): if self.real_name != self.name: return 'from %s import %s as %s' % (self.module, self.real_name, self.name) else: return 'from %s import %s' % (self.module, self.name) class StarImportation(Importation): """A binding created by a 'from x import *' statement.""" def __init__(self, name, source): super(StarImportation, self).__init__('*', source) # Each star importation needs a unique name, and # may not be the module name otherwise it will be deemed imported self.name = name + '.*' self.fullName = name @property def source_statement(self): return 'from ' + self.fullName + ' import *' def __str__(self): # When the module ends with a ., avoid the ambiguous '..*' if self.fullName.endswith('.'): return self.source_statement else: return self.name class FutureImportation(ImportationFrom): """ A binding created by a from `__future__` import statement. `__future__` imports are implicitly used. """ def __init__(self, name, source, scope): super(FutureImportation, self).__init__(name, source, '__future__') self.used = (scope, source) class Argument(Binding): """ Represents binding a name as an argument. """ class Assignment(Binding): """ Represents binding a name with an explicit assignment. The checker will raise warnings for any Assignment that isn't used. Also, the checker does not consider assignments in tuple/list unpacking to be Assignments, rather it treats them as simple Bindings. """ class FunctionDefinition(Definition): pass class ClassDefinition(Definition): pass class ExportBinding(Binding): """ A binding created by an C{__all__} assignment. If the names in the list can be determined statically, they will be treated as names for export and additional checking applied to them. The only C{__all__} assignment that can be recognized is one which takes the value of a literal list containing literal strings. For example:: __all__ = ["foo", "bar"] Names which are imported and not otherwise used but appear in the value of C{__all__} will not have an unused import warning reported for them. """ def __init__(self, name, source, scope): if '__all__' in scope and isinstance(source, ast.AugAssign): self.names = list(scope['__all__'].names) else: self.names = [] if isinstance(source.value, (ast.List, ast.Tuple)): for node in source.value.elts: if isinstance(node, ast.Str): self.names.append(node.s) super(ExportBinding, self).__init__(name, source) class Scope(dict): importStarred = False # set to True when import * is found def __repr__(self): scope_cls = self.__class__.__name__ return '<%s at 0x%x %s>' % (scope_cls, id(self), dict.__repr__(self)) class ClassScope(Scope): pass class FunctionScope(Scope): """ I represent a name scope for a function. @ivar globals: Names declared 'global' in this function. """ usesLocals = False alwaysUsed = set(['__tracebackhide__', '__traceback_info__', '__traceback_supplement__']) def __init__(self): super(FunctionScope, self).__init__() # Simplify: manage the special locals as globals self.globals = self.alwaysUsed.copy() self.returnValue = None # First non-empty return self.isGenerator = False # Detect a generator def unusedAssignments(self): """ Return a generator for the assignments which have not been used. 
""" for name, binding in self.items(): if (not binding.used and name not in self.globals and not self.usesLocals and isinstance(binding, Assignment)): yield name, binding class GeneratorScope(Scope): pass class ModuleScope(Scope): """Scope for a module.""" _futures_allowed = True class DoctestScope(ModuleScope): """Scope for a doctest.""" # Globally defined names which are not attributes of the builtins module, or # are only present on some platforms. _MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError'] def getNodeName(node): # Returns node.id, or node.name, or None if hasattr(node, 'id'): # One of the many nodes with an id return node.id if hasattr(node, 'name'): # an ExceptHandler node return node.name class Checker(object): """ I check the cleanliness and sanity of Python code. @ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements of the list are two-tuples. The first element is the callable passed to L{deferFunction}. The second element is a copy of the scope stack at the time L{deferFunction} was called. @ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for callables which are deferred assignment checks. """ nodeDepth = 0 offset = None traceTree = False builtIns = set(builtin_vars).union(_MAGIC_GLOBALS) _customBuiltIns = os.environ.get('PYFLAKES_BUILTINS') if _customBuiltIns: builtIns.update(_customBuiltIns.split(',')) del _customBuiltIns def __init__(self, tree, filename='(none)', builtins=None, withDoctest='PYFLAKES_DOCTEST' in os.environ): self._nodeHandlers = {} self._deferredFunctions = [] self._deferredAssignments = [] self.deadScopes = [] self.messages = [] self.filename = filename if builtins: self.builtIns = self.builtIns.union(builtins) self.withDoctest = withDoctest self.scopeStack = [ModuleScope()] self.exceptHandlers = [()] self.root = tree self.handleChildren(tree) self.runDeferred(self._deferredFunctions) # Set _deferredFunctions to None so that deferFunction will fail # noisily if called after we've run through the deferred functions. self._deferredFunctions = None self.runDeferred(self._deferredAssignments) # Set _deferredAssignments to None so that deferAssignment will fail # noisily if called after we've run through the deferred assignments. self._deferredAssignments = None del self.scopeStack[1:] self.popScope() self.checkDeadScopes() def deferFunction(self, callable): """ Schedule a function handler to be called just before completion. This is used for handling function bodies, which must be deferred because code later in the file might modify the global scope. When `callable` is called, the scope at the time this is called will be restored, however it will contain any new bindings added to it. """ self._deferredFunctions.append((callable, self.scopeStack[:], self.offset)) def deferAssignment(self, callable): """ Schedule an assignment handler to be called just after deferred function handlers. """ self._deferredAssignments.append((callable, self.scopeStack[:], self.offset)) def runDeferred(self, deferred): """ Run the callables in C{deferred} using their associated scope stack. 
""" for handler, scope, offset in deferred: self.scopeStack = scope self.offset = offset handler() def _in_doctest(self): return (len(self.scopeStack) >= 2 and isinstance(self.scopeStack[1], DoctestScope)) @property def futuresAllowed(self): if not all(isinstance(scope, ModuleScope) for scope in self.scopeStack): return False return self.scope._futures_allowed @futuresAllowed.setter def futuresAllowed(self, value): assert value is False if isinstance(self.scope, ModuleScope): self.scope._futures_allowed = False @property def scope(self): return self.scopeStack[-1] def popScope(self): self.deadScopes.append(self.scopeStack.pop()) def checkDeadScopes(self): """ Look at scopes which have been fully examined and report names in them which were imported but unused. """ for scope in self.deadScopes: # imports in classes are public members if isinstance(scope, ClassScope): continue all_binding = scope.get('__all__') if all_binding and not isinstance(all_binding, ExportBinding): all_binding = None if all_binding: all_names = set(all_binding.names) undefined = all_names.difference(scope) else: all_names = undefined = [] if undefined: if not scope.importStarred and \ os.path.basename(self.filename) != '__init__.py': # Look for possible mistakes in the export list for name in undefined: self.report(messages.UndefinedExport, scope['__all__'].source, name) # mark all import '*' as used by the undefined in __all__ if scope.importStarred: for binding in scope.values(): if isinstance(binding, StarImportation): binding.used = all_binding # Look for imported names that aren't used. for value in scope.values(): if isinstance(value, Importation): used = value.used or value.name in all_names if not used: messg = messages.UnusedImport self.report(messg, value.source, str(value)) for node in value.redefined: if isinstance(self.getParent(node), ast.For): messg = messages.ImportShadowedByLoopVar elif used: continue else: messg = messages.RedefinedWhileUnused self.report(messg, node, value.name, value.source) def pushScope(self, scopeClass=FunctionScope): self.scopeStack.append(scopeClass()) def report(self, messageClass, *args, **kwargs): self.messages.append(messageClass(self.filename, *args, **kwargs)) def getParent(self, node): # Lookup the first parent which is not Tuple, List or Starred while True: node = node.parent if not hasattr(node, 'elts') and not hasattr(node, 'ctx'): return node def getCommonAncestor(self, lnode, rnode, stop): if stop in (lnode, rnode) or not (hasattr(lnode, 'parent') and hasattr(rnode, 'parent')): return None if lnode is rnode: return lnode if (lnode.depth > rnode.depth): return self.getCommonAncestor(lnode.parent, rnode, stop) if (lnode.depth < rnode.depth): return self.getCommonAncestor(lnode, rnode.parent, stop) return self.getCommonAncestor(lnode.parent, rnode.parent, stop) def descendantOf(self, node, ancestors, stop): for a in ancestors: if self.getCommonAncestor(node, a, stop): return True return False def differentForks(self, lnode, rnode): """True, if lnode and rnode are located on different forks of IF/TRY""" ancestor = self.getCommonAncestor(lnode, rnode, self.root) parts = getAlternatives(ancestor) if parts: for items in parts: if self.descendantOf(lnode, items, ancestor) ^ \ self.descendantOf(rnode, items, ancestor): return True return False def addBinding(self, node, value): """ Called when a binding is altered. 
- `node` is the statement responsible for the change - `value` is the new value, a Binding instance """ # assert value.source in (node, node.parent): for scope in self.scopeStack[::-1]: if value.name in scope: break existing = scope.get(value.name) if existing and not self.differentForks(node, existing.source): parent_stmt = self.getParent(value.source) if isinstance(existing, Importation) and isinstance(parent_stmt, ast.For): self.report(messages.ImportShadowedByLoopVar, node, value.name, existing.source) elif scope is self.scope: if (isinstance(parent_stmt, ast.comprehension) and not isinstance(self.getParent(existing.source), (ast.For, ast.comprehension))): self.report(messages.RedefinedInListComp, node, value.name, existing.source) elif not existing.used and value.redefines(existing): self.report(messages.RedefinedWhileUnused, node, value.name, existing.source) elif isinstance(existing, Importation) and value.redefines(existing): existing.redefined.append(node) if value.name in self.scope: # then assume the rebound name is used as a global or within a loop value.used = self.scope[value.name].used self.scope[value.name] = value def getNodeHandler(self, node_class): try: return self._nodeHandlers[node_class] except KeyError: nodeType = getNodeType(node_class) self._nodeHandlers[node_class] = handler = getattr(self, nodeType) return handler def handleNodeLoad(self, node): name = getNodeName(node) if not name: return in_generators = None importStarred = None # try enclosing function scopes and global scope for scope in self.scopeStack[-1::-1]: # only generators used in a class scope can access the names # of the class. this is skipped during the first iteration if in_generators is False and isinstance(scope, ClassScope): continue try: scope[name].used = (self.scope, node) except KeyError: pass else: return importStarred = importStarred or scope.importStarred if in_generators is not False: in_generators = isinstance(scope, GeneratorScope) # look in the built-ins if name in self.builtIns: return if importStarred: from_list = [] for scope in self.scopeStack[-1::-1]: for binding in scope.values(): if isinstance(binding, StarImportation): # mark '*' imports as used for each scope binding.used = (self.scope, node) from_list.append(binding.fullName) # report * usage, with a list of possible sources from_list = ', '.join(sorted(from_list)) self.report(messages.ImportStarUsage, node, name, from_list) return if name == '__path__' and os.path.basename(self.filename) == '__init__.py': # the special name __path__ is valid only in packages return # protected with a NameError handler? 
if 'NameError' not in self.exceptHandlers[-1]: self.report(messages.UndefinedName, node, name) def handleNodeStore(self, node): name = getNodeName(node) if not name: return # if the name hasn't already been defined in the current scope if isinstance(self.scope, FunctionScope) and name not in self.scope: # for each function or module scope above us for scope in self.scopeStack[:-1]: if not isinstance(scope, (FunctionScope, ModuleScope)): continue # if the name was defined in that scope, and the name has # been accessed already in the current scope, and hasn't # been declared global used = name in scope and scope[name].used if used and used[0] is self.scope and name not in self.scope.globals: # then it's probably a mistake self.report(messages.UndefinedLocal, scope[name].used[1], name, scope[name].source) break parent_stmt = self.getParent(node) if isinstance(parent_stmt, (ast.For, ast.comprehension)) or ( parent_stmt != node.parent and not self.isLiteralTupleUnpacking(parent_stmt)): binding = Binding(name, node) elif name == '__all__' and isinstance(self.scope, ModuleScope): binding = ExportBinding(name, node.parent, self.scope) else: binding = Assignment(name, node) self.addBinding(node, binding) def handleNodeDelete(self, node): def on_conditional_branch(): """ Return `True` if node is part of a conditional body. """ current = getattr(node, 'parent', None) while current: if isinstance(current, (ast.If, ast.While, ast.IfExp)): return True current = getattr(current, 'parent', None) return False name = getNodeName(node) if not name: return if on_conditional_branch(): # We cannot predict if this conditional branch is going to # be executed. return if isinstance(self.scope, FunctionScope) and name in self.scope.globals: self.scope.globals.remove(name) else: try: del self.scope[name] except KeyError: self.report(messages.UndefinedName, node, name) def handleChildren(self, tree, omit=None): for node in iter_child_nodes(tree, omit=omit): self.handleNode(node, tree) def isLiteralTupleUnpacking(self, node): if isinstance(node, ast.Assign): for child in node.targets + [node.value]: if not hasattr(child, 'elts'): return False return True def isDocstring(self, node): """ Determine if the given node is a docstring, as long as it is at the correct place in the node tree. """ return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str)) def getDocstring(self, node): if isinstance(node, ast.Expr): node = node.value if not isinstance(node, ast.Str): return (None, None) if PYPY: doctest_lineno = node.lineno - 1 else: # Computed incorrectly if the docstring has backslash doctest_lineno = node.lineno - node.s.count('\n') - 1 return (node.s, doctest_lineno) def handleNode(self, node, parent): if node is None: return if self.offset and getattr(node, 'lineno', None) is not None: node.lineno += self.offset[0] node.col_offset += self.offset[1] if self.traceTree: print(' ' * self.nodeDepth + node.__class__.__name__) if self.futuresAllowed and not (isinstance(node, ast.ImportFrom) or self.isDocstring(node)): self.futuresAllowed = False self.nodeDepth += 1 node.depth = self.nodeDepth node.parent = parent try: handler = self.getNodeHandler(node.__class__) handler(node) finally: self.nodeDepth -= 1 if self.traceTree: print(' ' * self.nodeDepth + 'end ' + node.__class__.__name__) _getDoctestExamples = doctest.DocTestParser().get_examples def handleDoctests(self, node): try: if hasattr(node, 'docstring'): docstring = node.docstring # This is just a reasonable guess. 
In Python 3.7, docstrings no # longer have line numbers associated with them. This will be # incorrect if there are empty lines between the beginning # of the function and the docstring. node_lineno = node.lineno if hasattr(node, 'args'): node_lineno = max([node_lineno] + [arg.lineno for arg in node.args.args]) else: (docstring, node_lineno) = self.getDocstring(node.body[0]) examples = docstring and self._getDoctestExamples(docstring) except (ValueError, IndexError): # e.g. line 6 of the docstring for <string> has inconsistent # leading whitespace: ... return if not examples: return # Place doctest in module scope saved_stack = self.scopeStack self.scopeStack = [self.scopeStack[0]] node_offset = self.offset or (0, 0) self.pushScope(DoctestScope) underscore_in_builtins = '_' in self.builtIns if not underscore_in_builtins: self.builtIns.add('_') for example in examples: try: tree = compile(example.source, "<doctest>", "exec", ast.PyCF_ONLY_AST) except SyntaxError: e = sys.exc_info()[1] if PYPY: e.offset += 1 position = (node_lineno + example.lineno + e.lineno, example.indent + 4 + (e.offset or 0)) self.report(messages.DoctestSyntaxError, node, position) else: self.offset = (node_offset[0] + node_lineno + example.lineno, node_offset[1] + example.indent + 4) self.handleChildren(tree) self.offset = node_offset if not underscore_in_builtins: self.builtIns.remove('_') self.popScope() self.scopeStack = saved_stack def ignore(self, node): pass # "stmt" type nodes DELETE = PRINT = FOR = ASYNCFOR = WHILE = IF = WITH = WITHITEM = \ ASYNCWITH = ASYNCWITHITEM = RAISE = TRYFINALLY = EXEC = \ EXPR = ASSIGN = handleChildren PASS = ignore # "expr" type nodes BOOLOP = BINOP = UNARYOP = IFEXP = SET = \ COMPARE = CALL = REPR = ATTRIBUTE = SUBSCRIPT = \ STARRED = NAMECONSTANT = handleChildren NUM = STR = BYTES = ELLIPSIS = ignore # "slice" type nodes SLICE = EXTSLICE = INDEX = handleChildren # expression contexts are node instances too, though being constants LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore # same for operators AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \ BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \ EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = \ MATMULT = ignore # additional node types COMPREHENSION = KEYWORD = FORMATTEDVALUE = JOINEDSTR = handleChildren def DICT(self, node): # Complain if there are duplicate keys with different values # If they have the same value it's not going to cause potentially # unexpected behaviour so we'll not complain. keys = [ convert_to_value(key) for key in node.keys ] key_counts = counter(keys) duplicate_keys = [ key for key, count in key_counts.items() if count > 1 ] for key in duplicate_keys: key_indices = [i for i, i_key in enumerate(keys) if i_key == key] values = counter( convert_to_value(node.values[index]) for index in key_indices ) if any(count == 1 for value, count in values.items()): for key_index in key_indices: key_node = node.keys[key_index] if isinstance(key, VariableKey): self.report(messages.MultiValueRepeatedKeyVariable, key_node, key.name) else: self.report( messages.MultiValueRepeatedKeyLiteral, key_node, key, ) self.handleChildren(node) def ASSERT(self, node): if isinstance(node.test, ast.Tuple) and node.test.elts != []: self.report(messages.AssertTuple, node) self.handleChildren(node) def GLOBAL(self, node): """ Keep track of globals declarations. 
""" global_scope_index = 1 if self._in_doctest() else 0 global_scope = self.scopeStack[global_scope_index] # Ignore 'global' statement in global scope. if self.scope is not global_scope: # One 'global' statement can bind multiple (comma-delimited) names. for node_name in node.names: node_value = Assignment(node_name, node) # Remove UndefinedName messages already reported for this name. # TODO: if the global is not used in this scope, it does not # become a globally defined name. See test_unused_global. self.messages = [ m for m in self.messages if not isinstance(m, messages.UndefinedName) or m.message_args[0] != node_name] # Bind name to global scope if it doesn't exist already. global_scope.setdefault(node_name, node_value) # Bind name to non-global scopes, but as already "used". node_value.used = (global_scope, node) for scope in self.scopeStack[global_scope_index + 1:]: scope[node_name] = node_value NONLOCAL = GLOBAL def GENERATOREXP(self, node): self.pushScope(GeneratorScope) self.handleChildren(node) self.popScope() LISTCOMP = handleChildren if PY2 else GENERATOREXP DICTCOMP = SETCOMP = GENERATOREXP def NAME(self, node): """ Handle occurrence of Name (which can be a load/store/delete access.) """ # Locate the name in locals / function / globals scopes. if isinstance(node.ctx, (ast.Load, ast.AugLoad)): self.handleNodeLoad(node) if (node.id == 'locals' and isinstance(self.scope, FunctionScope) and isinstance(node.parent, ast.Call)): # we are doing locals() call in current scope self.scope.usesLocals = True elif isinstance(node.ctx, (ast.Store, ast.AugStore)): self.handleNodeStore(node) elif isinstance(node.ctx, ast.Del): self.handleNodeDelete(node) else: # must be a Param context -- this only happens for names in function # arguments, but these aren't dispatched through here raise RuntimeError("Got impossible expression context: %r" % (node.ctx,)) def CONTINUE(self, node): # Walk the tree up until we see a loop (OK), a function or class # definition (not OK), for 'continue', a finally block (not OK), or # the top module scope (not OK) n = node while hasattr(n, 'parent'): n, n_child = n.parent, n if isinstance(n, LOOP_TYPES): # Doesn't apply unless it's in the loop itself if n_child not in n.orelse: return if isinstance(n, (ast.FunctionDef, ast.ClassDef)): break # Handle Try/TryFinally difference in Python < and >= 3.3 if hasattr(n, 'finalbody') and isinstance(node, ast.Continue): if n_child in n.finalbody: self.report(messages.ContinueInFinally, node) return if isinstance(node, ast.Continue): self.report(messages.ContinueOutsideLoop, node) else: # ast.Break self.report(messages.BreakOutsideLoop, node) BREAK = CONTINUE def RETURN(self, node): if isinstance(self.scope, (ClassScope, ModuleScope)): self.report(messages.ReturnOutsideFunction, node) return if ( node.value and hasattr(self.scope, 'returnValue') and not self.scope.returnValue ): self.scope.returnValue = node.value self.handleNode(node.value, node) def YIELD(self, node): if isinstance(self.scope, (ClassScope, ModuleScope)): self.report(messages.YieldOutsideFunction, node) return self.scope.isGenerator = True self.handleNode(node.value, node) AWAIT = YIELDFROM = YIELD def FUNCTIONDEF(self, node): for deco in node.decorator_list: self.handleNode(deco, node) self.LAMBDA(node) self.addBinding(node, FunctionDefinition(node.name, node)) # doctest does not process doctest within a doctest, # or in nested functions. 
if (self.withDoctest and not self._in_doctest() and not isinstance(self.scope, FunctionScope)): self.deferFunction(lambda: self.handleDoctests(node)) ASYNCFUNCTIONDEF = FUNCTIONDEF def LAMBDA(self, node): args = [] annotations = [] if PY2: def addArgs(arglist): for arg in arglist: if isinstance(arg, ast.Tuple): addArgs(arg.elts) else: args.append(arg.id) addArgs(node.args.args) defaults = node.args.defaults else: for arg in node.args.args + node.args.kwonlyargs: args.append(arg.arg) annotations.append(arg.annotation) defaults = node.args.defaults + node.args.kw_defaults # Only for Python3 FunctionDefs is_py3_func = hasattr(node, 'returns') for arg_name in ('vararg', 'kwarg'): wildcard = getattr(node.args, arg_name) if not wildcard: continue args.append(wildcard if PY33 else wildcard.arg) if is_py3_func: if PY33: # Python 2.5 to 3.3 argannotation = arg_name + 'annotation' annotations.append(getattr(node.args, argannotation)) else: # Python >= 3.4 annotations.append(wildcard.annotation) if is_py3_func: annotations.append(node.returns) if len(set(args)) < len(args): for (idx, arg) in enumerate(args): if arg in args[:idx]: self.report(messages.DuplicateArgument, node, arg) for child in annotations + defaults: if child: self.handleNode(child, node) def runFunction(): self.pushScope() for name in args: self.addBinding(node, Argument(name, node)) if isinstance(node.body, list): # case for FunctionDefs for stmt in node.body: self.handleNode(stmt, node) else: # case for Lambdas self.handleNode(node.body, node) def checkUnusedAssignments(): """ Check to see if any assignments have not been used. """ for name, binding in self.scope.unusedAssignments(): self.report(messages.UnusedVariable, binding.source, name) self.deferAssignment(checkUnusedAssignments) if PY32: def checkReturnWithArgumentInsideGenerator(): """ Check to see if there is any return statement with arguments but the function is a generator. """ if self.scope.isGenerator and self.scope.returnValue: self.report(messages.ReturnWithArgsInsideGenerator, self.scope.returnValue) self.deferAssignment(checkReturnWithArgumentInsideGenerator) self.popScope() self.deferFunction(runFunction) def CLASSDEF(self, node): """ Check names used in a class definition, including its decorators, base classes, and the body of its definition. Additionally, add its name to the current scope. """ for deco in node.decorator_list: self.handleNode(deco, node) for baseNode in node.bases: self.handleNode(baseNode, node) if not PY2: for keywordNode in node.keywords: self.handleNode(keywordNode, node) self.pushScope(ClassScope) # doctest does not process doctest within a doctest # classes within classes are processed. if (self.withDoctest and not self._in_doctest() and not isinstance(self.scope, FunctionScope)): self.deferFunction(lambda: self.handleDoctests(node)) for stmt in node.body: self.handleNode(stmt, node) self.popScope() self.addBinding(node, ClassDefinition(node.name, node)) def AUGASSIGN(self, node): self.handleNodeLoad(node.target) self.handleNode(node.value, node) self.handleNode(node.target, node) def TUPLE(self, node): if not PY2 and isinstance(node.ctx, ast.Store): # Python 3 advanced tuple unpacking: a, *b, c = d. # Only one starred expression is allowed, and no more than 1<<8 # assignments are allowed before a stared expression. 
There is # also a limit of 1<<24 expressions after the starred expression, # which is impossible to test due to memory restrictions, but we # add it here anyway has_starred = False star_loc = -1 for i, n in enumerate(node.elts): if isinstance(n, ast.Starred): if has_starred: self.report(messages.TwoStarredExpressions, node) # The SyntaxError doesn't distinguish two from more # than two. break has_starred = True star_loc = i if star_loc >= 1 << 8 or len(node.elts) - star_loc - 1 >= 1 << 24: self.report(messages.TooManyExpressionsInStarredAssignment, node) self.handleChildren(node) LIST = TUPLE def IMPORT(self, node): for alias in node.names: if '.' in alias.name and not alias.asname: importation = SubmoduleImportation(alias.name, node) else: name = alias.asname or alias.name importation = Importation(name, node, alias.name) self.addBinding(node, importation) def IMPORTFROM(self, node): if node.module == '__future__': if not self.futuresAllowed: self.report(messages.LateFutureImport, node, [n.name for n in node.names]) else: self.futuresAllowed = False module = ('.' * node.level) + (node.module or '') for alias in node.names: name = alias.asname or alias.name if node.module == '__future__': importation = FutureImportation(name, node, self.scope) if alias.name not in __future__.all_feature_names: self.report(messages.FutureFeatureNotDefined, node, alias.name) elif alias.name == '*': # Only Python 2, local import * is a SyntaxWarning if not PY2 and not isinstance(self.scope, ModuleScope): self.report(messages.ImportStarNotPermitted, node, module) continue self.scope.importStarred = True self.report(messages.ImportStarUsed, node, module) importation = StarImportation(module, node) else: importation = ImportationFrom(name, node, module, alias.name) self.addBinding(node, importation) def TRY(self, node): handler_names = [] # List the exception handlers for i, handler in enumerate(node.handlers): if isinstance(handler.type, ast.Tuple): for exc_type in handler.type.elts: handler_names.append(getNodeName(exc_type)) elif handler.type: handler_names.append(getNodeName(handler.type)) if handler.type is None and i < len(node.handlers) - 1: self.report(messages.DefaultExceptNotLast, handler) # Memorize the except handlers and process the body self.exceptHandlers.append(handler_names) for child in node.body: self.handleNode(child, node) self.exceptHandlers.pop() # Process the other nodes: "except:", "else:", "finally:" self.handleChildren(node, omit='body') TRYEXCEPT = TRY def EXCEPTHANDLER(self, node): if PY2 or node.name is None: self.handleChildren(node) return # 3.x: the name of the exception, which is not a Name node, but # a simple string, creates a local that is only bound within the scope # of the except: block. for scope in self.scopeStack[::-1]: if node.name in scope: is_name_previously_defined = True break else: is_name_previously_defined = False self.handleNodeStore(node) self.handleChildren(node) if not is_name_previously_defined: # See discussion on https://github.com/PyCQA/pyflakes/pull/59 # We're removing the local name since it's being unbound # after leaving the except: block and it's always unbound # if the except: block is never entered. This will cause an # "undefined name" error raised if the checked code tries to # use the name afterwards. # # Unless it's been removed already. Then do nothing. try: del self.scope[node.name] except KeyError: pass def ANNASSIGN(self, node): if node.value: # Only bind the *targets* if the assignment has a value. 
# Otherwise it's not really ast.Store and shouldn't silence # UndefinedLocal warnings. self.handleNode(node.target, node) self.handleNode(node.annotation, node) if node.value: # If the assignment has value, handle the *value* now. self.handleNode(node.value, node)
2.265625
2
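A small, hedged driver for the Checker above, assuming the standard pyflakes package layout; the source snippet is invented for illustration:

import ast
from pyflakes.checker import Checker

source = "import os\n\ndef f():\n    x = 1\n    return 2\n"
tree = ast.parse(source)
result = Checker(tree, filename="example.py")
for message in sorted(result.messages, key=lambda m: m.lineno):
    print(message)  # expect: 'os' imported but unused; local variable 'x' assigned but never used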
AutoScreenShot.py
infinyte7/Auto-Screenshot
3
985
# Project Name: Auto Screenshot
# Description: Take a screenshot of the screen whenever any change takes place.
# Author: Mani (Infinyte7)
# Date: 26-10-2020
# License: MIT

from pyscreenshot import grab
from PIL import ImageChops

import os
import time
import subprocess, sys
from datetime import datetime

import tkinter as tk
from tkinter import *
from tkinter import font


class AutoScreenshot:
    def __init__(self, master):
        self.root = root
        root.title('Auto Screenshot')
        root.config(bg="white")

        fontRoboto = font.Font(family='Roboto', size=16, weight='bold')

        # project name label
        projectTitleLabel = Label(root, text="Auto Screenshot v1.0.0")
        projectTitleLabel.config(font=fontRoboto, bg="white", fg="#5599ff")
        projectTitleLabel.pack(padx="10")

        # start button
        btn_start = Button(root, text="Start", command=self.start)
        btn_start.config(highlightthickness=0, bd=0, fg="white", bg="#5fd38d",
                         activebackground="#5fd38d", activeforeground="white",
                         font=fontRoboto)
        btn_start.pack(padx="10", fill=BOTH)

        # close button
        btn_start = Button(root, text="Close", command=self.close)
        btn_start.config(highlightthickness=0, bd=0, fg="white", bg="#f44336",
                         activebackground="#ff7043", activeforeground="white",
                         font=fontRoboto)
        btn_start.pack(padx="10", pady="10", fill=BOTH)

    def start(self):
        # Create a folder to store images
        directory = "Screenshots"
        self.new_folder = directory + "/" + datetime.now().strftime("%Y_%m_%d-%I_%M_%p")

        # all images go into one parent folder
        if not os.path.exists(directory):
            os.makedirs(directory)

        # new folder for storing images of the current session
        if not os.path.exists(self.new_folder):
            os.makedirs(self.new_folder)

        # Run GetScreenCoordinates.py and get the coordinates
        cords_point = subprocess.check_output([sys.executable, "GetScreenCoordinates.py", "-l"])
        cord_tuple = tuple(cords_point.decode("utf-8").rstrip().split(","))

        # coordinates used for taking and comparing screenshots
        self.cords = (int(cord_tuple[0]), int(cord_tuple[1]),
                      int(cord_tuple[2]), int(cord_tuple[3]))

        # save the first image
        img1 = grab(bbox=self.cords)
        now = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
        fname = self.new_folder + "/ScreenShots" + now + ".png"
        img1.save(fname)
        print("First Screenshot taken")

        # start taking screenshots of the next images
        self.take_screenshots()

    def take_screenshots(self):
        # grab the first and second image
        img1 = grab(bbox=self.cords)
        time.sleep(1)
        img2 = grab(bbox=self.cords)

        # check the difference between the images
        diff = ImageChops.difference(img1, img2)
        bbox = diff.getbbox()

        if bbox is not None:
            now = datetime.now().strftime("%Y_%m_%d-%I_%M_%S_%p")
            fname = self.new_folder + "/ScreenShots" + now + ".png"
            img2.save(fname)
            print("Screenshot taken")

        root.after(5, self.take_screenshots)

    def close(self):
        quit()


if __name__ == "__main__":
    root = Tk()
    gui = AutoScreenshot(root)
    root.mainloop()
3.21875
3
rqalpha/utils/logger.py
HaidongHe/rqalpha
1
986
<reponame>HaidongHe/rqalpha
# -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen RiceQuant Technology Co., Ltd. (hereinafter "RiceQuant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (individuals using the software for non-commercial purposes, or non-profit
#   institutions such as universities and research institutes using it for education, research, etc.):
#   governed by the Apache License 2.0 (the "Apache 2.0 license"); a copy of the Apache 2.0 license is
#   available at http://www.apache.org/licenses/LICENSE-2.0. Unless required by law or agreed in
#   writing, the software must be distributed under the current license "as is", with no added conditions.
#
# * Commercial use (individuals using the software for any commercial purpose, or legal persons and
#   other organizations using it for any purpose):
#   Without authorization from RiceQuant, no individual may use this software for any commercial purpose
#   (including but not limited to providing, selling, renting, lending or transferring the software, its
#   derivatives, or products or services that reference or borrow its functionality or source code to
#   third parties), and no legal person or other organization may use it for any purpose; otherwise
#   RiceQuant is entitled to pursue liability for the corresponding intellectual-property infringement.
#   Subject to the above, use of the software must also comply with the Apache 2.0 license; where the
#   Apache 2.0 license conflicts with this license, this license prevails.
#   For the detailed licensing process, please contact <EMAIL>.

from datetime import datetime

import logbook
from logbook import Logger, StderrHandler

from rqalpha.utils.py2 import to_utf8

logbook.set_datetime_format("local")

# patch warn
logbook.base._level_names[logbook.base.WARNING] = 'WARN'

__all__ = [
    "user_log",
    "system_log",
    "user_system_log",
]

DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"


def user_std_handler_log_formatter(record, handler):
    from rqalpha.environment import Environment

    try:
        dt = Environment.get_instance().calendar_dt.strftime(DATETIME_FORMAT)
    except Exception:
        dt = datetime.now().strftime(DATETIME_FORMAT)

    log = "{dt} {level} {msg}".format(
        dt=dt,
        level=record.level_name,
        msg=to_utf8(record.message),
    )
    return log


user_std_handler = StderrHandler(bubble=True)
user_std_handler.formatter = user_std_handler_log_formatter


def formatter_builder(tag):
    def formatter(record, handler):
        log = "[{formatter_tag}] [{time}] {level}: {msg}".format(
            formatter_tag=tag,
            level=record.level_name,
            msg=to_utf8(record.message),
            time=record.time,
        )

        if record.formatted_exception:
            log += "\n" + record.formatted_exception
        return log
    return formatter


# loggers
# logger for user (strategy) code
user_log = Logger("user_log")
# system log shown to the user
user_system_log = Logger("user_system_log")

# detailed log used when printing user exceptions
user_detail_log = Logger("user_detail_log")
# user_detail_log.handlers.append(StderrHandler(bubble=True))

# system log
system_log = Logger("system_log")
basic_system_log = Logger("basic_system_log")

# standard output log
std_log = Logger("std_log")


def init_logger():
    system_log.handlers = [StderrHandler(bubble=True)]
    basic_system_log.handlers = [StderrHandler(bubble=True)]
    std_log.handlers = [StderrHandler(bubble=True)]
    user_log.handlers = []
    user_system_log.handlers = []


def user_print(*args, **kwargs):
    sep = kwargs.get("sep", " ")
    end = kwargs.get("end", "")

    message = sep.join(map(str, args)) + end

    user_log.info(message)


init_logger()
1.523438
2
salt/modules/oracle.py
wikimedia/operations-debs-salt
0
987
# -*- coding: utf-8 -*- ''' Oracle DataBase connection module :mainteiner: <NAME> <<EMAIL>> :maturity: new :depends: cx_Oracle :platform: all :configuration: module provide connections for multiple Oracle DB instances. **OS Environment** .. code-block:: text ORACLE_HOME: path to oracle product PATH: path to Oracle Client libs need to be in PATH **pillar** .. code-block:: text oracle.dbs: list of known based oracle.dbs.<db>.uri: connection credentials in format: user/password@host[:port]/sid[ as {sysdba|sysoper}] ''' import os import logging from salt.utils.decorators import depends log = logging.getLogger(__name__) try: import cx_Oracle MODE = { 'sysdba': cx_Oracle.SYSDBA, 'sysoper': cx_Oracle.SYSOPER } HAS_CX_ORACLE = True except ImportError: MODE = {'sysdba': 2, 'sysoper': 4} HAS_CX_ORACLE = False __virtualname__ = 'oracle' def __virtual__(): ''' Load module only if cx_Oracle installed ''' return __virtualname__ if HAS_CX_ORACLE else False def _cx_oracle_req(): ''' Fallback function stub ''' return 'Need "cx_Oracle" and Oracle Client installed for this functin exist' def _unicode_output(cursor, name, default_type, size, precision, scale): ''' Return strings values as python unicode string http://www.oracle.com/technetwork/articles/dsl/tuininga-cx-oracle-084866.html ''' if default_type in (cx_Oracle.STRING, cx_Oracle.LONG_STRING, cx_Oracle.FIXED_CHAR, cx_Oracle.CLOB): return cursor.var(unicode, size, cursor.arraysize) def _connect(uri): ''' uri = user/password@host[:port]/sid[ as {sysdba|sysoper}] Return cx_Oracle.Connection instance ''' # cx_Oracle.Connection() not support 'as sysdba' syntax uri_l = uri.rsplit(' as ', 1) if len(uri_l) == 2: credentials, mode = uri_l mode = MODE[mode] else: credentials = uri_l[0] mode = 0 userpass, hostportsid = credentials.split('@') user, password = userpass.split('/') hostport, sid = hostportsid.split('/') hostport_l = hostport.split(':') if len(hostport_l) == 2: host, port = hostport_l else: host = hostport_l[0] port = 1521 log.debug('connect: {0}'.format((user, password, host, port, sid, mode))) # force UTF-8 client encoding os.environ['NLS_LANG'] = '.AL32UTF8' conn = cx_Oracle.connect(user, password, cx_Oracle.makedsn(host, port, sid), mode) conn.outputtypehandler = _unicode_output return conn @depends('cx_Oracle', fallback_function=_cx_oracle_req) def run_query(db, query): ''' Run SQL query and return result CLI example: .. code-block:: bash salt '*' oracle.run_query my_db "select * from my_table" ''' log.debug('run query on {0}: {1}'.format(db, query)) conn = _connect(show_dbs(db)[db]['uri']) return conn.cursor().execute(query).fetchall() def show_dbs(*dbs): ''' Show databases configuration from pillar. Filter by args .. code-block:: bash salt '*' oracle.show_dbs salt '*' oracle.show_dbs my_db ''' if dbs: log.debug('get dbs from pillar: {0}'.format(dbs)) result = {} for db in dbs: result[db] = __salt__['pillar.get']('oracle:dbs:' + db) return result else: pillar_dbs = __salt__['pillar.get']('oracle:dbs') log.debug('get all ({0}) dbs from pillar'.format(len(pillar_dbs))) return pillar_dbs @depends('cx_Oracle', fallback_function=_cx_oracle_req) def version(*dbs): ''' Server Version (select banner from v$version) CLI Example: .. 
code-block:: bash salt '*' oracle.version salt '*' oracle.version my_db ''' pillar_dbs = __salt__['pillar.get']('oracle:dbs') get_version = lambda x: [ r[0] for r in run_query(x, "select banner from v$version order by banner") ] result = {} if dbs: log.debug('get db versions for: {0}'.format(dbs)) for db in dbs: if db in pillar_dbs: result[db] = get_version(db) else: log.debug('get all({0}) dbs versions'.format(len(dbs))) for db in dbs: result[db] = get_version(db) return result @depends('cx_Oracle', fallback_function=_cx_oracle_req) def client_version(): ''' Oracle Client Version CLI Example: .. code-block:: bash salt '*' oracle.client_version ''' return '.'.join((str(x) for x in cx_Oracle.clientversion())) def show_pillar(item=None): ''' Show Pillar segment oracle.* and subitem with notation "item:subitem" CLI Example: .. code-block:: bash salt '*' oracle.show_pillar salt '*' oracle.show_pillar dbs:my_db ''' if item: return __salt__['pillar.get']('oracle:' + item) else: return __salt__['pillar.get']('oracle') def show_env(): ''' Show Environment used by Oracle Client CLI Example: .. code-block:: bash salt '*' oracle.show_env .. note:: at first _connect() ``NLS_LANG`` will forced to '.AL32UTF8' ''' envs = ['PATH', 'ORACLE_HOME', 'TNS_ADMIN', 'NLS_LANG'] result = {} for env in envs: if env in os.environ: result[env] = os.environ[env] return result
2.328125
2
tests/test_std.py
ashwini-balnaves/python-consul
469
988
<filename>tests/test_std.py import base64 import operator import struct import time import pytest import six import consul import consul.std Check = consul.Check class TestHTTPClient(object): def test_uri(self): http = consul.std.HTTPClient() assert http.uri('/v1/kv') == 'http://127.0.0.1:8500/v1/kv' assert http.uri('/v1/kv', params={'index': 1}) == \ 'http://127.0.0.1:8500/v1/kv?index=1' class TestConsul(object): def test_kv(self, consul_port): c = consul.Consul(port=consul_port) index, data = c.kv.get('foo') assert data is None assert c.kv.put('foo', 'bar') is True index, data = c.kv.get('foo') assert data['Value'] == six.b('bar') def test_kv_wait(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('foo', 'bar') is True index, data = c.kv.get('foo') check, data = c.kv.get('foo', index=index, wait='20ms') assert index == check def test_kv_encoding(self, consul_port): c = consul.Consul(port=consul_port) # test binary c.kv.put('foo', struct.pack('i', 1000)) index, data = c.kv.get('foo') assert struct.unpack('i', data['Value']) == (1000,) # test unicode c.kv.put('foo', u'bar') index, data = c.kv.get('foo') assert data['Value'] == six.b('bar') # test empty-string comes back as `None` c.kv.put('foo', '') index, data = c.kv.get('foo') assert data['Value'] is None # test None c.kv.put('foo', None) index, data = c.kv.get('foo') assert data['Value'] is None # check unencoded values raises assert pytest.raises(AssertionError, c.kv.put, 'foo', {1: 2}) def test_kv_put_cas(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('foo', 'bar', cas=50) is False assert c.kv.put('foo', 'bar', cas=0) is True index, data = c.kv.get('foo') assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']-1) is False assert c.kv.put('foo', 'bar2', cas=data['ModifyIndex']) is True index, data = c.kv.get('foo') assert data['Value'] == six.b('bar2') def test_kv_put_flags(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo', 'bar') index, data = c.kv.get('foo') assert data['Flags'] == 0 assert c.kv.put('foo', 'bar', flags=50) is True index, data = c.kv.get('foo') assert data['Flags'] == 50 def test_kv_recurse(self, consul_port): c = consul.Consul(port=consul_port) index, data = c.kv.get('foo/', recurse=True) assert data is None c.kv.put('foo/', None) index, data = c.kv.get('foo/', recurse=True) assert len(data) == 1 c.kv.put('foo/bar1', '1') c.kv.put('foo/bar2', '2') c.kv.put('foo/bar3', '3') index, data = c.kv.get('foo/', recurse=True) assert [x['Key'] for x in data] == [ 'foo/', 'foo/bar1', 'foo/bar2', 'foo/bar3'] assert [x['Value'] for x in data] == [ None, six.b('1'), six.b('2'), six.b('3')] def test_kv_delete(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo1', '1') c.kv.put('foo2', '2') c.kv.put('foo3', '3') index, data = c.kv.get('foo', recurse=True) assert [x['Key'] for x in data] == ['foo1', 'foo2', 'foo3'] assert c.kv.delete('foo2') is True index, data = c.kv.get('foo', recurse=True) assert [x['Key'] for x in data] == ['foo1', 'foo3'] assert c.kv.delete('foo', recurse=True) is True index, data = c.kv.get('foo', recurse=True) assert data is None def test_kv_delete_cas(self, consul_port): c = consul.Consul(port=consul_port) c.kv.put('foo', 'bar') index, data = c.kv.get('foo') assert c.kv.delete('foo', cas=data['ModifyIndex']-1) is False assert c.kv.get('foo') == (index, data) assert c.kv.delete('foo', cas=data['ModifyIndex']) is True index, data = c.kv.get('foo') assert data is None def test_kv_acquire_release(self, consul_port): c = 
consul.Consul(port=consul_port) pytest.raises( consul.ConsulException, c.kv.put, 'foo', 'bar', acquire='foo') s1 = c.session.create() s2 = c.session.create() assert c.kv.put('foo', '1', acquire=s1) is True assert c.kv.put('foo', '2', acquire=s2) is False assert c.kv.put('foo', '1', acquire=s1) is True assert c.kv.put('foo', '1', release='foo') is False assert c.kv.put('foo', '2', release=s2) is False assert c.kv.put('foo', '2', release=s1) is True c.session.destroy(s1) c.session.destroy(s2) def test_kv_keys_only(self, consul_port): c = consul.Consul(port=consul_port) assert c.kv.put('bar', '4') is True assert c.kv.put('base/foo', '1') is True assert c.kv.put('base/base/foo', '5') is True index, data = c.kv.get('base/', keys=True, separator='/') assert data == ['base/base/', 'base/foo'] def test_transaction(self, consul_port): c = consul.Consul(port=consul_port) value = base64.b64encode(b"1").decode("utf8") d = {"KV": {"Verb": "set", "Key": "asdf", "Value": value}} r = c.txn.put([d]) assert r["Errors"] is None d = {"KV": {"Verb": "get", "Key": "asdf"}} r = c.txn.put([d]) assert r["Results"][0]["KV"]["Value"] == value def test_event(self, consul_port): c = consul.Consul(port=consul_port) assert c.event.fire("fooname", "foobody") index, events = c.event.list() assert [x['Name'] == 'fooname' for x in events] assert [x['Payload'] == 'foobody' for x in events] def test_event_targeted(self, consul_port): c = consul.Consul(port=consul_port) assert c.event.fire("fooname", "foobody") index, events = c.event.list(name="othername") assert events == [] index, events = c.event.list(name="fooname") assert [x['Name'] == 'fooname' for x in events] assert [x['Payload'] == 'foobody' for x in events] def test_agent_checks(self, consul_port): c = consul.Consul(port=consul_port) def verify_and_dereg_check(check_id): assert set(c.agent.checks().keys()) == set([check_id]) assert c.agent.check.deregister(check_id) is True assert set(c.agent.checks().keys()) == set([]) def verify_check_status(check_id, status, notes=None): checks = c.agent.checks() assert checks[check_id]['Status'] == status if notes: assert checks[check_id]['Output'] == notes # test setting notes on a check c.agent.check.register('check', Check.ttl('1s'), notes='foo') assert c.agent.checks()['check']['Notes'] == 'foo' c.agent.check.deregister('check') assert set(c.agent.checks().keys()) == set([]) assert c.agent.check.register( 'script_check', Check.script('/bin/true', 10)) is True verify_and_dereg_check('script_check') assert c.agent.check.register( 'check name', Check.script('/bin/true', 10), check_id='check_id') is True verify_and_dereg_check('check_id') http_addr = "http://127.0.0.1:{0}".format(consul_port) assert c.agent.check.register( 'http_check', Check.http(http_addr, '10ms')) is True time.sleep(1) verify_check_status('http_check', 'passing') verify_and_dereg_check('http_check') assert c.agent.check.register( 'http_timeout_check', Check.http(http_addr, '100ms', timeout='2s')) is True verify_and_dereg_check('http_timeout_check') assert c.agent.check.register('ttl_check', Check.ttl('100ms')) is True assert c.agent.check.ttl_warn('ttl_check') is True verify_check_status('ttl_check', 'warning') assert c.agent.check.ttl_warn( 'ttl_check', notes='its not quite right') is True verify_check_status('ttl_check', 'warning', 'its not quite right') assert c.agent.check.ttl_fail('ttl_check') is True verify_check_status('ttl_check', 'critical') assert c.agent.check.ttl_fail( 'ttl_check', notes='something went boink!') is True verify_check_status( 
'ttl_check', 'critical', notes='something went boink!') assert c.agent.check.ttl_pass('ttl_check') is True verify_check_status('ttl_check', 'passing') assert c.agent.check.ttl_pass( 'ttl_check', notes='all hunky dory!') is True verify_check_status('ttl_check', 'passing', notes='all hunky dory!') # wait for ttl to expire time.sleep(120/1000.0) verify_check_status('ttl_check', 'critical') verify_and_dereg_check('ttl_check') def test_service_dereg_issue_156(self, consul_port): # https://github.com/cablehead/python-consul/issues/156 service_name = 'app#127.0.0.1#3000' c = consul.Consul(port=consul_port) c.agent.service.register(service_name) time.sleep(80/1000.0) index, nodes = c.health.service(service_name) assert [node['Service']['ID'] for node in nodes] == [service_name] # Clean up tasks assert c.agent.service.deregister(service_name) is True time.sleep(40/1000.0) index, nodes = c.health.service(service_name) assert [node['Service']['ID'] for node in nodes] == [] def test_agent_checks_service_id(self, consul_port): c = consul.Consul(port=consul_port) c.agent.service.register('foo1') time.sleep(40/1000.0) index, nodes = c.health.service('foo1') assert [node['Service']['ID'] for node in nodes] == ['foo1'] c.agent.check.register('foo', Check.ttl('100ms'), service_id='foo1') time.sleep(40/1000.0) index, nodes = c.health.service('foo1') assert set([ check['ServiceID'] for node in nodes for check in node['Checks']]) == set(['foo1', '']) assert set([ check['CheckID'] for node in nodes for check in node['Checks']]) == set(['foo', 'serfHealth']) # Clean up tasks assert c.agent.check.deregister('foo') is True time.sleep(40/1000.0) assert c.agent.service.deregister('foo1') is True time.sleep(40/1000.0) def test_agent_register_check_no_service_id(self, consul_port): c = consul.Consul(port=consul_port) index, nodes = c.health.service("foo1") assert nodes == [] pytest.raises(consul.std.base.ConsulException, c.agent.check.register, 'foo', Check.ttl('100ms'), service_id='foo1') time.sleep(40/1000.0) assert c.agent.checks() == {} # Cleanup tasks c.agent.check.deregister('foo') time.sleep(40/1000.0) def test_agent_register_enable_tag_override(self, consul_port): c = consul.Consul(port=consul_port) index, nodes = c.health.service("foo1") assert nodes == [] c.agent.service.register('foo', enable_tag_override=True) assert c.agent.services()['foo']['EnableTagOverride'] # Cleanup tasks c.agent.check.deregister('foo') def test_agent_service_maintenance(self, consul_port): c = consul.Consul(port=consul_port) c.agent.service.register('foo', check=Check.ttl('100ms')) time.sleep(40/1000.0) c.agent.service.maintenance('foo', 'true', "test") time.sleep(40/1000.0) checks_pre = c.agent.checks() assert '_service_maintenance:foo' in checks_pre.keys() assert 'test' == checks_pre['_service_maintenance:foo']['Notes'] c.agent.service.maintenance('foo', 'false') time.sleep(40/1000.0) checks_post = c.agent.checks() assert '_service_maintenance:foo' not in checks_post.keys() # Cleanup c.agent.service.deregister('foo') time.sleep(40/1000.0) def test_agent_node_maintenance(self, consul_port): c = consul.Consul(port=consul_port) c.agent.maintenance('true', "test") time.sleep(40/1000.0) checks_pre = c.agent.checks() assert '_node_maintenance' in checks_pre.keys() assert 'test' == checks_pre['_node_maintenance']['Notes'] c.agent.maintenance('false') time.sleep(40/1000.0) checks_post = c.agent.checks() assert '_node_maintenance' not in checks_post.keys() def test_agent_members(self, consul_port): c = consul.Consul(port=consul_port) 
members = c.agent.members() for x in members: assert x['Status'] == 1 assert not x['Name'] is None assert not x['Tags'] is None assert c.agent.self()['Member'] in members wan_members = c.agent.members(wan=True) for x in wan_members: assert 'dc1' in x['Name'] def test_agent_self(self, consul_port): c = consul.Consul(port=consul_port) assert set(c.agent.self().keys()) == set(['Member', 'Stats', 'Config', 'Coord', 'DebugConfig', 'Meta']) def test_agent_services(self, consul_port): c = consul.Consul(port=consul_port) assert c.agent.service.register('foo') is True assert set(c.agent.services().keys()) == set(['foo']) assert c.agent.service.deregister('foo') is True assert set(c.agent.services().keys()) == set() # test address param assert c.agent.service.register('foo', address='10.10.10.1') is True assert [ v['Address'] for k, v in c.agent.services().items() if k == 'foo'][0] == '10.10.10.1' assert c.agent.service.deregister('foo') is True def test_catalog(self, consul_port): c = consul.Consul(port=consul_port) # grab the node our server created, so we can ignore it _, nodes = c.catalog.nodes() assert len(nodes) == 1 current = nodes[0] # test catalog.datacenters assert c.catalog.datacenters() == ['dc1'] # test catalog.register pytest.raises( consul.ConsulException, c.catalog.register, 'foo', '10.1.10.11', dc='dc2') assert c.catalog.register( 'n1', '10.1.10.11', service={'service': 's1'}, check={'name': 'c1'}) is True assert c.catalog.register( 'n1', '10.1.10.11', service={'service': 's2'}) is True assert c.catalog.register( 'n2', '10.1.10.12', service={'service': 's1', 'tags': ['master']}) is True # test catalog.nodes pytest.raises(consul.ConsulException, c.catalog.nodes, dc='dc2') _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == ['n1', 'n2'] # test catalog.services pytest.raises(consul.ConsulException, c.catalog.services, dc='dc2') _, services = c.catalog.services() assert services == {'s1': [u'master'], 's2': [], 'consul': []} # test catalog.node pytest.raises(consul.ConsulException, c.catalog.node, 'n1', dc='dc2') _, node = c.catalog.node('n1') assert set(node['Services'].keys()) == set(['s1', 's2']) _, node = c.catalog.node('n3') assert node is None # test catalog.service pytest.raises( consul.ConsulException, c.catalog.service, 's1', dc='dc2') _, nodes = c.catalog.service('s1') assert set([x['Node'] for x in nodes]) == set(['n1', 'n2']) _, nodes = c.catalog.service('s1', tag='master') assert set([x['Node'] for x in nodes]) == set(['n2']) # test catalog.deregister pytest.raises( consul.ConsulException, c.catalog.deregister, 'n2', dc='dc2') assert c.catalog.deregister('n1', check_id='c1') is True assert c.catalog.deregister('n2', service_id='s1') is True # check the nodes weren't removed _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == ['n1', 'n2'] # check n2's s1 service was removed though _, nodes = c.catalog.service('s1') assert set([x['Node'] for x in nodes]) == set(['n1']) # cleanup assert c.catalog.deregister('n1') is True assert c.catalog.deregister('n2') is True _, nodes = c.catalog.nodes() nodes.remove(current) assert [x['Node'] for x in nodes] == [] def test_health_service(self, consul_port): c = consul.Consul(port=consul_port) # check there are no nodes for the service 'foo' index, nodes = c.health.service('foo') assert nodes == [] # register two nodes, one with a long ttl, the other shorter c.agent.service.register( 'foo', service_id='foo:1', check=Check.ttl('10s'), tags=['tag:foo:1']) 
c.agent.service.register( 'foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40/1000.0) # check the nodes show for the /health/service endpoint index, nodes = c.health.service('foo') assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # but that they aren't passing their health check index, nodes = c.health.service('foo', passing=True) assert nodes == [] # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # both nodes are now available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # wait until the short ttl node fails time.sleep(120/1000.0) # only one node available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # check both nodes are available index, nodes = c.health.service('foo', passing=True) assert [node['Service']['ID'] for node in nodes] == ['foo:1', 'foo:2'] # check that tag works index, nodes = c.health.service('foo', tag='tag:foo:1') assert [node['Service']['ID'] for node in nodes] == ['foo:1'] # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40/1000.0) index, nodes = c.health.service('foo') assert nodes == [] def test_health_state(self, consul_port): c = consul.Consul(port=consul_port) # The empty string is for the Serf Health Status check, which has an # empty ServiceID index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == [''] # register two nodes, one with a long ttl, the other shorter c.agent.service.register( 'foo', service_id='foo:1', check=Check.ttl('10s')) c.agent.service.register( 'foo', service_id='foo:2', check=Check.ttl('100ms')) time.sleep(40/1000.0) # check the nodes show for the /health/state/any endpoint index, nodes = c.health.state('any') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # but that they aren't passing their health check index, nodes = c.health.state('passing') assert [node['ServiceID'] for node in nodes] != 'foo' # ping the two node's health check c.agent.check.ttl_pass('service:foo:1') c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # both nodes are now available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # wait until the short ttl node fails time.sleep(2200/1000.0) # only one node available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1']) # ping the failed node's health check c.agent.check.ttl_pass('service:foo:2') time.sleep(40/1000.0) # check both nodes are available index, nodes = c.health.state('passing') assert set([node['ServiceID'] for node in nodes]) == set( ['', 'foo:1', 'foo:2']) # deregister the nodes c.agent.service.deregister('foo:1') c.agent.service.deregister('foo:2') time.sleep(40/1000.0) index, nodes = c.health.state('any') assert [node['ServiceID'] for node in nodes] == [''] def test_health_node(self, consul_port): c = consul.Consul(port=consul_port) # grab local node name node = c.agent.self()['Config']['NodeName'] index, checks = c.health.node(node) assert node in [check["Node"] for check in checks] def test_health_checks(self, consul_port): c = consul.Consul(port=consul_port) 
c.agent.service.register( 'foobar', service_id='foobar', check=Check.ttl('10s')) time.sleep(40/1000.00) index, checks = c.health.checks('foobar') assert [check['ServiceID'] for check in checks] == ['foobar'] assert [check['CheckID'] for check in checks] == ['service:foobar'] c.agent.service.deregister('foobar') time.sleep(40/1000.0) index, checks = c.health.checks('foobar') assert len(checks) == 0 def test_session(self, consul_port): c = consul.Consul(port=consul_port) # session.create pytest.raises(consul.ConsulException, c.session.create, node='n2') pytest.raises(consul.ConsulException, c.session.create, dc='dc2') session_id = c.session.create('my-session') # session.list pytest.raises(consul.ConsulException, c.session.list, dc='dc2') _, sessions = c.session.list() assert [x['Name'] for x in sessions] == ['my-session'] # session.info pytest.raises( consul.ConsulException, c.session.info, session_id, dc='dc2') index, session = c.session.info('1'*36) assert session is None index, session = c.session.info(session_id) assert session['Name'] == 'my-session' # session.node node = session['Node'] pytest.raises( consul.ConsulException, c.session.node, node, dc='dc2') _, sessions = c.session.node(node) assert [x['Name'] for x in sessions] == ['my-session'] # session.destroy pytest.raises( consul.ConsulException, c.session.destroy, session_id, dc='dc2') assert c.session.destroy(session_id) is True _, sessions = c.session.list() assert sessions == [] def test_session_delete_ttl_renew(self, consul_port): c = consul.Consul(port=consul_port) s = c.session.create(behavior='delete', ttl=20) # attempt to renew an unknown session pytest.raises(consul.NotFound, c.session.renew, '1'*36) session = c.session.renew(s) assert session['Behavior'] == 'delete' assert session['TTL'] == '20s' # trying out the behavior assert c.kv.put('foo', '1', acquire=s) is True index, data = c.kv.get('foo') assert data['Value'] == six.b('1') c.session.destroy(s) index, data = c.kv.get('foo') assert data is None def test_acl_disabled(self, consul_port): c = consul.Consul(port=consul_port) pytest.raises(consul.ACLDisabled, c.acl.list) pytest.raises(consul.ACLDisabled, c.acl.info, '1'*36) pytest.raises(consul.ACLDisabled, c.acl.create) pytest.raises(consul.ACLDisabled, c.acl.update, 'foo') pytest.raises(consul.ACLDisabled, c.acl.clone, 'foo') pytest.raises(consul.ACLDisabled, c.acl.destroy, 'foo') def test_acl_permission_denied(self, acl_consul): c = consul.Consul(port=acl_consul.port) pytest.raises(consul.ACLPermissionDenied, c.acl.list) pytest.raises(consul.ACLPermissionDenied, c.acl.create) pytest.raises(consul.ACLPermissionDenied, c.acl.update, 'anonymous') pytest.raises(consul.ACLPermissionDenied, c.acl.clone, 'anonymous') pytest.raises(consul.ACLPermissionDenied, c.acl.destroy, 'anonymous') def test_acl_explict_token_use(self, acl_consul): c = consul.Consul(port=acl_consul.port) master_token = acl_consul.token acls = c.acl.list(token=master_token) assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) assert c.acl.info('1'*36) is None compare = [c.acl.info(master_token), c.acl.info('anonymous')] compare.sort(key=operator.itemgetter('ID')) assert acls == compare rules = """ key "" { policy = "read" } key "private/" { policy = "deny" } service "foo-" { policy = "write" } service "bar-" { policy = "read" } """ token = c.acl.create(rules=rules, token=master_token) assert c.acl.info(token)['Rules'] == rules token2 = c.acl.clone(token, token=master_token) assert c.acl.info(token2)['Rules'] == rules assert 
c.acl.update(token2, name='Foo', token=master_token) == token2 assert c.acl.info(token2)['Name'] == 'Foo' assert c.acl.destroy(token2, token=master_token) is True assert c.acl.info(token2) is None c.kv.put('foo', 'bar') c.kv.put('private/foo', 'bar') assert c.kv.get('foo', token=token)[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'foo', token=token) assert c.kv.get('private/foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c.kv.get, 'private/foo', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'private/foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'private/foo', token=token) # test token pass through for service registration pytest.raises( consul.ACLPermissionDenied, c.agent.service.register, "bar-1", token=token) c.agent.service.register("foo-1", token=token) index, data = c.health.service('foo-1', token=token) assert data[0]['Service']['ID'] == "foo-1" index, data = c.health.checks('foo-1', token=token) assert data == [] index, data = c.health.service('bar-1', token=token) assert not data # clean up assert c.agent.service.deregister('foo-1') is True c.acl.destroy(token, token=master_token) acls = c.acl.list(token=master_token) assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) def test_acl_implicit_token_use(self, acl_consul): # configure client to use the master token by default c = consul.Consul(port=acl_consul.port, token=acl_consul.token) master_token = acl_consul.token acls = c.acl.list() assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) assert c.acl.info('foo') is None compare = [c.acl.info(master_token), c.acl.info('anonymous')] compare.sort(key=operator.itemgetter('ID')) assert acls == compare rules = """ key "" { policy = "read" } key "private/" { policy = "deny" } """ token = c.acl.create(rules=rules) assert c.acl.info(token)['Rules'] == rules token2 = c.acl.clone(token) assert c.acl.info(token2)['Rules'] == rules assert c.acl.update(token2, name='Foo') == token2 assert c.acl.info(token2)['Name'] == 'Foo' assert c.acl.destroy(token2) is True assert c.acl.info(token2) is None c.kv.put('foo', 'bar') c.kv.put('private/foo', 'bar') c_limited = consul.Consul(port=acl_consul.port, token=token) assert c_limited.kv.get('foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.put, 'foo', 'bar2') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.delete, 'foo') assert c.kv.get('private/foo')[1]['Value'] == six.b('bar') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.get, 'private/foo') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.put, 'private/foo', 'bar2') pytest.raises( consul.ACLPermissionDenied, c_limited.kv.delete, 'private/foo') # check we can override the client's default token pytest.raises( consul.ACLPermissionDenied, c.kv.get, 'private/foo', token=token ) pytest.raises( consul.ACLPermissionDenied, c.kv.put, 'private/foo', 'bar2', token=token) pytest.raises( consul.ACLPermissionDenied, c.kv.delete, 'private/foo', token=token) # clean up c.acl.destroy(token) acls = c.acl.list() assert set([x['ID'] for x in acls]) == \ set(['anonymous', master_token]) def test_status_leader(self, consul_port): c = consul.Consul(port=consul_port) agent_self = c.agent.self() leader = c.status.leader() addr_port = agent_self['Stats']['consul']['leader_addr'] assert leader == addr_port, \ "Leader 
value was {0}, expected value " \ "was {1}".format(leader, addr_port) def test_status_peers(self, consul_port): c = consul.Consul(port=consul_port) agent_self = c.agent.self() addr_port = agent_self['Stats']['consul']['leader_addr'] peers = c.status.peers() assert addr_port in peers, \ "Expected value '{0}' " \ "in peer list but it was not present".format(addr_port) def test_query(self, consul_port): c = consul.Consul(port=consul_port) # check that query list is empty queries = c.query.list() assert queries == [] # create a new named query query_service = 'foo' query_name = 'fooquery' query = c.query.create(query_service, query_name) # assert response contains query ID assert 'ID' in query \ and query['ID'] is not None \ and str(query['ID']) != '' # retrieve query using id and name queries = c.query.get(query['ID']) assert queries != [] \ and len(queries) == 1 assert queries[0]['Name'] == query_name \ and queries[0]['ID'] == query['ID'] # explain query assert c.query.explain(query_name)['Query'] # delete query assert c.query.delete(query['ID']) def test_coordinate(self, consul_port): c = consul.Consul(port=consul_port) c.coordinate.nodes() c.coordinate.datacenters() assert set(c.coordinate.datacenters()[0].keys()) == \ set(['Datacenter', 'Coordinates', 'AreaID']) def test_operator(self, consul_port): c = consul.Consul(port=consul_port) config = c.operator.raft_config() assert config["Index"] == 1 leader = False voter = False for server in config["Servers"]: if server["Leader"]: leader = True if server["Voter"]: voter = True assert leader assert voter
2.28125
2
Q58/sol.py
shivamT95/projecteuler
0
989
<filename>Q58/sol.py
import math


def is_prime(n):
    if n == 1:
        return False
    if n % 2 == 0 and n > 2:
        return False
    return all(n % i for i in range(3, int(math.sqrt(n)) + 1, 2))


tot = 1
dia = 0
for side_length in range(3, 100001, 2):
    hi = side_length**2
    for i in range(4):
        if is_prime(hi - i*side_length + i):
            dia = dia + 1
    tot = tot + 4
    if dia/tot < 0.1:
        print(side_length)
        break
3.390625
3
lesson_07/02.py
alexartwww/geekbrains
0
990
task = '''
Implement a project that calculates the total fabric consumption for clothing production.
The core entity (class) of the project is an item of clothing, which can have a specific name.
The clothing types in this project are a coat and a suit.
These clothing types have parameters: size (for the coat) and height (for the suit).
These can be plain numbers: V and H, respectively.
To determine the fabric consumption for each clothing type, use the formulas:
for the coat (V/6.5 + 0.5), for the suit (2 * H + 0.3).
Check that these methods work on real data.
Implement the overall calculation of fabric consumption.
Put the knowledge gained in this lesson into practice:
implement abstract classes for the project's main classes
and check in practice how the @property decorator works.
'''


class Clothes:
    @property
    def need_material(self):
        raise NotImplementedError("The method must be overridden")


class Costume(Clothes):
    def __init__(self, h):
        self.h = h

    @property
    def need_material(self):
        return 2 * self.h + 0.3


class Coat(Clothes):
    def __init__(self, v):
        self.v = v

    @property
    def need_material(self):
        return self.v / 6.5 + 0.5


if __name__ == '__main__':
    print(task)

    objects = [
        231, 22, Coat(32), 'test', True, Costume(87), Coat(32)
    ]

    need_material = 0
    for obj in objects:
        if isinstance(obj, Clothes):
            need_material += obj.need_material

    print(need_material)
2.5625
3
core/fanarttvapi.py
SchadLucas/pyscrape
0
991
<gh_stars>0
import urllib2
import json
import time

from core.helpers.decorator import Cached
from core.helpers.config import config
from core.helpers.logger import log, LogLevel


@Cached
def __request(request):
    log('Send Fanart Request: ' + request.replace(config.fanart.api_key, 'XXX'), 'DEBUG')
    headers = {'Accept': 'application/json'}
    _request = urllib2.Request(request, headers=headers)
    response_body = urllib2.urlopen(_request).read()
    result = json.loads(response_body)
    return result


def _get(video_type, movie_id, output_format='JSON'):
    req = '{0}{1}/{2}/{3}/{4}'.format(config.fanart.url_base, video_type,
                                      config.fanart.api_key, movie_id, output_format)
    try_again = True
    n = 0
    while try_again and n < 10:
        try:
            return __request(req)
        except urllib2.HTTPError:
            n += 1
            try_again = True
            log('Ooops.. FanartTV Error - Try again', LogLevel.Warning)
            time.sleep(2)


def get_movie(tmdb_id):
    return _get(video_type='movie', movie_id=tmdb_id)


def get_show(tvdb_id):
    return _get(video_type='series', movie_id=tvdb_id)
2.453125
2
metrics.py
AndreasLH/Image-Colourization
1
992
<reponame>AndreasLH/Image-Colourization
from math import log10, sqrt
import cv2
import numpy as np


def PSNR(original, compressed):
    '''
    Calculates the peak signal-to-noise ratio between a ground-truth image and a predicted image.

    see https://www.geeksforgeeks.org/python-peak-signal-to-noise-ratio-psnr/ for reference

    Parameters
    ----------
    original : cv2 image
        ground-truth image
    compressed : cv2 image
        predicted image

    Returns
    -------
    PSNR score
    '''
    mse = np.mean((original - compressed) ** 2)
    if mse == 0:
        # An MSE of zero means no noise is present in the signal,
        # so the PSNR is not meaningful; return a large value instead.
        return 100
    max_pixel = 255.0
    psnr = 20 * log10(max_pixel / sqrt(mse))
    return psnr


def colourfulnessMetric(img):
    """
    Created on Mon Nov 15 10:55:16 2021

    @author: Yucheng

    Parameters
    ----------
    img : cv2 RGB image

    Returns
    -------
    M : colourfulness metric

        -----------------------------
        |not colourful        |   0 |
        |slightly colourful   |  15 |
        |moderately colourful |  33 |
        |averagely colourful  |  45 |
        |quite colourful      |  59 |
        |highly colourful     |  82 |
        |extremely colourful  | 109 |
        -----------------------------
    """
    # Get RGB components
    R, G, B = cv2.split(img.astype("float"))

    # colourfulness metric from Hasler et al., section 7
    rg = R - G
    yb = (1/2) * (R + G) - B

    sigma_rgyb = np.sqrt(np.var(rg) + np.var(yb))
    mu_rgyb = np.sqrt(np.mean(rg)**2 + np.mean(yb)**2)

    M = sigma_rgyb + 0.3 * mu_rgyb

    return M


def main():
    import matplotlib.pyplot as plt

    original = cv2.imread("test_imgs/original_image.png")
    compressed = cv2.imread("test_imgs/compressed_image1.png", 1)
    value = PSNR(original, compressed)
    print(f"PSNR value is {value} dB")

    img2 = cv2.imread("rainbow.jpg")  # opens as BGR
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
    plt.imshow(img2[:, :, :])
    plt.show()
    M = colourfulnessMetric(img2)
    print(M)


if __name__ == "__main__":
    main()
3.5625
4
redirink/insights/tests/test_models.py
Egor4ik325/redirink
0
993
<filename>redirink/insights/tests/test_models.py
"""Test insight model is working the way it should."""
import pytest
from django.core.exceptions import ValidationError
from django.db import DataError

from .factories import InsightFactory

pytestmark = pytest.mark.django_db


def test_create_new_fake_visitor_instance_using_factory(visitor):
    pass


def test_create_new_instance_using_model_factory(insight):
    pass


def test_fake_instance_is_valid(insight):
    # Should not raise ValidationError
    insight.full_clean()


def test_fake_instance_have_right_fields(insight):
    assert isinstance(insight.id, int)
    assert insight.time is not None


def test_invalid_ip_address():
    with pytest.raises(DataError):
        InsightFactory(visitor__ip_address="invalid ip")


def test_valid_fake_ip_v6_address(faker):
    InsightFactory(visitor__ip_address=faker.ipv6())
2.265625
2
scripts/list-all-test-suites-for-ci.py
uc-cdis/gen3-qa
4
994
<reponame>uc-cdis/gen3-qa<gh_stars>1-10 import os import subprocess test_suites_that_cant_run_in_parallel = [ "test-apis-dbgapTest", # not thread-safe "test-google-googleDataAccessTest", # not thread-safe "test-google-googleServiceAccountRemovalTest", # not thread-safe "test-guppy-guppyTest", # not thread-safe "test-smokeTests-brainTests", # manual (executable test) "test-batch-GoogleBucketManifestGenerationTest", # @donot "test-batch-S3BucketManifestGenerationTest", # @donot "test-portal-dataguidOrgTest", # @donot "test-mariner-marinerIntegrationTest", # @donot "test-suites-fail", # special suite to force failures for invalid test labels "test-portal-roleBasedUITest", # manual (executable test) "test-portal-limitedFilePFBExportTestPlan", # manual (executable test) "test-access-accessGUITest", # manual (executable test) "test-portal-tieredAccessTest", # manual (executable test) "test-portal-discoveryPageTestPlan", # manual (executable test) "test-portal-dashboardReportsTest", # manual (executable test) "test-guppy-nestedAggTest", # manual (executable test) "test-portal-404pageTest", # manual (executable test) "test-apis-dcfDataReplicationTest", # manual (executable test) "test-portal-exportPfbToWorkspaceTest", # manual (executable test) "test-portal-homepageChartNodesExecutableTestPlan",# manual (executable test) "test-portal-profilePageTest", # manual (executable test) "test-portal-terraExportWarningTestPlan", # manual (executable test) "test-pelican-exportPfbTest", # not ready "test-regressions-exportPerformanceTest", # legacy (disabled test) "test-regressions-generateTestData", # legacy (disabled test) "test-regressions-queryPerformanceTest", # legacy (disabled test) "test-regressions-submissionPerformanceTest", # legacy (disabled test) "test-dream-challenge-DCgen3clientTest", # legacy (disabled test) "test-dream-challenge-synapaseLoginTest", # legacy (disabled test) "test-prod-checkAllProjectsBucketAccessTest", # prod test "test-portal-pfbExportTest", # nightly build test "test-apis-etlTest", # long-running test "test-apis-centralizedAuth", # long-running test "test-google-googleServiceAccountTest", # long-running test "test-google-googleServiceAccountKeyTest", # long-running test "test-portal-dataUploadTest", # SUPER long-running test "test-portal-indexingPageTest", # long-running test "test-apis-metadataIngestionTest", # long-running test "test-apis-auditServiceTest" # long-running test ] def collect_test_suites_from_codeceptjs_dryrun(): my_env = os.environ.copy() bashCommand = "npx codeceptjs dry-run" process = subprocess.Popen( bashCommand.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env ) output, error = process.communicate() test_suites = [] for line in output.splitlines(): line = line.decode("utf-8") # print(f'### line: {line}') # ignore pre-release test suites if "pre-release" in line: continue elif ".js" in line: full_path_to_test_js = line.split("/") suite_folder = full_path_to_test_js[-2] # print(f'## suite_folder: {suite_folder}') test_script = full_path_to_test_js[-1] # print(f'## test_script: {test_script}') test_script_without_extension = test_script[0 : test_script.index(".")] test_suite = f"test-{suite_folder}-{test_script_without_extension}" test_suites.append(test_suite) return test_suites def main(): test_suites = collect_test_suites_from_codeceptjs_dryrun() for ts in test_suites: if ts not in test_suites_that_cant_run_in_parallel: print(ts) # print(f"## ## test_suites: {test_suites}") # print(f"## test_suites size: {len(test_suites)}") if 
__name__ == "__main__": main()
1.632813
2
querybook/server/lib/query_executor/all_executors.py
set5think/querybook
1
995
from lib.utils.plugin import import_plugin

from .base_executor import parse_exception
from .executors.hive import HiveQueryExecutor
from .executors.presto import PrestoQueryExecutor
from .executors.sqlalchemy import (
    MysqlQueryExecutor,
    DruidQueryExecutor,
    SqliteQueryExecutor,
    SnowflakeQueryExecutor,
)
from .executors.bigquery import BigQueryQueryExecutor

ALL_PLUGIN_EXECUTORS = import_plugin("executor_plugin", "ALL_PLUGIN_EXECUTORS", [])

ALL_EXECUTORS = [
    HiveQueryExecutor,
    PrestoQueryExecutor,
    MysqlQueryExecutor,
    DruidQueryExecutor,
    SqliteQueryExecutor,
    BigQueryQueryExecutor,
    SnowflakeQueryExecutor,
] + ALL_PLUGIN_EXECUTORS


def get_executor_class(language: str, name: str):
    for executor in ALL_EXECUTORS:
        if (
            executor.EXECUTOR_LANGUAGE() == language
            and executor.EXECUTOR_NAME() == name
        ):
            return executor

    raise ValueError(f"Unknown executor {name} with language {language}")


# Re-export parse_exception
parse_exception
2.109375
2
bot/exts/info/pypi.py
MrGrote/bot
1
996
<filename>bot/exts/info/pypi.py
import itertools
import random
import re
from contextlib import suppress

from disnake import Embed, NotFound
from disnake.ext.commands import Cog, Context, command
from disnake.utils import escape_markdown

from bot.bot import Bot
from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
from bot.log import get_logger
from bot.utils.messages import wait_for_deletion

URL = "https://pypi.org/pypi/{package}/json"
PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"

PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))

ILLEGAL_CHARACTERS = re.compile(r"[^-_.a-zA-Z0-9]+")
INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay

log = get_logger(__name__)


class PyPi(Cog):
    """Cog for getting information about PyPi packages."""

    def __init__(self, bot: Bot):
        self.bot = bot

    @command(name="pypi", aliases=("package", "pack", "pip"))
    async def get_package_info(self, ctx: Context, package: str) -> None:
        """Provide information about a specific package from PyPI."""
        embed = Embed(title=random.choice(NEGATIVE_REPLIES), colour=Colours.soft_red)
        embed.set_thumbnail(url=PYPI_ICON)

        error = True

        if characters := re.search(ILLEGAL_CHARACTERS, package):
            embed.description = f"Illegal character(s) passed into command: '{escape_markdown(characters.group(0))}'"
        else:
            async with self.bot.http_session.get(URL.format(package=package)) as response:
                if response.status == 404:
                    embed.description = "Package could not be found."

                elif response.status == 200 and response.content_type == "application/json":
                    response_json = await response.json()
                    info = response_json["info"]

                    embed.title = f"{info['name']} v{info['version']}"

                    embed.url = info["package_url"]
                    embed.colour = next(PYPI_COLOURS)

                    summary = escape_markdown(info["summary"])

                    # Summary could be completely empty, or just whitespace.
                    if summary and not summary.isspace():
                        embed.description = summary
                    else:
                        embed.description = "No summary provided."

                    error = False

                else:
                    embed.description = "There was an error when fetching your PyPi package."
                    log.trace(f"Error when fetching PyPi package: {response.status}.")

        if error:
            error_message = await ctx.send(embed=embed)
            await wait_for_deletion(error_message, (ctx.author.id,), timeout=INVALID_INPUT_DELETE_DELAY)

            # Make sure that we won't cause a ghost-ping by deleting the message
            if not (ctx.message.mentions or ctx.message.role_mentions):
                with suppress(NotFound):
                    await ctx.message.delete()
                    await error_message.delete()

        else:
            await ctx.send(embed=embed)


def setup(bot: Bot) -> None:
    """Load the PyPi cog."""
    bot.add_cog(PyPi(bot))
2.328125
2
app/decorators.py
GinnyGaga/lanbo
0
997
from functools import wraps

from flask import abort
from flask_login import current_user

from .models import Permission


def permission_required(permission):
    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if not current_user.can(permission):
                abort(403)
            return f(*args, **kwargs)
        return decorated_function
    return decorator


def admin_required(f):
    return permission_required(Permission.ADMINISTER)(f)
2.34375
2
jnpy/experiments/Qt/pyqtgraph_tutorial/codeloop_org_materials/c4_drawing_curves.py
jojoquant/jnpy
5
998
<reponame>jojoquant/jnpy
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Datetime : 2019/11/14 2:26 AM
# @Author   : Fangyang
# @Software : PyCharm

import sys

from PyQt5.QtWidgets import QApplication
import pyqtgraph as pg
import numpy as np

app = QApplication(sys.argv)

x = np.arange(1000)
y = np.random.normal(size=(3, 1000))

plotWidget = pg.plot(title='Three plot curves')
for i in range(3):
    plotWidget.plot(x, y[i], pen=(i, 3))

status = app.exec_()
sys.exit(status)

if __name__ == '__main__':
    pass
2.296875
2
compyle/api.py
nauaneed/compyle
0
999
from .array import Array, wrap
from .ast_utils import (get_symbols, get_assigned,
                        get_unknown_names_and_calls, has_return, has_node)
from .config import get_config, set_config, use_config, Config
from .cython_generator import (
    CythonGenerator, get_func_definition
)
from .ext_module import ExtModule
from .extern import Extern
from .low_level import Kernel, LocalMem, Cython, cast
from .parallel import (
    Elementwise, Reduction, Scan, elementwise
)
from .profile import (
    get_profile_info, named_profile, profile, profile_ctx, print_profile,
    profile_kernel, ProfileContext, profile2csv
)
from .translator import (
    CConverter, CStructHelper, OpenCLConverter, detect_type,
    ocl_detect_type, py2c
)
from .types import KnownType, annotate, declare
from .utils import ArgumentParser
1.46875
1