content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (1 class)
---|---|---|---|---|---|---|---|---
from typing import List
import argparse
import chart_studio.plotly as py
import plotly.express as px
import pandas as pd
class TokyoCovid19Stat:
"""Holds Tokyo Covid-19 stat data."""
def __init__(self, csv_file_path: str = None):
self.csv_file_path = csv_file_path
self._df = None
self.area_list = []
def update(self) -> None:
df = pd.read_csv(self.csv_file_path,
parse_dates=['Date'])
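        # The CSV repeats the same set of areas for every date (the assumption the
        # loop below relies on), so the first repeated area ends the unique list.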
for area in df['Area']:
if area in self.area_list:
break
self.area_list.append(area)
df = df.pivot(index='Date', columns='Area', values='New Cases')
self._df = df[self.area_list]
@property
def df(self) -> pd.DataFrame:
if self._df is None:
self.update()
return self._df
@property
def cases_by_area(self) -> pd.DataFrame:
return self.df
@property
def cases(self) -> pd.DataFrame:
return pd.DataFrame({'Cases': self.cases_by_area.sum(axis=1)})
def sma(df: pd.DataFrame, days: int = 7) -> pd.DataFrame:
return df.rolling(days).mean()
def with_date(orig_df: pd.DataFrame) -> pd.DataFrame:
df = orig_df.copy()
df['Date'] = df.index.to_list()
return df
def melt(orig_df: pd.DataFrame,
value_columns: List[str],
var_name: str,
value_name: str = 'Cases') -> pd.DataFrame:
"""Unpivot the given DataFrame to be used with Plotly."""
df = with_date(orig_df)
df = df[['Date'] + value_columns]
return df.melt(id_vars=['Date'],
value_vars=value_columns,
var_name=var_name,
value_name=value_name)
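# Illustrative sketch of what melt() produces (ward names and counts are
# hypothetical, not taken from the real CSV). A wide frame such as
#     Date        Adachi  Chiyoda
#     2020-04-01       3        1
# becomes the long format Plotly Express expects:
#     Date        Area     Cases
#     2020-04-01  Adachi       3
#     2020-04-01  Chiyoda      1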
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--csv_file_path')
args = parser.parse_args()
if args.csv_file_path is None:
return
st = TokyoCovid19Stat(args.csv_file_path)
cases_by_area = melt(st.cases_by_area,
value_columns=st.area_list,
var_name='Area')
sma_by_area = melt(sma(st.cases_by_area),
value_columns=st.area_list,
var_name='Area')
# title = 'Tokyo Covid-19 New Cases By Area'
# fig = px.area(cases_by_area, x='Date', y='Cases', color='Area', title=title)
# py.plot(fig, filename=title, auto_open=False)
title = '[TEST] Tokyo Covid-19 New Cases 7-day Moving Average By Area'
fig = px.line(sma_by_area, x='Date', y='Cases', color='Area', title=title)
fig.add_bar(x=st.cases.index,
y=st.cases['Cases'],
name='Raw Total',
marker=dict(color='#dddddd'))
py.plot(fig, filename=title, auto_open=False)
if __name__ == '__main__':
main()
| 28.628866 | 82 | 0.593806 | ["MIT"] | kazush/tokyo_covid19_stat | stat_by_area.py | 2,777 | Python |
import os
import librosa.display as lbd
import matplotlib.pyplot as plt
import sounddevice
import soundfile
import torch
from InferenceInterfaces.InferenceArchitectures.InferenceHiFiGAN import HiFiGANGenerator
from InferenceInterfaces.InferenceArchitectures.InferenceTacotron2 import Tacotron2
from Preprocessing.TextFrontend import TextFrontend
class Nancy_Tacotron2(torch.nn.Module):
def __init__(self, device="cpu", speaker_embedding=None):
super().__init__()
self.speaker_embedding = None
self.device = device
self.text2phone = TextFrontend(language="en", use_word_boundaries=False,
use_explicit_eos=False, inference=True)
self.phone2mel = Tacotron2(path_to_weights=os.path.join("Models", "Tacotron2_Nancy", "best.pt"),
idim=166, odim=80, spk_embed_dim=None, reduction_factor=1).to(torch.device(device))
self.mel2wav = HiFiGANGenerator(path_to_weights=os.path.join("Models", "HiFiGAN_combined", "best.pt")).to(torch.device(device))
self.phone2mel.eval()
self.mel2wav.eval()
self.to(torch.device(device))
def forward(self, text, view=False):
with torch.no_grad():
phones = self.text2phone.string_to_tensor(text).squeeze(0).long().to(torch.device(self.device))
mel = self.phone2mel(phones, speaker_embedding=self.speaker_embedding).transpose(0, 1)
wave = self.mel2wav(mel)
if view:
fig, ax = plt.subplots(nrows=2, ncols=1)
ax[0].plot(wave.cpu().numpy())
lbd.specshow(mel.cpu().numpy(), ax=ax[1], sr=16000, cmap='GnBu', y_axis='mel', x_axis='time', hop_length=256)
ax[0].set_title(self.text2phone.get_phone_string(text))
ax[0].yaxis.set_visible(False)
ax[1].yaxis.set_visible(False)
plt.subplots_adjust(left=0.05, bottom=0.1, right=0.95, top=.9, wspace=0.0, hspace=0.0)
plt.show()
return wave
def read_to_file(self, text_list, file_location, silent=False):
"""
        :param silent: If True, suppress the progress messages printed during synthesis
:param text_list: A list of strings to be read
:param file_location: The path and name of the file it should be saved to
"""
wav = None
silence = torch.zeros([24000])
for text in text_list:
if text.strip() != "":
if not silent:
print("Now synthesizing: {}".format(text))
if wav is None:
wav = self(text).cpu()
wav = torch.cat((wav, silence), 0)
else:
wav = torch.cat((wav, self(text).cpu()), 0)
wav = torch.cat((wav, silence), 0)
soundfile.write(file=file_location, data=wav.cpu().numpy(), samplerate=48000)
def read_aloud(self, text, view=False, blocking=False):
if text.strip() == "":
return
wav = self(text, view).cpu()
wav = torch.cat((wav, torch.zeros([24000])), 0)
if not blocking:
sounddevice.play(wav.numpy(), samplerate=48000)
else:
sounddevice.play(torch.cat((wav, torch.zeros([12000])), 0).numpy(), samplerate=48000)
sounddevice.wait()
def plot_attention(self, sentence):
sentence_tensor = self.text2phone.string_to_tensor(sentence).squeeze(0).long().to(torch.device(self.device))
att = self.phone2mel(text=sentence_tensor, speaker_embedding=self.speaker_embedding, return_atts=True)
fig, axes = plt.subplots(nrows=1, ncols=1)
axes.imshow(att.detach().numpy(), interpolation='nearest', aspect='auto', origin="lower")
axes.set_title("{}".format(sentence))
axes.xaxis.set_visible(False)
axes.yaxis.set_visible(False)
plt.tight_layout()
plt.show()
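# Hedged usage sketch (file name and sentence are illustrative only; the class
# itself expects pretrained weights under Models/Tacotron2_Nancy and
# Models/HiFiGAN_combined, as wired up in __init__ above):
#
#     tts = Nancy_Tacotron2(device="cpu")
#     tts.read_to_file(["Hello there."], file_location="hello.wav")
#     tts.read_aloud("Hello there.", view=True, blocking=True)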
| 44.793103 | 135 | 0.622274 | ["Apache-2.0"] | DigitalPhonetics/IMS-Toucan | InferenceInterfaces/Nancy_Tacotron2.py | 3,897 | Python |
# -*- coding: UTF-8 -*-
# Copyright 2002-2019 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
See :ref:`lino` for non-technical documentation.
The :mod:`lino` package itself is the first plugin for all Lino
applications, added automatically to your :setting:`INSTALLED_APPS`. It defines
no models, but some template files, django admin commands, translation messages
and the core :xfile:`help_texts.py` file.
The :mod:`lino` package is the root for the subpackages that define core
functionalities:
.. autosummary::
:toctree:
core
hello
api
utils
mixins
projects
modlib
sphinxcontrib
management.commands
"""
# from __future__ import unicode_literals
# from __future__ import absolute_import
# from builtins import str
import sys
import os
from os.path import join, dirname
from .setup_info import SETUP_INFO
__version__ = SETUP_INFO['version']
intersphinx_urls = dict(docs="http://core.lino-framework.org")
srcref_url = 'https://github.com/lino-framework/lino/blob/master/%s'
# srcref_url = 'https://github.com/lino-framework/lino/tree/master/%s'
doc_trees = ['docs']
if sys.version_info[0] > 2:
PYAFTER26 = True
elif sys.version_info[0] == 2 and sys.version_info[1] > 6:
PYAFTER26 = True
else:
PYAFTER26 = False
import warnings
warnings.filterwarnings(
"error", "DateTimeField .* received a naive datetime (.*) while time zone support is active.",
RuntimeWarning, "django.db.models.fields")
from django.conf import settings
from django.apps import AppConfig
# def setup_project(settings_module):
# os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
# from lino.api.shell import settings
DJANGO_DEFAULT_LANGUAGE = 'en-us'
def assert_django_code(django_code):
if '_' in django_code:
raise Exception("Invalid language code %r. "
"Use values like 'en' or 'en-us'." % django_code)
from django import VERSION
AFTER17 = True
AFTER18 = True
DJANGO2 = True
if VERSION[0] == 1:
DJANGO2 = False
if VERSION[1] < 10:
raise Exception("Unsupported Django version %s" % VERSION)
# if VERSION[1] > 6:
# AFTER17 = True
# if VERSION[1] > 8:
# AFTER18 = True
elif VERSION[0] == 2:
AFTER17 = True
AFTER18 = True
else:
raise Exception("Unsupported Django version %s" % VERSION)
def startup(settings_module=None):
"""
Start up Django and Lino.
Optional `settings_module` is the name of a Django settings
module. If this is specified, set the
:envvar:`DJANGO_SETTINGS_MODULE` environment variable.
This is called automatically when a process is invoked by an
*admin command*.
In a document to be tested using :cmd:`doctest` you need to call
it manually using e.g.:
>>> import lino
>>> lino.startup('my.project.settings')
    The above two lines are recommended over the old-style method (the
    only one available until Django 1.6)::
>>> import os
>>> os.environ['DJANGO_SETTINGS_MODULE'] = 'my.project.settings'
"""
if settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
import django
django.setup()
class AppConfig(AppConfig):
"""This is the only :class:`django.apps.AppConfig` object used by
Lino.
Lino applications use the :class:`lino.core.plugins.Plugin`
because it has some additional functionality.
"""
name = 'lino'
def ready(self):
if False:
settings.SITE.startup()
else:
try:
settings.SITE.startup()
except ImportError as e:
import traceback
# traceback.print_exc(e)
# sys.exit(-1)
raise Exception("ImportError during startup:\n" +
traceback.format_exc(e))
except Exception as e:
print(e)
raise
default_app_config = 'lino.AppConfig'
# deprecated use, only for backwards compat:
from django.utils.translation import ugettext_lazy as _
| 25.670886 | 98 | 0.66716 | ["BSD-2-Clause"] | NewRGB/lino | lino/__init__.py | 4,056 | Python |
import decimal
from threading import local
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
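        # Illustrative only: `settings_dict` mirrors one entry of the project's
        # DATABASES setting, e.g. {'ENGINE': '...', 'NAME': 'app.db', 'USER': '',
        # 'PASSWORD': '', 'HOST': '', 'PORT': ''}; the exact keys depend on the
        # backend.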
def __eq__(self, other):
return self.settings_dict == other.settings_dict
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
from django.conf import settings
cursor = self._cursor()
if settings.DEBUG:
return self.make_debug_cursor(cursor)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
interprets_empty_strings_as_nulls = False
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = {}
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
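        # For example (hypothetical query): sql='SELECT * FROM t WHERE id = %s'
        # with params=(42,) is rendered as u'SELECT * FROM t WHERE id = 42'.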
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if compiler_name not in self._cache:
self._cache[compiler_name] = getattr(
import_module(self.compiler_module), compiler_name
)
return self._cache[compiler_name]
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
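        # Worked example, derived from the format strings above:
        #   year_lookup_bounds(2005) == ['2005-01-01 00:00:00',
        #                                '2005-12-31 23:59:59.999999']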
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplemented.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
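        # For example: combine_expression('+', ['"price"', '"tax"'])
        # returns '"price" + "tax"'.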
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
tables = [t for t in tables if self.table_name_converter(t) in self.table_names()]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
return set([m for m in all_models
if self.table_name_converter(m._meta.db_table) in map(self.table_name_converter, tables)
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| 35.82069 | 102 | 0.639632 | ["BSD-3-Clause"] | t11e/django | django/db/backends/__init__.py | 20,776 | Python |
import os
import subprocess
import requests
import re
import sys
esports_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, esports_dir)
from utils import key, check_exists_other, analyze_subtitles
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
data_directory_pattern = r'D:\Data\speech\esports\valorant\{}\game_changers'
raw_data_directory = data_directory_pattern.format('raw')
other_places = [data_directory_pattern.format(x) for x in ['cleaned', 'segmented', 'to_transcribe', 'transcribed']]
os.makedirs(raw_data_directory, exist_ok=True)
channel_id = 'Raidiant'
playlist_ids = [
'PL51yT_X4kWdt1xpelG89oepg_db7J3bso',
]
playlist_names = {
'PL51yT_X4kWdt1xpelG89oepg_db7J3bso': 'Game Changers NA',
}
def download_audios():
for playlist_id in playlist_ids:
cursor = None
while True:
if not cursor:
url = 'https://www.googleapis.com/youtube/v3/playlistItems?key={}&playlistId={}&part=snippet,id&order=date&maxResults=50'.format(
key, playlist_id)
else:
url = 'https://www.googleapis.com/youtube/v3/playlistItems?key={}&playlistId={}&part=snippet,id&order=date&maxResults=50&pageToken={}'.format(
key, playlist_id, cursor)
response = requests.get(url)
data = response.json()
print(data)
for item in data['items']:
try:
v_id = item['snippet']['resourceId']['videoId']
except KeyError:
continue
url = 'https://www.youtube.com/watch?v=' + v_id
if item['snippet']['title'].lower().startswith('overwatch league 2021'):
continue
d = item['snippet']['publishedAt'].split('T')[0]
title = re.sub(r'[^-\w _]', '', item['snippet']['title'])
new_id = '{} - {}'.format(playlist_names[playlist_id], title).replace('|', '-').replace(' ', '_')
out_template = '{}.%(ext)s'.format(new_id)
audio_file = '{}.flac'.format(new_id)
if check_exists_other(audio_file, other_places):
continue
audio_path = os.path.join(raw_data_directory, audio_file)
if not os.path.exists(audio_path):
video_path = os.path.join(raw_data_directory, '{}.m4a'.format(new_id))
subprocess.call(['youtube-dl', "-f 140", '--write-auto-sub',
'--sub-format', 'vtt', '--sub-lang', 'en',
'-o', out_template,
url, ], cwd=raw_data_directory)
subprocess.call(['ffmpeg', '-i', video_path, '-vn', '-map_channel', '0.0.0', '-c:a', 'flac', '-sample_fmt', 's16', '-ar', '16000', audio_path])
if os.path.exists(video_path):
os.remove(video_path)
vtt_path = os.path.join(raw_data_directory, '{}.en.vtt'.format(new_id))
tg_path = vtt_path.replace('.en.vtt', '.TextGrid')
if not os.path.exists(tg_path):
if not os.path.exists(vtt_path):
subprocess.call(['youtube-dl', '--write-auto-sub',
'--skip-download',
'--sub-format', 'vtt', '--sub-lang', 'en',
'-o', out_template,
url, ], cwd=raw_data_directory)
if os.path.exists(vtt_path):
analyze_subtitles(vtt_path)
try:
cursor = data['nextPageToken']
except KeyError:
break
if __name__ == '__main__':
    download_audios()
| 46.392857 | 163 | 0.537336 | ["CC0-1.0"] | mmcauliffe/corpus-creation-scripts | esports/valorant/scrape_game_changers.py | 3,897 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .thing import Thing
class CreativeWork(Thing):
"""The most generic kind of creative work, including books, movies,
photographs, software programs, etc.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Action, MediaObject, Recipe
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:param _type: Required. Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar read_link: The URL that returns this resource. To use the URL,
append query parameters as appropriate and include the
Ocp-Apim-Subscription-Key header.
:vartype read_link: str
:ivar web_search_url: The URL to Bing's search result for this item.
:vartype web_search_url: str
:ivar name: The name of the thing represented by this object.
:vartype name: str
:ivar url: The URL to get more information about the thing represented by
this object.
:vartype url: str
:ivar image: An image of the item.
:vartype image:
~azure.cognitiveservices.search.visualsearch.models.ImageObject
:ivar description: A short description of the item.
:vartype description: str
:ivar alternate_name: An alias for the item.
:vartype alternate_name: str
:ivar bing_id: An ID that uniquely identifies this item.
:vartype bing_id: str
:ivar thumbnail_url: The URL to a thumbnail of the item.
:vartype thumbnail_url: str
:ivar provider: The source of the creative work.
:vartype provider:
list[~azure.cognitiveservices.search.visualsearch.models.Thing]
:ivar date_published: The date on which the CreativeWork was published.
:vartype date_published: str
:ivar text: Text content of this creative work.
:vartype text: str
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'read_link': {'readonly': True},
'web_search_url': {'readonly': True},
'name': {'readonly': True},
'url': {'readonly': True},
'image': {'readonly': True},
'description': {'readonly': True},
'alternate_name': {'readonly': True},
'bing_id': {'readonly': True},
'thumbnail_url': {'readonly': True},
'provider': {'readonly': True},
'date_published': {'readonly': True},
'text': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'read_link': {'key': 'readLink', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'image': {'key': 'image', 'type': 'ImageObject'},
'description': {'key': 'description', 'type': 'str'},
'alternate_name': {'key': 'alternateName', 'type': 'str'},
'bing_id': {'key': 'bingId', 'type': 'str'},
'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
'provider': {'key': 'provider', 'type': '[Thing]'},
'date_published': {'key': 'datePublished', 'type': 'str'},
'text': {'key': 'text', 'type': 'str'},
}
_subtype_map = {
'_type': {'Action': 'Action', 'MediaObject': 'MediaObject', 'Recipe': 'Recipe'}
}
def __init__(self, **kwargs) -> None:
super(CreativeWork, self).__init__(**kwargs)
self.thumbnail_url = None
self.provider = None
self.date_published = None
self.text = None
self._type = 'CreativeWork'
| 39.074766 | 87 | 0.604162 | ["MIT"] | AlexanderYukhanov/azure-sdk-for-python | azure-cognitiveservices-search-visualsearch/azure/cognitiveservices/search/visualsearch/models/creative_work_py3.py | 4,181 | Python |
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
class UnicornException(Exception):
def __init__(self, name: str):
self.name = name
app = FastAPI()
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
return JSONResponse(
status_code=418,
content={"message": f"Oops! {exc.name} did something. There goes a rainbow..."},
)
@app.get("/unicorns/{name}")
async def read_unicorn(name: str):
if name == "yolo":
raise UnicornException(name=name)
return {"unicorn_name": name}
| 24.076923 | 88 | 0.699681 | ["MIT"] | 0417taehyun/fastapi | docs_src/handling_errors/tutorial003.py | 626 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is very different to AboutModules in Ruby Koans
# Our AboutMultipleInheritance class is a little more comparable
#
from runner.koan import *
#
# Package hierarchy of Python Koans project:
#
# contemplate_koans.py
# koans/
# __init__.py
# about_asserts.py
# about_attribute_access.py
# about_class_attributes.py
# about_classes.py
# ...
# a_package_folder/
# __init__.py
# a_module.py
class AboutPackages(Koan):
def test_subfolders_can_form_part_of_a_module_package(self):
# Import ./a_package_folder/a_module.py
from a_package_folder.a_module import Duck
duck = Duck()
self.assertEqual("Donald", duck.name)
def test_subfolders_become_modules_if_they_have_an_init_module(self):
# Import ./a_package_folder/__init__.py
from a_package_folder import an_attribute
self.assertEqual(1984, an_attribute)
def test_subfolders_without_an_init_module_are_not_part_of_the_package(self):
# Import ./a_normal_folder/
try:
import a_normal_folder
except ImportError as ex:
self.assertMatch('normal', ex[0])
# ------------------------------------------------------------------
def test_use_absolute_imports_to_import_upper_level_modules(self):
# Import /contemplate_koans.py
import contemplate_koans
self.assertEqual('contemplate_koans', contemplate_koans.__name__)
# contemplate_koans.py is the root module in this package because its
# the first python module called in koans.
#
# If contemplate_koan.py was based in a_package_folder that would be
# the root folder, which would make reaching the koans folder
# almost impossible. So always leave the starting python script in
# a folder which can reach everything else.
def test_import_a_module_in_a_subfolder_using_an_absolute_path(self):
# Import contemplate_koans.py/koans/a_package_folder/a_module.py
from koans.a_package_folder.a_module import Duck
self.assertEqual('koans.a_package_folder.a_module', Duck.__module__)
| 31.942029 | 81 | 0.687387 | ["MIT"] | nitinnain/python_koans | python2/koans/about_packages.py | 2,204 | Python |
""" python site scraping tool """
import xml.etree.ElementTree as ET
from StringIO import StringIO
import unicodedata
import re
import requests
from BuildItParser import BuildItParser
def http_get(url):
""" simple wrapper around http get """
try:
request = requests.get(url)
# not concerned with returning nice utf-8, as only the urls count
text = unicodedata.normalize('NFKD', request.text
).encode('ascii', 'ignore')
return (text, 200)
except requests.HTTPError as http_error:
if request.status_code == 404:
print "{} not found: {}".format(url, http_error)
return ("", 404)
else:
# simplify all other errors as 500's
print "error retrieving {}: {}".format(url, http_error)
return ("", 500)
def process_html(html_page, this_parser):
""" extract links from an html page """
this_parser.feed(html_page)
return {
"int_links": this_parser.int_links,
"ext_links": this_parser.ext_links,
"static_links": this_parser.static_links
}
def process_xml(xml_sitemap, regex):
""" extract links from xml """
site_map_paths = set([])
url_paths = set([])
try:
# need to strip namespaces
ns_stripped = ET.iterparse(StringIO(xml_sitemap))
for _, element in ns_stripped:
if '}' in element.tag:
element.tag = element.tag.split('}', 1)[1]
xml_root = ns_stripped.root
for found_sitemap in xml_root.findall('sitemap'):
sitemap_loc = found_sitemap.find('loc')
new_sitemap = sitemap_loc.text
new_path = regex.search(new_sitemap)
if new_path is not None:
site_map_paths.add(new_path.group(1))
for found_url in xml_root.findall('url'):
url_loc = found_url.find('loc')
new_url = url_loc.text
new_path = regex.search(new_url)
if new_path is not None:
new_path = new_path.group(1)
url_paths.add(new_path)
except Exception as XML_Error:
print "Exception trying to parse sitemap: {}".format(XML_Error)
raise XML_Error
return (site_map_paths, url_paths)
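# Illustrative example (hypothetical sitemap entry): with the regex built in
# main() for http://wiprodigital.com, a <url><loc>http://wiprodigital.com/who-we-are</loc></url>
# element ends up in url_paths as '/who-we-are', while nested
# <sitemap><loc>...</loc></sitemap> elements land in site_map_paths.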
def main():
""" main function """
site = "http://wiprodigital.com"
site_regex = re.compile(r"{}(.+)$".format(site))
site_structure = []
paths_to_visit = set(["index.html", "index.php"])
paths_visited = set([])
sitemaps = set(["sitemap.xml"])
sitemaps_still_to_process = True
while sitemaps_still_to_process:
# print "Processing paths..."
num_sitemaps = len(sitemaps)
for sitemap in sitemaps:
sitemap_url = "{}/{}".format(site, sitemap)
# print "sitemap: {}".format(sitemap_url)
(xml, http_code) = http_get(sitemap_url)
(sitemap_paths, url_paths) = process_xml(xml, site_regex)
new_sitemaps = set([])
for sitemap_path in sitemap_paths:
new_sitemaps.add(sitemap_path)
for url_path in url_paths:
paths_to_visit.add(url_path)
sitemaps = sitemaps.union(new_sitemaps)
if num_sitemaps == len(sitemaps):
sitemaps_still_to_process = False
html_parser = BuildItParser(site_regex)
paths_still_to_process = True
while paths_still_to_process:
num_paths = len(paths_to_visit)
new_paths = set([])
# print "Processing paths..."
for path in paths_to_visit:
if path not in paths_visited:
page_url = "{}/{}".format(site, path)
# print "page: {}".format(page_url)
(page, code) = http_get(page_url)
if code == 200:
new_page = process_html(page, html_parser)
new_page["path"] = path
site_structure.append(new_page)
for internal_link in new_page["int_links"]:
if internal_link not in paths_visited:
new_paths.add(internal_link)
paths_visited.add(path)
paths_to_visit = paths_to_visit.union(new_paths)
if num_paths == len(paths_to_visit):
# no new paths added
paths_still_to_process = False
print "SITE: {}".format(site)
for page in sorted(site_structure, key=lambda p: p["path"]):
print "PAGE: {}".format(page["path"])
for int_link in page["int_links"]:
print " internal link: {}".format(int_link)
for ext_link in page["ext_links"]:
print " external link: {}".format(ext_link)
for static_link in page["static_links"]:
print " static link: {}".format(static_link)
if __name__ == "__main__":
main()
| 35.532847 | 73 | 0.588537 | ["MIT"] | PhilipHarries/bi_scraper | scraper.py | 4,868 | Python |
from django.contrib.auth.decorators import permission_required
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from catalog import models as cmod
from django_mako_plus import view_function, jscontext
import requests
import json
# @permission_required('manager')
@view_function
def process_request (request):
category_name = request.GET.get('category')
product_name = request.GET.get('name')
max_price = request.GET.get('max_price')
page = request.GET.get('page')
if page is not None:
pnum = int(page)
else:
pnum = 1
products = []
qry = cmod.Product.objects.all()
if product_name:
qry = qry.filter(name__icontains=product_name)
if max_price:
qry = qry.filter(price__lte=max_price)
if category_name:
qry = qry.filter(category__name__icontains=category_name)
qry = qry.order_by('category','name')
for p in qry:
item = {
'category': p.category.name,
'name': p.name,
'price': p.price,
}
products.append(item)
products = products[(pnum - 1)*6:pnum*6]
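    # Simple slice-based pagination: six products per page, so e.g. page=2
    # returns the 7th through 12th matching products.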
return JsonResponse(products, safe=False)
| 33.108108 | 72 | 0.665306 | ["Apache-2.0"] | whitneyann/INTEX2 | catalog/views/search.py | 1,225 | Python |
import os
import scipy.misc as misc
import shutil
import cv2
import Constants
import numpy as np
from skimage import morphology
def extract_each_layer(image, threshold):
"""
    This image processing function is designed for OCT image post-processing.
    It removes small regions and finds the OCT layer boundary under the
    specified threshold.
    :param image: 2-D grayscale image array to be thresholded
    :param threshold: binarization threshold applied before boundary extraction
    :return: a 1 x image-width array with the detected boundary row index per column
"""
# convert the output to the binary image
ret, binary = cv2.threshold(image, threshold, 1, cv2.THRESH_BINARY)
bool_binary = np.array(binary, bool)
# remove the small object
remove_binary = morphology.remove_small_objects(bool_binary, min_size=25000,
connectivity=2,
in_place=False)
c = np.multiply(bool_binary, remove_binary)
final_binary = np.zeros(shape=np.shape(binary))
final_binary[c == True] = 1
binary_image = cv2.filter2D(final_binary, -1, np.array([[-1], [1]]))
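    # The 2x1 kernel [[-1], [1]] responds at row-wise transitions of the binary
    # mask, so the first positive response in each column (taken below)
    # approximates the upper boundary of the segmented layer.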
layer_one = np.zeros(shape=[1, np.shape(binary_image)[1]])
for i in range(np.shape(binary_image)[1]):
location_point = np.where(binary_image[:, i] > 0)[0]
# print(location_point)
if len(location_point) == 1:
layer_one[0, i] = location_point
elif len(location_point) == 0:
layer_one[0, i] = layer_one[0, i-1]
else:
layer_one[0, i] = location_point[0]
return layer_one
if __name__ == '__main__':
image_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-images/' \
'oct202.png'
gt_path = '/home/jimmyliu/Zaiwang/crop-OCT/train/562.fds/crop-gt/' \
'oct202.png'
image = cv2.imread(image_path)
gt = cv2.imread(gt_path, cv2.IMREAD_GRAYSCALE)
cv2.imwrite('gt.png', gt)
print(np.max(image), np.shape(image))
    print(np.max(gt), np.shape(gt))
| 34.22807 | 98 | 0.618145 | ["Apache-2.0"] | TobyLing/Comparative-Study-of-Deep-Learning-Models-for-Segmentation-of-Corpus-Callosum | image_utils.py | 1,951 | Python |
class ModelNotFoundException(Exception):
pass
class UnknownFunctionException(Exception):
pass
| 14.857143 | 42 | 0.788462 | ["MIT"] | paul-wolf/djaq | djaq/exceptions.py | 104 | Python |
# qubit number=3
# total number=31
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
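# Worked examples for the two helpers above (easy to verify by hand):
#   bitwise_dot('011', '110') == '1'   # 0*1 + 1*1 + 1*0 = 1 (mod 2)
#   bitwise_xor('1', '1') == '0'       # how the tests below combine a.x with b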
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
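# Standard phase-kickback view of this oracle (general Bernstein-Vazirani
# reasoning, not specific to this file): O_f maps |x>|y> to |x>|y XOR f(x)>,
# so with the last qubit prepared in |-> by build_circuit below, each basis
# state |x> picks up a (-1)^f(x) phase.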
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=28
prog.cz(input_qubit[0],input_qubit[2]) # number=29
prog.h(input_qubit[2]) # number=30
prog.x(input_qubit[2]) # number=12
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit166.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('qasm_simulator')
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 29.99 | 140 | 0.631044 | ["BSD-3-Clause"] | UCLA-SEAL/QDiff | data/p3BR/R2/benchmark/startQiskit166.py | 5,998 | Python |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
from urlparse import parse_qsl, urlparse
from django.conf import settings
from django.http import HttpResponse
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.utils import simplejson
from funfactory.urlresolvers import reverse
from mock import ANY, call, Mock, patch
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from bedrock.firefox import views as fx_views
from bedrock.firefox.firefox_details import FirefoxDetails, MobileDetails
from bedrock.firefox.utils import product_details
from bedrock.mozorg.tests import TestCase
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
PROD_DETAILS_DIR = os.path.join(TEST_DATA_DIR, 'product_details_json')
GOOD_PLATS = {'Windows': {}, 'OS X': {}, 'Linux': {}}
with patch.object(settings, 'PROD_DETAILS_DIR', PROD_DETAILS_DIR):
firefox_details = FirefoxDetails()
mobile_details = MobileDetails()
class TestInstallerHelp(TestCase):
def setUp(self):
self.button_mock = Mock()
self.patcher = patch.dict('jingo.env.globals',
download_firefox=self.button_mock)
self.patcher.start()
self.view_name = 'firefox.installer-help'
with self.activate('en-US'):
self.url = reverse(self.view_name)
def tearDown(self):
self.patcher.stop()
def test_buttons_use_lang(self):
"""
The buttons should use the lang from the query parameter.
"""
self.client.get(self.url, {
'installer_lang': 'fr'
})
self.button_mock.assert_has_calls([
call(force_direct=True, force_full_installer=True, locale='fr'),
call('beta', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale='fr'),
call('aurora', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale='fr'),
])
def test_buttons_ignore_non_lang(self):
"""
The buttons should ignore an invalid lang.
"""
self.client.get(self.url, {
'installer_lang': 'not-a-locale'
})
self.button_mock.assert_has_calls([
call(force_direct=True, force_full_installer=True, locale=None),
call('beta', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale=None),
call('aurora', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale=None),
])
def test_invalid_channel_specified(self):
"""
All buttons should show when channel is invalid.
"""
self.client.get(self.url, {
'channel': 'dude',
})
self.button_mock.assert_has_calls([
call(force_direct=True, force_full_installer=True, locale=None),
call('beta', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale=None),
call('aurora', small=ANY, force_direct=True,
force_full_installer=True, icon=ANY, locale=None),
])
def test_one_button_when_channel_specified(self):
"""
There should be only one button when the channel is given.
"""
self.client.get(self.url, {
'channel': 'beta',
})
self.button_mock.assert_called_once_with('beta', force_direct=True,
force_full_installer=True,
locale=None)
@patch.object(fx_views, 'firefox_details', firefox_details)
class TestFirefoxDetails(TestCase):
def test_get_download_url(self):
url = firefox_details.get_download_url('OS X', 'pt-BR', '17.0.1')
self.assertListEqual(parse_qsl(urlparse(url).query),
[('product', 'firefox-17.0.1-SSL'),
('os', 'osx'),
('lang', 'pt-BR')])
# Linux 64-bit
url = firefox_details.get_download_url('Linux 64', 'en-US', '17.0.1')
self.assertListEqual(parse_qsl(urlparse(url).query),
[('product', 'firefox-17.0.1-SSL'),
('os', 'linux64'),
('lang', 'en-US')])
@patch.dict(firefox_details.firefox_versions,
FIREFOX_AURORA='28.0a2')
def test_get_download_url_aurora(self):
"""The Aurora version should give us an FTP url."""
url = firefox_details.get_download_url('OS X', 'en-US', '28.0a2')
self.assertIn('ftp.mozilla.org', url)
self.assertIn('latest-mozilla-aurora/firefox-28.0a2.en-US.mac.dmg', url)
@patch.dict(firefox_details.firefox_versions,
FIREFOX_AURORA='28.0a2')
def test_get_download_url_aurora_l10n(self):
"""Aurora non en-US should have a slightly different path."""
url = firefox_details.get_download_url('Linux', 'pt-BR', '28.0a2')
self.assertIn('ftp.mozilla.org', url)
self.assertIn('latest-mozilla-aurora-l10n/firefox-28.0a2.pt-BR.linux-i686.tar.bz2',
url)
@override_settings(STUB_INSTALLER_LOCALES={'win': settings.STUB_INSTALLER_ALL})
def get_download_url_ssl(self):
"""
SSL-enabled links should always be used except Windows stub installers.
"""
# SSL-enabled links won't be used for Windows builds (but SSL download
# is enabled by default for stub installers)
url = firefox_details.get_download_url('Windows', 'pt-BR', '27.0')
self.assertListEqual(parse_qsl(urlparse(url).query),
[('product', 'firefox-27.0'),
('os', 'win'),
('lang', 'pt-BR')])
# SSL-enabled links will be used for OS X builds
url = firefox_details.get_download_url('OS X', 'pt-BR', '27.0')
self.assertListEqual(parse_qsl(urlparse(url).query),
[('product', 'firefox-27.0-SSL'),
('os', 'osx'),
('lang', 'pt-BR')])
# SSL-enabled links will be used for Linux builds
url = firefox_details.get_download_url('Linux', 'pt-BR', '27.0')
self.assertListEqual(parse_qsl(urlparse(url).query),
[('product', 'firefox-27.0-SSL'),
('os', 'linux'),
('lang', 'pt-BR')])
def test_filter_builds_by_locale_name(self):
# search english
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
'ujara'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'Gujarati')
# search native
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
u'જરા'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'Gujarati')
# with a space
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
'british english'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'English (British)')
# with a comma
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release'),
u'French, Français'
)
eq_(len(builds), 1)
eq_(builds[0]['name_en'], 'French')
def test_linux64_build(self):
builds = firefox_details.get_filtered_full_builds(
firefox_details.latest_version('release')
)
url = builds[0]['platforms']['Linux 64']['download_url']
eq_(parse_qsl(urlparse(url).query)[1], ('os', 'linux64'))
@patch.dict(firefox_details.firefox_versions,
FIREFOX_ESR='24.2')
def test_esr_major_versions(self):
"""ESR versions should be dynamic based on data."""
eq_(firefox_details.esr_major_versions, [24])
@patch.dict(firefox_details.firefox_versions,
FIREFOX_ESR='24.6.0',
FIREFOX_ESR_NEXT='31.0.0')
def test_esr_major_versions_prev(self):
"""ESR versions should show previous when available."""
eq_(firefox_details.esr_major_versions, [24, 31])
@patch.dict(firefox_details.firefox_versions,
LATEST_FIREFOX_VERSION='Phoenix',
FIREFOX_ESR='Albuquerque')
def test_esr_major_versions_no_latest(self):
"""ESR versions should not blow up if current version is broken."""
eq_(firefox_details.esr_major_versions, [])
@patch.dict(firefox_details.firefox_versions,
LATEST_FIREFOX_VERSION='18.0.1')
def test_latest_major_version(self):
"""latest_major_version should return an int of the major version."""
eq_(firefox_details.latest_major_version('release'), 18)
@patch.dict(firefox_details.firefox_versions,
LATEST_FIREFOX_VERSION='Phoenix')
def test_latest_major_version_no_int(self):
"""latest_major_version should return 0 when no int."""
eq_(firefox_details.latest_major_version('release'), 0)
@patch.object(fx_views, 'mobile_details', mobile_details)
class TestMobileDetails(TestCase):
@patch.dict(mobile_details.mobile_details,
version='22.0.1')
def test_latest_release_version(self):
"""latest_version should return the latest release version."""
eq_(mobile_details.latest_version('release'), '22.0.1')
@patch.dict(mobile_details.mobile_details,
beta_version='23.0')
def test_latest_beta_version(self):
"""latest_version should return the latest beta version."""
eq_(mobile_details.latest_version('beta'), '23.0')
@patch.object(fx_views, 'firefox_details', firefox_details)
class TestFirefoxAll(TestCase):
def setUp(self):
with self.activate('en-US'):
self.url = reverse('firefox.all')
def test_no_search_results(self):
"""
Tables should be gone and not-found message should be shown when there
are no search results.
"""
resp = self.client.get(self.url + '?q=DOES_NOT_EXIST')
doc = pq(resp.content)
ok_(not doc('table.build-table'))
ok_(not doc('.not-found.hide'))
def test_no_search_query(self):
"""
When not searching all builds should show.
"""
resp = self.client.get(self.url)
doc = pq(resp.content)
eq_(len(doc('.build-table')), 2)
eq_(len(doc('.not-found.hide')), 2)
release = firefox_details.latest_version('release')
num_builds = len(firefox_details.get_filtered_full_builds(release))
num_builds += len(firefox_details.get_filtered_test_builds(release))
eq_(len(doc('tr[data-search]')), num_builds)
def test_no_locale_details(self):
"""
When a localized build has been added to the Firefox details while the
locale details are not updated yet, the filtered build list should not
include the localized build.
"""
release = firefox_details.latest_version('release')
builds = firefox_details.get_filtered_full_builds(release)
ok_('uz' in firefox_details.firefox_primary_builds)
ok_('uz' not in firefox_details.languages)
eq_(len([build for build in builds if build['locale'] == 'uz']), 0)
class TestFirefoxPartners(TestCase):
@patch('bedrock.firefox.views.settings.DEBUG', True)
def test_js_bundle_files_debug_true(self):
"""
When DEBUG is on the bundle should return the individual files
with the MEDIA_URL.
"""
bundle = 'partners_desktop'
files = settings.MINIFY_BUNDLES['js'][bundle]
files = [settings.MEDIA_URL + f for f in files]
self.assertEqual(files,
json.loads(fx_views.get_js_bundle_files(bundle)))
@patch('bedrock.firefox.views.settings.DEBUG', False)
def test_js_bundle_files_debug_false(self):
"""
When DEBUG is off the bundle should return a single minified filename.
"""
bundle = 'partners_desktop'
filename = '%sjs/%s-min.js?build=' % (settings.MEDIA_URL, bundle)
bundle_file = json.loads(fx_views.get_js_bundle_files(bundle))
self.assertEqual(len(bundle_file), 1)
self.assertTrue(bundle_file[0].startswith(filename))
@patch('bedrock.mozorg.views.requests.post')
def test_sf_form_proxy_error_response(self, post_patch):
"""An error response from SF should be returned."""
new_mock = Mock()
new_mock.status_code = 400
post_patch.return_value = new_mock
with self.activate('en-US'):
url = reverse('mozorg.partnerships')
resp = self.client.post(url, {
'first_name': 'The',
'last_name': 'Dude',
'company': 'Urban Achievers',
'email': '[email protected]',
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 400)
# decode JSON response
resp_data = simplejson.loads(resp.content)
self.assertEqual(resp_data['msg'], 'bad_request')
self.assertTrue(post_patch.called)
@patch('bedrock.mozorg.views.requests.post')
def test_sf_form_proxy_invalid_form(self, post_patch):
"""A form error should result in a 400 response."""
with self.activate('en-US'):
url = reverse('mozorg.partnerships')
resp = self.client.post(url, {
'first_name': 'Dude' * 20,
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 400)
# decode JSON response
resp_data = simplejson.loads(resp.content)
self.assertEqual(resp_data['msg'], 'Form invalid')
self.assertFalse(post_patch.called)
@patch('bedrock.mozorg.views.requests.post')
def test_sf_form_proxy(self, post_patch):
new_mock = Mock()
new_mock.status_code = 200
post_patch.return_value = new_mock
with self.activate('en-US'):
url = reverse('mozorg.partnerships')
resp = self.client.post(url, {
'first_name': 'The',
'last_name': 'Dude',
'title': 'Abider of things',
'company': 'Urban Achievers',
'email': '[email protected]',
}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(resp.status_code, 200)
# decode JSON response
resp_data = simplejson.loads(resp.content)
self.assertEqual(resp_data['msg'], 'ok')
post_patch.assert_called_once_with(ANY, {
'first_name': u'The',
'last_name': u'Dude',
'description': u'',
'retURL': 'http://www.mozilla.org/en-US/about/'
'partnerships?success=1',
'title': u'Abider of things',
'URL': u'',
'company': u'Urban Achievers',
'oid': '00DU0000000IrgO',
'phone': u'',
'street': u'',
'zip': u'',
'city': u'',
'state': u'',
'country': u'',
'mobile': u'',
'00NU0000002pDJr': [],
'email': u'[email protected]',
'lead_source': 'www.mozilla.org/about/partnerships/',
})
def test_sf_form_csrf_status(self):
"""Test that CSRF checks return 200 with token and 403 without."""
csrf_client = Client(enforce_csrf_checks=True)
response = csrf_client.get(reverse('firefox.partners.index'))
post_url = reverse('mozorg.partnerships')
response = csrf_client.post(post_url, {
'first_name': "Partner",
'csrfmiddlewaretoken': response.cookies['csrftoken'].value,
})
self.assertEqual(response.status_code, 200)
response = csrf_client.post(post_url, {'first_name': "Partner"})
self.assertEqual(response.status_code, 403)
none_mock = Mock()
none_mock.return_value = None
@patch.object(fx_views.WhatsnewView, 'redirect_to', none_mock)
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestWhatsNew(TestCase):
def setUp(self):
self.view = fx_views.WhatsnewView.as_view()
self.rf = RequestFactory(HTTP_USER_AGENT='Firefox')
@override_settings(DEV=True)
def test_can_post(self, render_mock):
"""Home page must accept post for newsletter signup."""
req = self.rf.post('/en-US/firefox/whatsnew/')
self.view(req)
# would return 405 before calling render otherwise
render_mock.assert_called_once_with(req, ['firefox/whatsnew.html'], ANY)
@patch.object(fx_views.WhatsnewView, 'fxos_locales', ['de'])
@override_settings(DEV=True)
def test_fxos_locales(self, render_mock):
"""Should use a different template for fxos locales."""
req = self.rf.get('/de/firefox/whatsnew/')
req.locale = 'de'
self.view(req)
template = render_mock.call_args[0][1]
ctx = render_mock.call_args[0][2]
ok_('locales_with_video' not in ctx)
eq_(template, ['firefox/whatsnew-fxos.html'])
@override_settings(DEV=True)
def test_fx_australis_29(self, render_mock):
"""Should use australis template for 29.0."""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='29.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_australis_29_0_1(self, render_mock):
"""Should use australis template for 29.0.1"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='29.0.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_30(self, render_mock):
"""Should use australis template for 30.0."""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='30.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_31(self, render_mock):
"""Should use australis template for 31.0."""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='31.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_33_0(self, render_mock):
"""Should use australis template for 33.0."""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='33.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_33_0_1(self, render_mock):
"""Should use australis template for 33.0.1"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='33.0.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_33_1(self, render_mock):
"""Should use privacy tour template for 33.1"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='33.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/privacy_tour/no-tour.html'])
@override_settings(DEV=True)
def test_fx_34_0(self, render_mock):
"""Should use search tour template for 34.0"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/search_tour/no-tour.html'])
@override_settings(DEV=True)
def test_fx_34_0_1(self, render_mock):
"""Should use search tour template for 34.0.1"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='34.0.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/search_tour/no-tour.html'])
@override_settings(DEV=True)
def test_fx_34_1(self, render_mock):
"""Should use search tour template for 34.1"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='34.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/search_tour/no-tour.html'])
@override_settings(DEV=True)
def test_fx_34_0_locale(self, render_mock):
"""Should use australis template for 34.0 non en-US locales"""
req = self.rf.get('/de/firefox/whatsnew/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_fx_35_0(self, render_mock):
"""Should use search tour template for 35.0"""
req = self.rf.get('/en-US/firefox/whatsnew/')
self.view(req, version='35.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/search_tour/no-tour.html'])
@override_settings(DEV=True)
def test_fx_35_0_locale(self, render_mock):
"""Should use australis template for 35.0 non en-US locales"""
req = self.rf.get('/de/firefox/whatsnew/')
req.locale = 'de'
self.view(req, version='35.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/whatsnew-no-tour.html'])
@override_settings(DEV=True)
def test_rv_prefix(self, render_mock):
"""Prefixed oldversion shouldn't impact version sniffing."""
req = self.rf.get('/en-US/firefox/whatsnew/?oldversion=rv:10.0')
self.view(req, version='33.1')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/privacy_tour/tour.html'])
@override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
"""Should redirect to https: for 29.0."""
url = '/en-US/firefox/whatsnew/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], 'https://testserver' + url)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
"""Should not redirect to https: in DEV mode."""
url = '/en-US/firefox/whatsnew/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
"""Should not redirect to https: when already secure."""
url = '/en-US/firefox/whatsnew/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@patch.object(fx_views.TourView, 'redirect_to', none_mock)
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestTourView(TestCase):
def setUp(self):
self.view = fx_views.TourView.as_view()
self.rf = RequestFactory(HTTP_USER_AGENT='Firefox')
@override_settings(DEV=True)
def test_fx_tour_template(self, render_mock):
"""Should use firstrun tour template"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='29.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_35_0_a2(self, render_mock):
"""Should use dev browser firstrun template for 35.0a2"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='35.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_35_1_a2(self, render_mock):
"""Should use dev browser firstrun template for 35.1a2"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='35.1a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
"""Should use dev browser firstrun template for 36.0a2"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
"""Should use standard firstrun template for older aurora"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
"""Should use search tour template for 34.0"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
"""Should use search tour template for 34.0.5"""
req = self.rf.get('/en-US/firefox/tour/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-34-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
"""Should use australis template for 34.0 non en-US locales"""
req = self.rf.get('/en-US/firefox/tour/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/help-menu-tour.html'])
@override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
"""Should redirect to https"""
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], 'https://testserver' + url)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
"""Should not redirect to https: in DEV mode."""
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
"""Should not redirect to https: when already secure."""
url = '/en-US/firefox/tour/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@patch.object(fx_views.FirstrunView, 'redirect_to', none_mock)
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestFirstRun(TestCase):
def setUp(self):
self.view = fx_views.FirstrunView.as_view()
self.rf = RequestFactory()
@override_settings(DEV=True)
def test_can_post(self, render_mock):
"""Home page must accept post for newsletter signup."""
req = self.rf.post('/en-US/firefox/firstrun/')
self.view(req)
# would return 405 before calling render otherwise
render_mock.assert_called_once_with(req,
['firefox/australis/firstrun-tour.html'], ANY)
@override_settings(DEV=True)
def test_fx_australis_29(self, render_mock):
"""Should use firstrun tour template"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='29.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_35_0_a2(self, render_mock):
"""Should use dev browser firstrun template for 35.0a2"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_35_1_a2(self, render_mock):
"""Should use dev browser firstrun template for 35.1a2"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='35.1a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_36_0_a2(self, render_mock):
"""Should use dev browser firstrun template for 36.0a2"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='36.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/dev-firstrun.html'])
@override_settings(DEV=True)
def test_fx_dev_browser_34_0_a2(self, render_mock):
"""Should use standard firstrun template for older aurora"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0a2')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0(self, render_mock):
"""Should use search tour template for 34.0"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0_5(self, render_mock):
"""Should use search tour template for 34.0.5"""
req = self.rf.get('/en-US/firefox/firstrun/')
self.view(req, version='34.0.5')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-34-tour.html'])
@override_settings(DEV=True)
def test_fx_search_tour_34_0_locales(self, render_mock):
"""Should use australis template for 34.0 non en-US locales"""
req = self.rf.get('/en-US/firefox/firstrun/')
req.locale = 'de'
self.view(req, version='34.0')
template = render_mock.call_args[0][1]
eq_(template, ['firefox/australis/firstrun-tour.html'])
@override_settings(DEV=False)
def test_fx_australis_secure_redirect(self, render_mock):
"""Should redirect to https:"""
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp['location'], 'https://testserver' + url)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_not_dev(self, render_mock):
"""Should not redirect to https: in DEV mode."""
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=False):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@override_settings(DEV=True)
def test_fx_australis_secure_redirect_secure(self, render_mock):
"""Should not redirect to https: when already secure."""
url = '/en-US/firefox/firstrun/'
req = self.rf.get(url)
with patch.object(req, 'is_secure', return_value=True):
resp = self.view(req, version='29.0')
eq_(resp.status_code, 200)
@patch.object(fx_views, 'firefox_details', firefox_details)
class FxVersionRedirectsMixin(object):
@override_settings(DEV=True) # avoid https redirects
def assert_ua_redirects_to(self, ua, url_name, status_code=301):
response = self.client.get(self.url, HTTP_USER_AGENT=ua)
eq_(response.status_code, status_code)
eq_(response['Vary'], 'User-Agent')
eq_(response['Location'],
'http://testserver%s' % reverse(url_name))
# An additional redirect test with a query string
query = '?ref=getfirefox'
response = self.client.get(self.url + query, HTTP_USER_AGENT=ua)
eq_(response.status_code, status_code)
eq_(response['Vary'], 'User-Agent')
eq_(response['Location'],
'http://testserver%s' % reverse(url_name) + query)
def test_non_firefox(self):
"""
Any non-Firefox user agents should be permanently redirected to
/firefox/new/.
"""
user_agent = 'random'
self.assert_ua_redirects_to(user_agent, 'firefox.new')
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='13.0.5')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version',
return_value=('13.0.5', GOOD_PLATS))
def test_current_minor_version_firefox(self, latest_mock):
"""
Should show current even if behind by a patch version
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) '
'Gecko/20100101 Firefox/13.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='25.0',
FIREFOX_ESR='24.1')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version',
return_value=('25.0', GOOD_PLATS))
def test_esr_firefox(self, latest_mock):
"""
Currently released ESR firefoxen should not redirect. At present
that is 24.0.x.
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:24.0) '
'Gecko/20100101 Firefox/24.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version',
return_value=('16.0', GOOD_PLATS))
def test_current_firefox(self, latest_mock):
"""
Currently released firefoxen should not redirect.
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:16.0) '
'Gecko/20100101 Firefox/16.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
@patch('bedrock.mozorg.helpers.download_buttons.latest_version',
return_value=('16.0', GOOD_PLATS))
def test_future_firefox(self, latest_mock):
"""
Pre-release firefoxen should not redirect.
"""
user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:18.0) '
'Gecko/20100101 Firefox/18.0')
response = self.client.get(self.url, HTTP_USER_AGENT=user_agent)
eq_(response.status_code, 200)
eq_(response['Vary'], 'User-Agent')
class TestWhatsnewRedirect(FxVersionRedirectsMixin, TestCase):
def setUp(self):
self.user_agent = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:29.0) '
'Gecko/20100101 Firefox/29.0')
self.expected = 'data-has-tour="True"'
self.url = reverse('firefox.whatsnew', args=['33.1'])
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_tour_oldversion(self):
"""Should not show tour if upgrading from 33.1 onwards."""
# sanity check that it should show for other values of "oldversion"
response = self.client.get(self.url + '?oldversion=28.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=27.0.1', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=4.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=rv:10.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.0.1', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.1', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.1.1', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=34.0', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=35.0', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
# if there's no oldversion parameter, show no tour
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
@override_settings(DEV=True)
@patch.dict(product_details.firefox_versions,
LATEST_FIREFOX_VERSION='16.0')
def test_whatsnew_search_tour_oldversion(self):
"""Should not show tour if upgrading from 34.0 onwards."""
self.url = reverse('firefox.whatsnew', args=['34.1'])
# sanity check that it should show for other values of "oldversion"
response = self.client.get(self.url + '?oldversion=28.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=27.0.1', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=4.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=rv:10.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.0', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=33.0.1', HTTP_USER_AGENT=self.user_agent)
self.assertIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=34.0', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=34.0.1', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=34.1', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
response = self.client.get(self.url + '?oldversion=35.0', HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
# if there's no oldversion parameter, show no tour
response = self.client.get(self.url, HTTP_USER_AGENT=self.user_agent)
self.assertNotIn(self.expected, response.content)
| 41.935583 | 101 | 0.635238 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | MozFux/bedrock | bedrock/firefox/tests/test_base.py | 41,020 | Python
from tkinter import *
# import math
# https://www.youtube.com/watch?v=r5EQCSW_rLQ pyramid math formulas TIME=3:55
class Pyramid:
    # constants
BLOCK_HEIGHT = 1.5 # meters
BLOCK_WIDTH = 2 # meters
BLOCK_LENGTH = 2.5 # meters
BLOCK_WEIGHT = 15000 # kg
# __init__ is Python's constructor method
def __init__(self, pyramidSideLength, pyramidHeight):
self.pyramidSideLength = pyramidSideLength
self.pyramidHeight = pyramidHeight
# processing
def calculateBlockVolume(self, height, width, length):
return height * width * length
def calculateGroundArea(self, length):
return length ** 2
def calculatePyramidVolume(self, groundArea, height):
return round((groundArea / 3) * height)
def countBlocks(self, pyramidVolume, blockVolume):
return round(pyramidVolume / blockVolume)
def calculateMass(self, blocks, weight):
return blocks * weight
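    # Worked example (a sketch only; the figures mirror the unit tests and the
    # sample output at the bottom of this file) for an 80 m base, 64 m high pyramid:
    #   block volume   = 1.5 * 2 * 2.5           = 7.5 m³
    #   ground area    = 80 ** 2                 = 6,400 m²
    #   pyramid volume = round((6400 / 3) * 64)  = 136,533 m³
    #   blocks         = round(136533 / 7.5)     = 18,204
    #   mass           = 18204 * 15000           = 273,060,000 kg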
# this type of function might not be suitable for inside a class, but going with it for now
def createNewPyramid(self):
# create superscript for displaying exponents
superscript = str.maketrans("0123456789", "⁰¹²³⁴⁵⁶⁷⁸⁹")
displayMetersSquared = 'm2'.translate(superscript)
displayMetersCubed = 'm3'.translate(superscript)
# storing function output in variables for program readability
blockVolume = self.calculateBlockVolume(Pyramid.BLOCK_HEIGHT, Pyramid.BLOCK_WIDTH, Pyramid.BLOCK_LENGTH)
groundAreaCovered = self.calculateGroundArea(self.pyramidSideLength)
pyramidVolume = self.calculatePyramidVolume(groundAreaCovered, self.pyramidHeight)
countOfBlocks = self.countBlocks(pyramidVolume, blockVolume)
mass = self.calculateMass(countOfBlocks, Pyramid.BLOCK_WEIGHT)
# build nicely formatted answer for display
displayAnswer = '\n' + \
'Ground Area Covered = {:,} {}'.format(groundAreaCovered, displayMetersSquared) + '\n' + \
'Pyramid Volume = {:,.0f} {}'.format(pyramidVolume, displayMetersCubed) + '\n' + \
'Blocks = {:,}'.format(countOfBlocks) + '\n' + \
'Mass = {:,} kg'.format(mass) + '\n\n' + \
'*Pyramid is not to scale.'
return displayAnswer
class GridLines:
def __init__(self, canvas, canvas_width, canvas_height, grid_space):
self.canvas = canvas
self.canvas_width = canvas_width
self.canvas_height = canvas_height
self.grid_space = grid_space
def __vertical_lines__(self):
for i in range(0, self.canvas_height, self.grid_space):
self.canvas.create_line(i, 0, i, self.canvas_height, fill='thistle1')
def __horizontal_lines__(self):
for i in range(0, self.canvas_width, self.grid_space):
self.canvas.create_line(0, i, self.canvas_width, i, fill='CadetBlue1')
def create_grid(self):
self.__vertical_lines__()
self.__horizontal_lines__()
def createPyramid(apex, base, height):
canvas_width = 400
canvas_height = 400
canvas = Canvas(root, width=canvas_width, height=canvas_height)
canvas.grid(row=5, column=0, columnspan=2, sticky=E)
canvas.configure(background='white')
grid = GridLines(canvas, canvas_width, canvas_height, 20)
GridLines.create_grid(grid)
x_center = apex[0]
y_top = apex[1]
y_bottom = y_top + height
y_middle = y_top + height / 1.6
half_base = base / 2
x_left = (x_center - (half_base))
x_right = x_center + (half_base)
right_offset = ((base * .6) - base)
x_right_rear = x_right + right_offset
left_offset = ((base * 1.1) - half_base)
x_left_rear = x_center - left_offset
# facing triangle
points = [[x_left,y_bottom], [x_right,y_bottom], apex]
canvas.create_polygon(points, outline='black', fill='Gray95')
# left side shadow
points3 = [apex, [x_left_rear,y_middle], [x_left,y_bottom]]
canvas.create_polygon(points3, outline='black', fill='Gray85')
# triangle lines
canvas.create_line(x_center, y_top, x_right_rear, y_middle, fill='thistle3', dash=(4,4)) # back right
canvas.create_line(x_right_rear, y_middle, x_left_rear, y_middle, fill='CadetBlue3', dash=(4,4)) # back middle
canvas.create_line(x_right_rear, y_middle, x_left, y_bottom, fill='PaleGreen3', dash=(4,4)) # cross positive
canvas.create_line(x_left_rear, y_middle, x_right, y_bottom, fill='PaleGreen3', dash=(4,4)) # cross negative
canvas.create_line(x_right_rear, y_middle, x_right, y_bottom, fill='CadetBlue3', dash=(4,4)) # right connector
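# Example call (a sketch; the apex coordinate matches the one used in clickFunction below):
#   createPyramid([200, 100], 80, 64)  # draws an 80 m base, 64 m high pyramid (not to scale)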
def clickFunction():
apex = [200,100]
pyramid_base = int(widthText.get())
pyramid_height = int(heightText.get())
# build instance of Pyramid
new_pyramid = Pyramid(pyramid_base, pyramid_height)
# display results of instance (outputs calculated data)
pyramid_dimensions_output = Pyramid.createNewPyramid(new_pyramid)
responseLabel = Label(root, text=pyramid_dimensions_output, justify=LEFT)
responseLabel.grid(row=4, column=0, columnspan=2, ipadx='110', sticky=W)
# outputs 3D graphic of pyramid (not to scale)
createPyramid(apex, pyramid_base, pyramid_height)
root = Tk()
APP_NAME = 'Pyramid Builder'
root.iconbitmap('pyramid.ico')
root.title(APP_NAME)
root.geometry("600x700+1100+200")
header = Label(root, text=APP_NAME, font='Helvetica 18 bold')
header.grid(row=0, column=0, columnspan=2, pady='10')
# width entry
widthLabel = Label(root, text="Enter Base (in meters):")
widthLabel.grid(row=1, column=0, ipadx='30', sticky=W)
widthText = Entry(root)
widthText.grid(row=1, column=1, ipadx="100")
widthText.focus()
# height entry
heightLabel = Label(root, text="Enter Height (in meters):")
heightLabel.grid(row=2, column=0, ipadx='30', sticky=W)
heightText = Entry(root)
heightText.grid(row=2, column=1, ipadx="100")
# buttons
buttonFrame = Frame(root)
buttonFrame.grid(row=3, column=1, sticky=E)
submitButton = Button(buttonFrame, text="Submit", command=clickFunction)
closeButton = Button(buttonFrame, text='Close', command=root.destroy)
submitButton.pack(side='left', padx='2')
closeButton.pack(side='left', padx='2')
# root.grid_columnconfigure(0, minsize=80)
# root.grid_rowconfigure(0, pad=5)
root.mainloop()
# automated tests (would normally be in separate file just for testing)
print('\nrunning automated tests...')
import unittest
# would use "import Pyramid" here if in separate file
class TestPyramid(unittest.TestCase):
    # the explicit first argument is needed because these tests call the methods through the class
    # (Pyramid.method(...)) rather than on a Pyramid instance; the TestCase instance stands in for self
def test_calculateBlockVolume(self):
# only one test for block size (not supposed to change)
self.assertEqual(Pyramid.calculateBlockVolume(self, 1.5, 2, 2.5), 7.5)
def test_calculateGroundArea(self):
self.assertEqual(Pyramid.calculateGroundArea(self, 80), 6400)
self.assertEqual(Pyramid.calculateGroundArea(self, 236), 55696)
def test_calculatePyramidVolume(self):
self.assertEqual(Pyramid.calculatePyramidVolume(self, 6400, 64), 136533)
self.assertEqual(Pyramid.calculatePyramidVolume(self, 55696, 138), 2562016)
def test_countBlocks(self):
self.assertEqual(Pyramid.countBlocks(self, 136533, 7.5), 18204)
self.assertEqual(Pyramid.countBlocks(self, 2562016, 7.5), 341602)
def test_calculateMass(self):
self.assertEqual(Pyramid.calculateMass(self, 18204, 15000), 273060000)
self.assertEqual(Pyramid.calculateMass(self, 341602, 15000), 5124030000)
# Runs the unit tests automatically in the current file
if __name__ == '__main__':
unittest.main()
# ===========================================
# Visual of correct output for 2 test cases:
# results agree with automated unit testing
# and hand calculations (in MS Excel).
# ===================================
# input data:
# pyramid side length = 80 meters;
# pyramid height = 64 meters
# ========
# results:
# ground area covered = 6,400 m²
# pyramid volume = 136,533 m³
# blocks = 18,204
# mass = 273,060,000 kg
# ===================================
# input data:
# pyramid side length = 236 meters;
# pyramid height = 138 meters
# ========
# results:
# ground area covered = 55,696 m²
# pyramid volume = 2,562,016 m³
# blocks = 341,602
# mass = 5,124,030,000 kg
# ===========================================
| 35.809322 | 123 | 0.681694 | ["MIT"] | justinclark-dev/CSC110 | assignments/assignment-2-pyramid-builder-gui.py | 8,472 | Python
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa,
ec,
ed25519,
ed448,
rsa,
)
from cryptography.hazmat.primitives.asymmetric.types import (
PRIVATE_KEY_TYPES,
)
_ALLOWED_PKCS12_TYPES = typing.Union[
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
ed25519.Ed25519PrivateKey,
ed448.Ed448PrivateKey,
]
class PKCS12Certificate:
def __init__(
self,
cert: x509.Certificate,
friendly_name: typing.Optional[bytes],
):
if not isinstance(cert, x509.Certificate):
raise TypeError("Expecting x509.Certificate object")
if friendly_name is not None and not isinstance(friendly_name, bytes):
raise TypeError("friendly_name must be bytes or None")
self._cert = cert
self._friendly_name = friendly_name
@property
def friendly_name(self) -> typing.Optional[bytes]:
return self._friendly_name
@property
def certificate(self) -> x509.Certificate:
return self._cert
def __eq__(self, other: object) -> bool:
if not isinstance(other, PKCS12Certificate):
return NotImplemented
return (
self.certificate == other.certificate
and self.friendly_name == other.friendly_name
)
def __hash__(self) -> int:
return hash((self.certificate, self.friendly_name))
def __repr__(self) -> str:
return "<PKCS12Certificate({}, friendly_name={!r})>".format(
self.certificate, self.friendly_name
)
class PKCS12KeyAndCertificates:
def __init__(
self,
key: typing.Optional[PRIVATE_KEY_TYPES],
cert: typing.Optional[PKCS12Certificate],
additional_certs: typing.List[PKCS12Certificate],
):
if key is not None and not isinstance(
key,
(
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
ed25519.Ed25519PrivateKey,
ed448.Ed448PrivateKey,
),
):
raise TypeError(
"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
" private key, or None."
)
if cert is not None and not isinstance(cert, PKCS12Certificate):
raise TypeError("cert must be a PKCS12Certificate object or None")
if not all(
isinstance(add_cert, PKCS12Certificate)
for add_cert in additional_certs
):
raise TypeError(
"all values in additional_certs must be PKCS12Certificate"
" objects"
)
self._key = key
self._cert = cert
self._additional_certs = additional_certs
@property
def key(self) -> typing.Optional[PRIVATE_KEY_TYPES]:
return self._key
@property
def cert(self) -> typing.Optional[PKCS12Certificate]:
return self._cert
@property
def additional_certs(self) -> typing.List[PKCS12Certificate]:
return self._additional_certs
def __eq__(self, other: object) -> bool:
if not isinstance(other, PKCS12KeyAndCertificates):
return NotImplemented
return (
self.key == other.key
and self.cert == other.cert
and self.additional_certs == other.additional_certs
)
def __hash__(self) -> int:
return hash((self.key, self.cert, tuple(self.additional_certs)))
def __repr__(self) -> str:
fmt = (
"<PKCS12KeyAndCertificates(key={}, cert={}, additional_certs={})>"
)
return fmt.format(self.key, self.cert, self.additional_certs)
def load_key_and_certificates(
data: bytes,
password: typing.Optional[bytes],
backend: typing.Any = None,
) -> typing.Tuple[
typing.Optional[PRIVATE_KEY_TYPES],
typing.Optional[x509.Certificate],
typing.List[x509.Certificate],
]:
from cryptography.hazmat.backends.openssl.backend import backend as ossl
return ossl.load_key_and_certificates_from_pkcs12(data, password)
def load_pkcs12(
data: bytes,
password: typing.Optional[bytes],
backend: typing.Any = None,
) -> PKCS12KeyAndCertificates:
from cryptography.hazmat.backends.openssl.backend import backend as ossl
return ossl.load_pkcs12(data, password)
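# Example usage (a minimal sketch; "client.p12" and the password are placeholders):
#
#   with open("client.p12", "rb") as f:
#       data = f.read()
#   key, cert, additional_certs = load_key_and_certificates(data, b"passphrase")
#   p12 = load_pkcs12(data, b"passphrase")  # keeps friendly names via PKCS12Certificate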
_PKCS12_CAS_TYPES = typing.Union[
x509.Certificate,
PKCS12Certificate,
]
def serialize_key_and_certificates(
name: typing.Optional[bytes],
key: typing.Optional[_ALLOWED_PKCS12_TYPES],
cert: typing.Optional[x509.Certificate],
cas: typing.Optional[typing.Iterable[_PKCS12_CAS_TYPES]],
encryption_algorithm: serialization.KeySerializationEncryption,
) -> bytes:
if key is not None and not isinstance(
key,
(
rsa.RSAPrivateKey,
dsa.DSAPrivateKey,
ec.EllipticCurvePrivateKey,
ed25519.Ed25519PrivateKey,
ed448.Ed448PrivateKey,
),
):
raise TypeError(
"Key must be RSA, DSA, EllipticCurve, ED25519, or ED448"
" private key, or None."
)
if cert is not None and not isinstance(cert, x509.Certificate):
raise TypeError("cert must be a certificate or None")
if cas is not None:
cas = list(cas)
if not all(
isinstance(
val,
(
x509.Certificate,
PKCS12Certificate,
),
)
for val in cas
):
raise TypeError("all values in cas must be certificates")
if not isinstance(
encryption_algorithm, serialization.KeySerializationEncryption
):
raise TypeError(
"Key encryption algorithm must be a "
"KeySerializationEncryption instance"
)
if key is None and cert is None and not cas:
raise ValueError("You must supply at least one of key, cert, or cas")
from cryptography.hazmat.backends.openssl.backend import backend
return backend.serialize_key_and_certificates_to_pkcs12(
name, key, cert, cas, encryption_algorithm
)
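# Example usage (a minimal sketch; the friendly name and password are placeholders,
# and BestAvailableEncryption comes from the serialization module imported above):
#
#   p12_bytes = serialize_key_and_certificates(
#       name=b"my-cert", key=key, cert=cert, cas=None,
#       encryption_algorithm=serialization.BestAvailableEncryption(b"passphrase"),
#   )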
| 29.459091 | 79 | 0.632773 | ["MIT"] | ianmota/WebScrappingBimCollab | venv/Lib/site-packages/cryptography/hazmat/primitives/serialization/pkcs12.py | 6,481 | Python
"""Classification Report"""
# Authors: Jeffrey Wang
# License: BSD 3 clause
import numpy as np
from sleepens.analysis import multiconfusion_matrix
def calculate_statistics(Y_hat, Y, beta=1, average=None):
"""
Calculate the precisions, recalls, F-beta scores, and
supports for each class in `targets`.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
average : {'micro', 'macro', 'weighted', None}, default=None
The type of averaging to perform on statistics. Must be one of:
- None : Do not perform averaging, statistics for each class
are returned.
- 'micro' : Calculate globally, counting total true positives,
false negatives, and false positives.
- 'macro' : Calculate per class an unweighted mean.
- 'weighted' : Calculate per class the mean weighted by support.
Returns
-------
precisions : float or dict
Dictionary of precisions for each class if `average` is None.
Averaged precision based on averaging method if provided.
recalls : float or dict
Dictionary of recalls for each class if `average` is None.
Averaged recall based on averaging method if provided.
fscores : float or dict
Dictionary of fscores for each class if `average` is None.
Averaged fscore based on averaging method if provided.
supports : float or dict
Dictionary of supports for each class if `average` is None.
		Total support (total number of samples) if an averaging method is provided.
"""
if beta < 0:
raise ValueError("Beta must be non-negative")
matrix = multiconfusion_matrix(Y_hat, Y)
matrix_labels = list(matrix.keys())
matrix = np.array([matrix[l] for l in matrix_labels])
tp_sum = matrix[:,1,1]
label_sum = tp_sum + matrix[:,0,1]
target_sum = tp_sum + matrix[:,1,0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
label_sum = np.array([label_sum.sum()])
target_sum = np.array([target_sum.sum()])
with np.errstate(divide='ignore', invalid='ignore'):
precisions = np.divide(tp_sum, label_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=label_sum!=0)
recalls = np.divide(tp_sum, target_sum,
out=np.zeros(tp_sum.shape, dtype=float),
where=target_sum!=0)
if np.isposinf(beta):
fscores = recalls
else:
beta2 = beta ** 2
denom = beta2 * precisions + recalls
valid = np.where(denom != 0)[0]
fscores = np.zeros_like(denom)
fscores[valid] = (1 + beta2) * precisions[valid] * recalls[valid] / denom[valid]
if average == 'weighted':
weights = target_sum
if target_sum.sum() == 0:
return 0, 0, 0, target_sum.sum()
else:
weights = None
if average is not None:
precisions = np.average(precisions, weights=weights)
recalls = np.average(recalls, weights=weights)
fscores = np.average(fscores, weights=weights)
supports = target_sum.sum()
else:
precisions = {matrix_labels[k]: precisions[k] for k in range(len(matrix_labels))}
recalls = {matrix_labels[k]: recalls[k] for k in range(len(matrix_labels))}
fscores = {matrix_labels[k]: fscores[k] for k in range(len(matrix_labels))}
supports = {matrix_labels[k]: target_sum[k] for k in range(len(matrix_labels))}
return precisions, recalls, fscores, supports
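# Example usage (a minimal sketch with made-up integer labels):
#
#   Y_hat = [0, 1, 1, 0, 2]
#   Y     = [0, 1, 0, 0, 2]
#   precisions, recalls, fscores, supports = calculate_statistics(Y_hat, Y)
#   p, r, f, n = calculate_statistics(Y_hat, Y, average='macro')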
def classification_report(Y_hat, Y, beta=1):
"""
Create a report on classification statistics.
Parameters
----------
Y_hat : array-like, shape=(n_samples,)
List of data labels.
Y : array-like, shape=(n_samples,)
List of target truth labels.
beta : float, default=1
Strength of recall relative to precision in the F-score.
Returns
-------
report : dict
Dictionary containing classification statistics in the following
structure:
- 'label': {
'precision':0.5,
'recall':1.0,
'f-score':0.67,
'support':1
},
...
- 'beta': 1,
- 'support': 5,
- 'accuracy': 0.8,
- 'macro avg': {
'precision':0.6,
'recall':0.9,
'f-score':0.67,
},
- 'weighted avg': {
'precision':0.67,
'recall':0.9,
'f-score':0.67,
}
"""
stats = calculate_statistics(Y_hat, Y, beta=beta)
_, _, accuracy, total = calculate_statistics(Y_hat, Y, beta=beta, average='micro')
macro = calculate_statistics(Y_hat, Y, beta=beta, average='macro')
weighted = calculate_statistics(Y_hat, Y, beta=beta, average='weighted')
h = ['precision', 'recall', 'f-score', 'support']
report = {
'beta': beta,
'support': total,
'accuracy': accuracy,
'macro avg': {h[i]: macro[i] for i in range(len(h))},
'weighted avg': {h[i]: weighted[i] for i in range(len(h))}
}
classes = set(stats[0].keys())
for c in classes:
report[c] = {h[i]: stats[i][c] for i in range(len(h))}
return report
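# Example usage (a minimal sketch, reusing the made-up labels above):
#
#   report = classification_report(Y_hat, Y, beta=1)
#   print(report['accuracy'], report['macro avg']['f-score'], report['support'])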
| 32.051613 | 84 | 0.652979 | ["BSD-3-Clause"] | paradoxysm/sleepens | sleepens/analysis/_report.py | 4,968 | Python
import numpy as np
try:
from cs231n.im2col_cython import col2im_cython, im2col_cython
from cs231n.im2col_cython import col2im_6d_cython
except ImportError:
print ('run the following from the cs231n directory and try again:')
print ('python setup.py build_ext --inplace')
print ('You may also need to restart your iPython kernel')
from cs231n.im2col import *
def conv_forward_im2col(x, w, b, conv_param):
"""
A fast implementation of the forward pass for a convolutional layer
based on im2col and col2im.
"""
N, C, H, W = x.shape
num_filters, _, filter_height, filter_width = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - filter_width) % stride == 0, 'width does not work'
assert (H + 2 * pad - filter_height) % stride == 0, 'height does not work'
# Create output
  out_height = (H + 2 * pad - filter_height) // stride + 1
  out_width = (W + 2 * pad - filter_width) // stride + 1
out = np.zeros((N, num_filters, out_height, out_width), dtype=x.dtype)
# x_cols = im2col_indices(x, w.shape[2], w.shape[3], pad, stride)
x_cols = im2col_cython(x, w.shape[2], w.shape[3], pad, stride)
res = w.reshape((w.shape[0], -1)).dot(x_cols) + b.reshape(-1, 1)
out = res.reshape(w.shape[0], out.shape[2], out.shape[3], x.shape[0])
out = out.transpose(3, 0, 1, 2)
cache = (x, w, b, conv_param, x_cols)
return out, cache
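# Example call (a sketch; the shapes are illustrative only and assume the Cython
# im2col extension has been built):
#
#   x = np.random.randn(2, 3, 32, 32)   # (N, C, H, W)
#   w = np.random.randn(8, 3, 3, 3)     # (F, C, HH, WW)
#   b = np.zeros(8)
#   out, cache = conv_forward_im2col(x, w, b, {'stride': 1, 'pad': 1})
#   # out.shape == (2, 8, 32, 32)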
def conv_forward_strides(x, w, b, conv_param):
N, C, H, W = x.shape
F, _, HH, WW = w.shape
stride, pad = conv_param['stride'], conv_param['pad']
# Check dimensions
assert (W + 2 * pad - WW) % stride == 0, 'width does not work'
assert (H + 2 * pad - HH) % stride == 0, 'height does not work'
# Pad the input
p = pad
x_padded = np.pad(x, ((0, 0), (0, 0), (p, p), (p, p)), mode='constant')
# Figure out output dimensions
H += 2 * pad
W += 2 * pad
  out_h = (H - HH) // stride + 1
  out_w = (W - WW) // stride + 1
# Perform an im2col operation by picking clever strides
shape = (C, HH, WW, N, out_h, out_w)
strides = (H * W, W, 1, C * H * W, stride * W, stride)
strides = x.itemsize * np.array(strides)
x_stride = np.lib.stride_tricks.as_strided(x_padded,
shape=shape, strides=strides)
x_cols = np.ascontiguousarray(x_stride)
x_cols.shape = (C * HH * WW, N * out_h * out_w)
# Now all our convolutions are a big matrix multiply
res = w.reshape(F, -1).dot(x_cols) + b.reshape(-1, 1)
# Reshape the output
res.shape = (F, N, out_h, out_w)
out = res.transpose(1, 0, 2, 3)
# Be nice and return a contiguous array
# The old version of conv_forward_fast doesn't do this, so for a fair
# comparison we won't either
out = np.ascontiguousarray(out)
cache = (x, w, b, conv_param, x_cols)
return out, cache
def conv_backward_strides(dout, cache):
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
N, C, H, W = x.shape
F, _, HH, WW = w.shape
_, _, out_h, out_w = dout.shape
db = np.sum(dout, axis=(0, 2, 3))
dout_reshaped = dout.transpose(1, 0, 2, 3).reshape(F, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(F, -1).T.dot(dout_reshaped)
dx_cols.shape = (C, HH, WW, N, out_h, out_w)
dx = col2im_6d_cython(dx_cols, N, C, H, W, HH, WW, pad, stride)
return dx, dw, db
def conv_backward_im2col(dout, cache):
"""
A fast implementation of the backward pass for a convolutional layer
based on im2col and col2im.
"""
x, w, b, conv_param, x_cols = cache
stride, pad = conv_param['stride'], conv_param['pad']
db = np.sum(dout, axis=(0, 2, 3))
num_filters, _, filter_height, filter_width = w.shape
dout_reshaped = dout.transpose(1, 2, 3, 0).reshape(num_filters, -1)
dw = dout_reshaped.dot(x_cols.T).reshape(w.shape)
dx_cols = w.reshape(num_filters, -1).T.dot(dout_reshaped)
# dx = col2im_indices(dx_cols, x.shape, filter_height, filter_width, pad, stride)
dx = col2im_cython(dx_cols, x.shape[0], x.shape[1], x.shape[2], x.shape[3],
filter_height, filter_width, pad, stride)
return dx, dw, db
conv_forward_fast = conv_forward_strides
conv_backward_fast = conv_backward_strides
def max_pool_forward_fast(x, pool_param):
"""
A fast implementation of the forward pass for a max pooling layer.
This chooses between the reshape method and the im2col method. If the pooling
regions are square and tile the input image, then we can use the reshape
method which is very fast. Otherwise we fall back on the im2col method, which
is not much faster than the naive method.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
same_size = pool_height == pool_width == stride
tiles = H % pool_height == 0 and W % pool_width == 0
if same_size and tiles:
out, reshape_cache = max_pool_forward_reshape(x, pool_param)
cache = ('reshape', reshape_cache)
else:
out, im2col_cache = max_pool_forward_im2col(x, pool_param)
cache = ('im2col', im2col_cache)
return out, cache
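# Example call (a sketch; a square pool that tiles the input, like this one,
# takes the fast reshape path described above):
#
#   pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}
#   out, cache = max_pool_forward_fast(x, pool_param)
#   # for x of shape (2, 8, 32, 32), out.shape == (2, 8, 16, 16)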
def max_pool_backward_fast(dout, cache):
"""
A fast implementation of the backward pass for a max pooling layer.
This switches between the reshape method an the im2col method depending on
which method was used to generate the cache.
"""
method, real_cache = cache
if method == 'reshape':
return max_pool_backward_reshape(dout, real_cache)
elif method == 'im2col':
return max_pool_backward_im2col(dout, real_cache)
else:
raise ValueError('Unrecognized method "%s"' % method)
def max_pool_forward_reshape(x, pool_param):
"""
A fast implementation of the forward pass for the max pooling layer that uses
some clever reshaping.
This can only be used for square pooling regions that tile the input.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert pool_height == pool_width == stride, 'Invalid pool params'
assert H % pool_height == 0
assert W % pool_height == 0
  x_reshaped = x.reshape(N, C, H // pool_height, pool_height,
                         W // pool_width, pool_width)
out = x_reshaped.max(axis=3).max(axis=4)
cache = (x, x_reshaped, out)
return out, cache
def max_pool_backward_reshape(dout, cache):
"""
A fast implementation of the backward pass for the max pooling layer that
uses some clever broadcasting and reshaping.
This can only be used if the forward pass was computed using
max_pool_forward_reshape.
  NOTE: If there are multiple argmaxes, this method splits the upstream
  gradient equally among all argmax elements of the input (the division by
  the mask count below), which yields a valid subgradient at the cost of a
  significant performance penalty (roughly 40% slower). Removing that division
  would instead assign the full gradient to every argmax element, which is
  faster but technically incorrect when ties occur; ties are unlikely in
  practice.
"""
x, x_reshaped, out = cache
dx_reshaped = np.zeros_like(x_reshaped)
out_newaxis = out[:, :, :, np.newaxis, :, np.newaxis]
mask = (x_reshaped == out_newaxis)
dout_newaxis = dout[:, :, :, np.newaxis, :, np.newaxis]
dout_broadcast, _ = np.broadcast_arrays(dout_newaxis, dx_reshaped)
dx_reshaped[mask] = dout_broadcast[mask]
dx_reshaped /= np.sum(mask, axis=(3, 5), keepdims=True)
dx = dx_reshaped.reshape(x.shape)
return dx
def max_pool_forward_im2col(x, pool_param):
"""
An implementation of the forward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
assert (H - pool_height) % stride == 0, 'Invalid height'
assert (W - pool_width) % stride == 0, 'Invalid width'
  out_height = (H - pool_height) // stride + 1
  out_width = (W - pool_width) // stride + 1
x_split = x.reshape(N * C, 1, H, W)
x_cols = im2col(x_split, pool_height, pool_width, padding=0, stride=stride)
x_cols_argmax = np.argmax(x_cols, axis=0)
x_cols_max = x_cols[x_cols_argmax, np.arange(x_cols.shape[1])]
out = x_cols_max.reshape(out_height, out_width, N, C).transpose(2, 3, 0, 1)
cache = (x, x_cols, x_cols_argmax, pool_param)
return out, cache
def max_pool_backward_im2col(dout, cache):
"""
An implementation of the backward pass for max pooling based on im2col.
This isn't much faster than the naive version, so it should be avoided if
possible.
"""
x, x_cols, x_cols_argmax, pool_param = cache
N, C, H, W = x.shape
pool_height, pool_width = pool_param['pool_height'], pool_param['pool_width']
stride = pool_param['stride']
dout_reshaped = dout.transpose(2, 3, 0, 1).flatten()
dx_cols = np.zeros_like(x_cols)
dx_cols[x_cols_argmax, np.arange(dx_cols.shape[1])] = dout_reshaped
dx = col2im_indices(dx_cols, (N * C, 1, H, W), pool_height, pool_width,
padding=0, stride=stride)
dx = dx.reshape(x.shape)
return dx
| 34.295203 | 83 | 0.689585 | ["MIT"] | anandsaha/cs231n.assignments | 2016winter/assignment2/cs231n/fast_layers.py | 9,294 | Python
#-----------------------------------------------------------------------------
# Copyright (c) 2014, Ryan Volz
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
import os
import glob
import fnmatch
from echolect.core.indexing import (find_index, slice_by_value, wrap_check_start,
wrap_check_stop)
from . import raw_parsing
__all__ = ['file_times', 'find_files', 'find_files_recursive', 'map_file_blocks',
'read_voltage',
'voltage_reader']
#******** See raw_parsing.py for details on Jicamarca raw data format ***************
def find_files(fdir, pattern='D*.r'):
files = glob.glob(os.path.join(fdir, pattern))
files.sort()
return np.asarray(files)
def find_files_recursive(fdir, pattern='D*.r'):
files = []
for dirpath, dirnames, filenames in os.walk(fdir):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(dirpath, filename))
files.sort()
return np.asarray(files)
def file_times(files):
file_times = []
# find time that begins each file
for fpath in files:
with open(fpath, 'rb') as f:
h = raw_parsing.read_first_header(f)
time = raw_parsing.parse_time(h)
file_times.append(time)
return np.asarray(file_times)
def map_file_blocks(fpath):
# read all of the headers, block times, and location for start of each data block
headers = []
block_times = []
data_start_bytes = []
with open(fpath, 'rb') as f:
        # get necessary info from first header
h = raw_parsing.read_first_header(f)
headers.append(h)
time = raw_parsing.parse_time(h)
block_times.append(time)
data_start_bytes.append(f.tell())
block_size = h['nSizeOfDataBlock']
while True:
# skip over the previous block of data
f.seek(block_size, 1)
# read the block's header
try:
h = raw_parsing.read_basic_header(f)
except EOFError:
break
time = raw_parsing.parse_time(h)
# check validity of header
# assume that if time is 0, the subsequent block was
# not written and hence EOF has been reached
if time == 0:
break
headers.append(h)
block_times.append(time)
data_start_bytes.append(f.tell())
headers = np.asarray(headers)
block_times = np.asarray(block_times)
data_start_bytes = np.asarray(data_start_bytes)
return headers, block_times, data_start_bytes
class voltage_reader(object):
def __init__(self, fpath):
self.fpath = fpath
headers, block_times, data_start_bytes = map_file_blocks(fpath)
self.headers = headers
self.block_times = block_times
self.data_start_bytes = data_start_bytes
h = headers[0]
raw_dtype, dtype = raw_parsing.parse_dtype(h)
self.raw_dtype = raw_dtype
self.dtype = dtype
block_shape = raw_parsing.parse_block_shape(h)
self.block_shape = block_shape
self.nprofiles_per_block = block_shape[0]
self.nsamples_per_profile = block_shape[1]
self.nchannels = block_shape[2]
self.nitems_per_profile = block_shape[1]*block_shape[2]
self.nblocks = len(block_times)
self.nprofiles = self.nblocks*block_shape[0]
self.profile_bytes = self.nitems_per_profile*self.raw_dtype.itemsize
self.shape = (self.nprofiles, self.nchannels, self.nsamples_per_profile)
self.ts = raw_parsing.parse_ts(h)
self.ipp = raw_parsing.parse_ipp(h)
self.r = raw_parsing.parse_range_index(h)
def __getitem__(self, key):
if not isinstance(key, tuple):
pidx = key
cidx = slice(None)
sidx = slice(None)
else:
lkey = len(key)
if lkey < 1 or lkey > 3:
raise IndexError('Wrong number of indices')
elif lkey == 1:
pidx = key[0]
cidx = slice(None)
sidx = slice(None)
elif lkey == 2:
pidx = key[0]
cidx = key[1]
sidx = slice(None)
else:
pidx = key[0]
cidx = key[1]
sidx = key[2]
if isinstance(pidx, int):
return self.read_voltage(pidx, chan_idx=cidx, sample_idx=sidx)
# pidx must be a slice object
start, stop, step = pidx.indices(self.shape[0])
return self.read_voltage(start, stop, step, cidx, sidx)
def _read_from_block(self, block_num, start, stop, step, chan_idx, sample_idx):
num = stop - start
# so we drop dimension when num == 1
if num == 1:
prof_idx = 0
else:
prof_idx = slice(0, num, step)
fstart = self.data_start_bytes[block_num] + self.profile_bytes*start
with open(self.fpath, 'rb') as f:
f.seek(fstart)
vlt_raw = np.fromfile(f, self.raw_dtype, num*self.nitems_per_profile)
# data arranged by channel, then sample, then profile
try:
vlt_raw = vlt_raw.reshape(num, self.nsamples_per_profile, self.nchannels)
except ValueError: # we didn't get the number of samples we expected, reshape fails
raise EOFError('End of file reached. Could not read requested data.')
# swap axes so it matches what we want, and slice as desired
vlt_raw = vlt_raw.swapaxes(1, 2)[prof_idx, chan_idx, sample_idx]
vlt = np.empty(vlt_raw.shape, dtype=self.dtype)
vlt.real = vlt_raw['real']
vlt.imag = vlt_raw['imag']
return vlt
def read_from_block(self, block_num, start, stop=None, step=1, nframes=1,
chan_idx=slice(None), sample_idx=slice(None)):
start = wrap_check_start(self.nprofiles_per_block, start)
if stop is None:
stop = start + step*nframes
else:
stop = wrap_check_stop(self.nprofiles_per_block, stop)
## change ints to lists so that we don't lose dimensions when indexing
#if isinstance(chan_idx, int):
#chan_idx = [chan_idx]
#if isinstance(sample_idx, int):
#sample_idx = [sample_idx]
return self._read_from_block(block_num, start, stop, step, chan_idx, sample_idx)
def _read_from_blocks(self, blocknumstart, blockstart, blocknumend, blockstop,
step, chan_idx, sample_idx):
if blocknumstart == blocknumend:
return self.read_from_block(blocknumstart, blockstart, blockstop, step,
chan_idx, sample_idx)
start = blockstart
vlt_all = []
for bnum in xrange(blocknumstart, blocknumend + 1):
if bnum == blocknumend:
vlt_all.append(self.read_from_block(bnum, start, blockstop, step,
chan_idx, sample_idx))
else:
vlt_all.append(self.read_from_block(bnum, start, self.nprofiles_per_block, step,
chan_idx, sample_idx))
                # set start for the next block (possibly != 0) based on step
# step - ((nprofiles - start) % step) == (start - nprofiles) % step
start = (start - self.nprofiles_per_block) % step
return np.concatenate(vlt_all, axis=0)
def read_voltage(self, start, stop=None, step=1, nframes=1,
chan_idx=slice(None), sample_idx=slice(None)):
start = wrap_check_start(self.shape[0], start)
if stop is None:
stop = start + step*nframes
else:
stop = wrap_check_stop(self.shape[0], stop)
## change ints to lists so that we don't lose dimensions when indexing
#if isinstance(chan_idx, int):
#chan_idx = [chan_idx]
#if isinstance(sample_idx, int):
#sample_idx = [sample_idx]
# find blocks for start and stop
bstart, strt = divmod(start, self.nprofiles_per_block)
# want block of last profile to include, hence profile number end = stop - 1
end = stop - 1
bend, nend = divmod(end, self.nprofiles_per_block)
stp = nend + 1
return self._read_from_blocks(bstart, strt, bend, stp, step, chan_idx, sample_idx)
def read_voltage(fpath, key=slice(None)):
vlt_r = voltage_reader(fpath)
return vlt_r[key]
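# Illustrative usage sketch (not part of the original module; the file name
# below is hypothetical):
#
#     vr = voltage_reader('D2014001.r')
#     print(vr.shape, vr.ts, vr.ipp)  # (nprofiles, nchannels, nsamples), sample time, IPP
#     profile0 = vr[0]                # first profile, all channels and samples
#     chan0 = vr[0:100, 0, :]         # profiles 0..99 of channel 0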
| 37.712446 | 96 | 0.589393 | [
"BSD-3-Clause"
] | ryanvolz/echolect | echolect/jicamarca/read_raw.py | 8,787 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 18:18:48 2020
@author: tsuyogbasnet
"""
import os
import sys
import pickle
from tqdm import tqdm
from scipy.io import wavfile
from python_speech_features import mfcc
from keras.models import load_model
import pandas as pd
from sklearn.metrics import accuracy_score
import numpy as np
def build_predictions(audio_dir):
y_true = []
y_pred = []
fn_prob = {}
if len(os.listdir(audio_dir)) == 0:
print("No files found for classification")
return False, False, False
print("Extracting feature from audio files")
for file in tqdm(os.listdir(audio_dir)):
try:
rate, signal = wavfile.read(os.path.join(audio_dir, file))
label = filename_to_class[file]
c = classes.index(label)
y_prob = []
print("Classifying audio files")
for i in tqdm(range(0, signal.shape[0]-config.step, config.step)):
sample = signal[i:i+config.step]
x = mfcc(sample, rate, numcep=config.nfeat, nfilt=config.nfilt, nfft=config.nfft)
x = (x-config._min) / (config._max - config._min)
if config.mode == 'conv':
x = x.reshape(1, x.shape[0], x.shape[1], 1)
elif config.mode == 'time':
x = np.expand_dims(x, axis=0)
y_hat = model.predict(x)
y_prob.append(y_hat)
y_pred.append(np.argmax(y_hat))
y_true.append(c)
fn_prob[file] = np.mean(y_prob, axis=0).flatten()
except:
print("Something went wrong some files", sys.exc_info()[0])
return y_true, y_pred, fn_prob
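# Note on the windowing above (sketch): the model scores non-overlapping chunks
# of config.step samples, roughly (len(signal) - config.step) // config.step of
# them per file, and their softmax outputs are averaged into fn_prob[file].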
data_frame = pd.read_csv('instruments.csv')
classes = list(np.unique(data_frame.label))
filename_to_class = dict(zip(data_frame.fname,data_frame.label))
p_path = os.path.join('pickles','conv.p')
with open(p_path, 'rb') as handle:
config = pickle.load(handle)
model = load_model(config.model_path)
y_true, y_pred, fn_prob = build_predictions('testcleanfiles')
if(y_true and y_pred and fn_prob):
acc_score = accuracy_score(y_true=y_true, y_pred=y_pred)
y_probs = []
for i, row in data_frame.iterrows():
y_prob = fn_prob[row.fname]
y_probs.append(y_prob)
for c, p in zip(classes, y_prob):
data_frame.at[i,c] = p
y_pred = [classes[np.argmax(y)] for y in y_probs]
data_frame['y_pred'] = y_pred
data_frame.to_csv('prediction.csv', index=False)
| 28.451613 | 97 | 0.597884 | [
"MIT"
] | pvsnp9/audio_classification_using_deep_learning | predict.py | 2,646 | Python |
# Copyright (c) 2009-2018 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from . import value_with_optional_details
from .benchmark import Benchmark
from .benchmark_suite import BenchmarkSuite
from .exp_run_details import ExpRunDetails
from .exp_variables import ExpVariables
from .reporting import Reporting
class Experiment(object):
@classmethod
def compile(cls, name, exp, configurator):
description = exp.get('description')
desc = exp.get('desc')
data_file = exp.get('data_file') or configurator.data_file
reporting = Reporting.compile(exp.get('reporting', {}), configurator.reporting,
configurator.options, configurator.ui)
run_details = ExpRunDetails.compile(exp, configurator.run_details)
variables = ExpVariables.compile(exp, ExpVariables.empty())
executions = exp.get('executions')
suites = exp.get('suites')
return Experiment(name, description or desc, data_file, reporting,
run_details, variables, configurator, executions, suites)
def __init__(self, name, description, data_file, reporting, run_details,
variables, configurator, executions, suites):
self.name = name
self._description = description
self._data_file = data_file
self._run_details = run_details
self._variables = variables
self._reporting = reporting
self._data_store = configurator.data_store
self._persistence = self._data_store.get(data_file, configurator)
self._suites = self._compile_executors_and_benchmark_suites(
executions, suites, configurator)
self._benchmarks = self._compile_benchmarks()
self.runs = self._compile_runs(configurator)
def _compile_runs(self, configurator):
runs = set()
# pylint: disable-next=too-many-nested-blocks
for bench in self._benchmarks:
if not configurator.run_filter.applies_to_bench(bench):
continue
variables = bench.variables
for cores in variables.cores:
for input_size in variables.input_sizes:
for var_val in variables.variable_values:
for machine in variables.machines:
if not configurator.run_filter.applies_to_machine(machine):
continue
run = self._data_store.create_run_id(
bench, cores, input_size, var_val, machine)
bench.add_run(run)
runs.add(run)
run.add_reporting(self._reporting)
run.add_persistence(self._persistence)
return runs
def _compile_executors_and_benchmark_suites(self, executions, suites, configurator):
# we now assemble the executors and the benchmark suites
results = []
for executor_cfg in executions:
executor_name, executor_details = value_with_optional_details(executor_cfg)
run_details = self._run_details
variables = self._variables
if executor_details:
run_details = ExpRunDetails.compile(executor_details, run_details)
variables = ExpVariables.compile(executor_details, variables)
suites_for_executor = executor_details.get('suites', suites)
else:
suites_for_executor = suites
executor = configurator.get_executor(executor_name, run_details, variables)
for suite_name in suites_for_executor:
suite = BenchmarkSuite.compile(
suite_name, configurator.get_suite(suite_name), executor,
configurator.build_commands)
results.append(suite)
return results
def _compile_benchmarks(self):
bench_cfgs = []
for suite in self._suites:
for bench in suite.benchmarks_config:
bench_cfgs.append(Benchmark.compile(
bench, suite, self._data_store))
return bench_cfgs
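# Illustrative sketch (not part of rebench): the rough shape of the `exp`
# mapping that Experiment.compile consumes, inferred from the keys read above.
# All concrete names are hypothetical; each executions entry may optionally be
# a mapping that carries per-executor details such as its own 'suites' list.
#
#     exp = {
#         "description": "compare executors",
#         "data_file": "results.data",
#         "reporting": {},
#         "executions": ["ExecutorA", "ExecutorB"],
#         "suites": ["SuiteX", "SuiteY"],
#     }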
| 43.380165 | 88 | 0.658602 | [
"MIT"
] | Joshaa1999/ReBench | rebench/model/experiment.py | 5,249 | Python |
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import fire
import json
import jsonpickle
import random
import ast
from typing import Any, Union
from magma.common.redis.client import get_default_client
from magma.common.redis.serializers import get_json_deserializer, \
get_proto_deserializer
from magma.mobilityd.serialize_utils import deserialize_ip_block, \
deserialize_ip_desc
from lte.protos.keyval_pb2 import IPDesc
from lte.protos.policydb_pb2 import PolicyRule, InstalledPolicies
from lte.protos.oai.mme_nas_state_pb2 import MmeNasState, UeContext
from lte.protos.oai.spgw_state_pb2 import SpgwState, S11BearerContext
from lte.protos.oai.s1ap_state_pb2 import S1apState, UeDescription
def _deserialize_session_json(serialized_json_str: bytes) -> str:
"""
Helper function to deserialize sessiond:sessions hash list values
:param serialized_json_str
"""
res = _deserialize_generic_json(str(serialized_json_str, 'utf-8', 'ignore'))
dumped = json.dumps(res, indent=2, sort_keys=True)
return dumped
def _deserialize_generic_json(
        element: Union[str, dict, list]) -> Union[str, dict, list]:
"""
Helper function to deserialize dictionaries or list with nested
json strings
:param element
"""
if isinstance(element, str):
# try to deserialize as json string
try:
element = ast.literal_eval(element)
except:
try:
element = jsonpickle.decode(element)
except:
return element
if isinstance(element, dict):
keys = element.keys()
elif isinstance(element, list):
keys = range(len(element))
else:
# in case it is neither of the know elements, just return as is
return element
for k in keys:
element[k] = _deserialize_generic_json(element[k])
return element
class StateCLI(object):
"""
CLI for debugging current Magma services state and displaying it
in readable manner.
"""
STATE_DESERIALIZERS = {
'assigned_ip_blocks': deserialize_ip_block,
'ip_states': deserialize_ip_desc,
'sessions': _deserialize_session_json,
'rule_names': get_json_deserializer(),
'rule_ids': get_json_deserializer(),
'rule_versions': get_json_deserializer(),
}
STATE_PROTOS = {
'mme_nas_state': MmeNasState,
'spgw_state': SpgwState,
's1ap_state': S1apState,
'mme': UeContext,
'spgw': S11BearerContext,
's1ap': UeDescription,
'mobilityd_ipdesc_record': IPDesc,
'rules': PolicyRule,
'installed': InstalledPolicies,
}
def __init__(self):
self.client = get_default_client()
def keys(self, redis_key: str):
"""
Get current keys on redis db that match the pattern
Args:
redis_key:pattern to match the redis keys
"""
for k in self.client.keys(pattern="{}*".format(redis_key)):
deserialized_key = k.decode('utf-8')
print(deserialized_key)
def parse(self, key: str):
"""
Parse value of redis key on redis for encoded HASH, SET types, or
JSON / Protobuf encoded state-wrapped types and prints it
Args:
key: key on redis
"""
redis_type = self.client.type(key).decode('utf-8')
key_type = key
if ":" in key:
key_type = key.split(":")[1]
if redis_type == 'hash':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_hash_type(deserializer, key)
elif redis_type == 'set':
deserializer = self.STATE_DESERIALIZERS.get(key_type)
if not deserializer:
raise AttributeError('Key not found on redis')
self._parse_set_type(deserializer, key)
else:
value = self.client.get(key)
# Try parsing as json first, if there's decoding error, parse proto
try:
self._parse_state_json(value)
except UnicodeDecodeError:
self._parse_state_proto(key_type, value)
def corrupt(self, key):
"""
Mostly used for debugging, purposely corrupts state encoded protobuf
in redis, and writes it back to datastore
Args:
key: key on redis
"""
rand_bytes = random.getrandbits(8)
byte_str = bytes([rand_bytes])
self.client[key] = byte_str
print('Corrupted %s in redis' % key)
def _parse_state_json(self, value):
if value:
deserializer = get_json_deserializer()
value = json.loads(jsonpickle.encode(deserializer(value)))
print(json.dumps(value, indent=2, sort_keys=True))
else:
raise AttributeError('Key not found on redis')
def _parse_state_proto(self, key_type, value):
proto = self.STATE_PROTOS.get(key_type.lower())
if proto:
deserializer = get_proto_deserializer(proto)
print(deserializer(value))
else:
raise AttributeError('Key not found on redis')
def _parse_set_type(self, deserializer, key):
set_values = self.client.smembers(key)
for value in set_values:
print(deserializer(value))
def _parse_hash_type(self, deserializer, key):
value = self.client.hgetall(key)
for key, val in value.items():
print(key.decode('utf-8'))
print(deserializer(val))
if __name__ == "__main__":
state_cli = StateCLI()
try:
fire.Fire(state_cli)
except Exception as e:
print('Error: {}'.format(e))
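# Example invocations (sketch; python-fire exposes the methods above as
# subcommands, and the redis key names shown here are hypothetical):
#
#     python3 state_cli.py keys IMSI
#     python3 state_cli.py parse IMSI001010000000001:mme
#     python3 state_cli.py corrupt mme_nas_state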
| 31.852041 | 80 | 0.645363 | [
"BSD-3-Clause"
] | Rajpratik71/magma | lte/gateway/python/scripts/state_cli.py | 6,243 | Python |
'''
Created on May 11, 2017
@author: optas
'''
import numpy as np
import tensorflow as tf
from tflearn.layers.normalization import batch_normalization
from tflearn.layers.core import fully_connected, dropout
from . encoders_decoders import encoder_with_convs_and_symmetry, decoder_with_fc_only
from . tf_utils import leaky_relu
from . tf_utils import expand_scope_by_name
def mlp_discriminator(in_signal, non_linearity=tf.nn.relu, reuse=False, scope=None, b_norm=True, dropout_prob=None):
''' used in nips submission.
'''
encoder_args = {'n_filters': [64, 128, 256, 256, 512], 'filter_sizes': [1, 1, 1, 1, 1], 'strides': [1, 1, 1, 1, 1]}
encoder_args['reuse'] = reuse
encoder_args['scope'] = scope
encoder_args['non_linearity'] = non_linearity
encoder_args['dropout_prob'] = dropout_prob
encoder_args['b_norm'] = b_norm
layer = encoder_with_convs_and_symmetry(in_signal, **encoder_args)
name = 'decoding_logits'
scope_e = expand_scope_by_name(scope, name)
d_logit = decoder_with_fc_only(layer, layer_sizes=[128, 64, 1], b_norm=b_norm, reuse=reuse, scope=scope_e)
d_prob = tf.nn.sigmoid(d_logit)
return d_prob, d_logit
def point_cloud_generator(z, pc_dims, layer_sizes=[64, 128, 512, 1024], non_linearity=tf.nn.relu, b_norm=False, b_norm_last=False, dropout_prob=None):
''' used in nips submission.
'''
n_points, dummy = pc_dims
if (dummy != 3):
raise ValueError()
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm)
out_signal = non_linearity(out_signal)
if dropout_prob is not None:
out_signal = dropout(out_signal, dropout_prob)
if b_norm_last:
out_signal = batch_normalization(out_signal)
out_signal = fully_connected(out_signal, np.prod([n_points, 3]), activation='linear', weights_init='xavier')
out_signal = tf.reshape(out_signal, [-1, n_points, 3])
return out_signal
def convolutional_discriminator(in_signal, non_linearity=tf.nn.relu,
encoder_args={'n_filters': [128, 128, 256, 512], 'filter_sizes': [40, 20, 10, 10], 'strides': [1, 2, 2, 1]},
decoder_layer_sizes=[128, 64, 1],
reuse=False, scope=None):
encoder_args['reuse'] = reuse
encoder_args['scope'] = scope
encoder_args['non_linearity'] = non_linearity
layer = encoder_with_convs_and_symmetry(in_signal, **encoder_args)
name = 'decoding_logits'
scope_e = expand_scope_by_name(scope, name)
d_logit = decoder_with_fc_only(layer, layer_sizes=decoder_layer_sizes, non_linearity=non_linearity, reuse=reuse, scope=scope_e)
d_prob = tf.nn.sigmoid(d_logit)
return d_prob, d_logit
def latent_code_generator(z, out_dim, layer_sizes=[64, 128], b_norm=False):
layer_sizes = layer_sizes + out_dim
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, b_norm=b_norm)
out_signal = tf.nn.relu(out_signal)
return out_signal
def latent_code_discriminator(in_singnal, layer_sizes=[64, 128, 256, 256, 512], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None):
layer_sizes = layer_sizes + [1]
d_logit = decoder_with_fc_only(in_singnal, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm, reuse=reuse, scope=scope)
d_prob = tf.nn.sigmoid(d_logit)
return d_prob, d_logit
def latent_code_discriminator_two_layers(in_signal, layer_sizes=[256, 512], b_norm=False, non_linearity=tf.nn.relu, reuse=False, scope=None):
''' Used in ICML submission.
'''
layer_sizes = layer_sizes + [1]
d_logit = decoder_with_fc_only(in_signal, layer_sizes=layer_sizes, non_linearity=non_linearity, b_norm=b_norm, reuse=reuse, scope=scope)
d_prob = tf.nn.sigmoid(d_logit)
return d_prob, d_logit
def latent_code_generator_two_layers(z, out_dim, layer_sizes=[128], b_norm=False):
''' Used in ICML submission.
'''
layer_sizes = layer_sizes + out_dim
out_signal = decoder_with_fc_only(z, layer_sizes=layer_sizes, b_norm=b_norm)
out_signal = tf.nn.relu(out_signal)
return out_signal
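# Minimal usage sketch (assumes TensorFlow 1.x graph mode, as used above):
#
#     z = tf.placeholder(tf.float32, shape=[None, 128])
#     d_prob, d_logit = latent_code_discriminator_two_layers(z, scope='latent_gan')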
| 40 | 150 | 0.717067 | [
"MIT"
] | 15034458181/latent_3d_points | src/generators_discriminators.py | 4,160 | Python |
s = "Ellis"
vowel_count = 0
i = 0
while i < len(s):
    if s[i] in ('a', 'e', 'i', 'o', 'u'):
vowel_count += 1
i += 1
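# A shorter equivalent (sketch) that, like the loop above, counts only the
# lowercase vowels in s:
#     vowel_count = sum(1 for ch in s if ch in 'aeiou')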
print(vowel_count)
| 11.5 | 36 | 0.514493 | [
"Unlicense"
] | EllisBarnes00/COP-1000 | Chapter 08 - More About Strings/MPL Extra Loops and Strings/51005.py | 138 | Python |
'''
07 - March 29, throughout a decade
Daylight Saving rules are complicated: they're different in different
places, they change over time, and they usually start on a Sunday (and
so they move around the calendar).
For example, in the United Kingdom, as of the time this lesson was written,
Daylight Saving begins on the last Sunday in March. Let's look at the UTC
offset for March 29, at midnight, for the years 2000 to 2010.
Instructions
- Using tz, set the timezone for dt to be 'Europe/London'.
- Within the for loop:
- Use the .replace() method to change the year for dt to be y.
- Call .isoformat() on the result to observe the results.
'''
# Import datetime and tz
from datetime import datetime
from dateutil import tz
# Create starting date
dt = datetime(2000, 3, 29, tzinfo=tz.gettz('Europe/London'))
# Loop over the dates, replacing the year, and print the ISO timestamp
for y in range(2000, 2011):
print(dt.replace(year=y).isoformat())
'''
As you can see, the rules for Daylight Saving are not trivial. When in doubt,
always use tz instead of hand-rolling timezones, so it will catch the Daylight
Saving rules (and rule changes!) for you.
<script.py> output:
2000-03-29T00:00:00+01:00
2001-03-29T00:00:00+01:00
2002-03-29T00:00:00+00:00
2003-03-29T00:00:00+00:00
2004-03-29T00:00:00+01:00
2005-03-29T00:00:00+01:00
2006-03-29T00:00:00+01:00
2007-03-29T00:00:00+01:00
2008-03-29T00:00:00+00:00
2009-03-29T00:00:00+00:00
2010-03-29T00:00:00+01:00
'''
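# Sketch: the same offsets can be inspected programmatically, e.g.
# dt.replace(year=2002).utcoffset() is zero (GMT) while
# dt.replace(year=2004).utcoffset() is one hour (BST).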
| 31.666667 | 79 | 0.719079 | [
"MIT"
] | mohd-faizy/CAREER-TRACK-Data-Scientist-with-Python | 18_Working with Dates and Times in Python/03_Time Zones and Daylight Saving/07_March 29, throughout a decade.py | 1,520 | Python |
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxDepth(self, root: TreeNode) -> int:
if root is None:
return 0
return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))
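# Quick usage sketch: a three-node tree of depth 2.
if __name__ == "__main__":
    root = TreeNode(1, TreeNode(2), TreeNode(3))
    print(Solution().maxDepth(root))  # 2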
| 26.357143 | 75 | 0.612466 | [
"MIT"
] | anishLearnsToCode/leetcode-algorithms | python/maximum_depth_of_binary_tree.py | 369 | Python |
"""Add genres back
Revision ID: 1d393bb338a4
Revises: 126ecfb9a15e
Create Date: 2020-08-23 12:21:59.354200
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1d393bb338a4'
down_revision = '126ecfb9a15e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('artists', sa.Column('genres', sa.ARRAY(sa.String(length=30)), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('artists', 'genres')
# ### end Alembic commands ###
| 23.241379 | 96 | 0.692878 | [
"MIT"
] | pavponn/fyyur | migrations/versions/1d393bb338a4_add_genres_back.py | 674 | Python |
import decimal
from graphene.types import Scalar
from graphql.language import ast
# See: https://github.com/graphql-python/graphene-django/issues/91#issuecomment-305542169
class Decimal(Scalar):
"""
The `Decimal` scalar type represents a python Decimal.
"""
@staticmethod
def serialize(dec):
assert isinstance(dec, decimal.Decimal), (
'Received not compatible Decimal "{}"'.format(repr(dec))
)
return str(dec)
@staticmethod
def parse_value(value):
return decimal.Decimal(value)
@classmethod
def parse_literal(cls, node):
if isinstance(node, ast.StringValue):
return cls.parse_value(node.value)
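# Usage sketch (the graphene object type and resolver below are hypothetical):
#
#     import graphene
#
#     class Query(graphene.ObjectType):
#         price = Decimal()
#
#         def resolve_price(root, info):
#             return decimal.Decimal("9.99")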
| 26.846154 | 89 | 0.669054 | [
"MIT"
] | abdulwahed-dev/travel-expense-manager | backend/backend/core/graphql/scalars.py | 698 | Python |
# Generated by Django 2.0.2 on 2018-02-24 04:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Food',
fields=[
('ndb_no', models.CharField(max_length=10, primary_key=True, serialize=False)),
('description', models.CharField(max_length=69)),
('water', models.FloatField(blank=True, null=True)),
('kcal', models.FloatField(blank=True, null=True)),
('protein', models.FloatField(blank=True, null=True)),
('fat', models.FloatField(blank=True, null=True)),
('carbohydrates', models.FloatField(blank=True, null=True)),
('fiber', models.FloatField(blank=True, null=True)),
('sugar', models.FloatField(blank=True, null=True)),
('calcium', models.FloatField(blank=True, null=True)),
('iron', models.FloatField(blank=True, null=True)),
('magnesium', models.FloatField(blank=True, null=True)),
('potassium', models.FloatField(blank=True, null=True)),
('sodium', models.FloatField(blank=True, null=True)),
('zinc', models.FloatField(blank=True, null=True)),
('folate', models.FloatField(blank=True, null=True)),
('vitamin_b12', models.FloatField(blank=True, null=True)),
('vitamin_a', models.FloatField(blank=True, null=True)),
('vitamin_c', models.FloatField(blank=True, null=True)),
('vitamin_d', models.FloatField(blank=True, null=True)),
],
),
]
| 43.7 | 95 | 0.565217 | [
"MIT"
] | lukebiggerstaff/usda-food-viz | foodviz/search/migrations/0001_initial.py | 1,748 | Python |
class Solution:
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
def prep(nums, k):
            dr = len(nums) - k  # number of digits to drop
            stay = []  # the list of kept digits
for num in nums:
                # room to drop: dr
                # reason to drop: stay[-1] < num, i.e. the stack method:
                # replace on the way up, keep on the way down.
while dr and stay and stay[-1] < num:
stay.pop()
dr -= 1
stay.append(num)
return stay[:k]
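        # merge greedily: at every step pop the head of whichever remaining
        # list compares larger (Python compares lists lexicographically), so
        # the bigger leading digit (and, on ties, the stronger tail) is taken first.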
def merge(x, y):
return [max(x, y).pop(0) for _ in x + y]
l1 = len(nums1)
l2 = len(nums2)
#dr = l1 + l2 -k
r = [0]
for i in range(k + 1):
            # try every possible split of k between nums1 and nums2 and keep the larger result
if i <= l1 and k-i <= l2:
r = max(merge(prep(nums1, i), prep(nums2, k - i)), r)
return r
if __name__ == "__main__":
n1 = [3, 4, 6, 5]
n2 = [9, 1, 2, 5, 8, 3]
k = 5
so = Solution()
res = so.maxNumber(n1, n2, k)
print(res)
| 24.931818 | 69 | 0.403829 | [
"MIT"
] | wisesky/LeetCode-Practice | src/321. Create Maximum Number.py | 1,191 | Python |
"""Misc. regolith tools.
"""
import email.utils
import os
import platform
import re
import sys
import time
from copy import deepcopy
from calendar import monthrange
from datetime import datetime, date, timedelta
from regolith.dates import month_to_int, date_to_float, get_dates
from regolith.sorters import doc_date_key, id_key, ene_date_key
from regolith.chained_db import ChainDB
try:
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bibdatabase import BibDatabase
HAVE_BIBTEX_PARSER = True
except ImportError:
HAVE_BIBTEX_PARSER = False
LATEX_OPTS = ["-halt-on-error", "-file-line-error"]
if sys.version_info[0] >= 3:
string_types = (str, bytes)
unicode_type = str
else:
pass
# string_types = (str, unicode)
# unicode_type = unicode
DEFAULT_ENCODING = sys.getdefaultencoding()
ON_WINDOWS = platform.system() == "Windows"
ON_MAC = platform.system() == "Darwin"
ON_LINUX = platform.system() == "Linux"
ON_POSIX = os.name == "posix"
def dbdirname(db, rc):
"""Gets the database dir name."""
if db.get("local", False) is False:
dbsdir = os.path.join(rc.builddir, "_dbs")
dbdir = os.path.join(dbsdir, db["name"])
else:
dbdir = db["url"]
return dbdir
def dbpathname(db, rc):
"""Gets the database path name."""
dbdir = dbdirname(db, rc)
dbpath = os.path.join(dbdir, db["path"])
return dbpath
def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if
cond is false. """
def dec(obj):
return obj if cond else backup
return dec
def all_docs_from_collection(client, collname, copy=True):
"""Yield all entries in for all collections of a given name in a given
database. """
yield from client.all_documents(collname, copy=copy)
SHORT_MONTH_NAMES = (
None,
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sept",
"Oct",
"Nov",
"Dec",
)
def date_to_rfc822(y, m, d=1):
"""Converts a date to an RFC 822 formatted string."""
d = datetime(int(y), month_to_int(m), int(d))
return email.utils.format_datetime(d)
def rfc822now():
"""Creates a string of the current time according to RFC 822."""
now = datetime.utcnow()
return email.utils.format_datetime(now)
def gets(seq, key, default=None):
"""Gets a key from every element of a sequence if possible."""
for x in seq:
yield x.get(key, default)
def month_and_year(m=None, y=None):
"""Creates a string from month and year data, if available."""
if y is None:
return "present"
if m is None:
return str(y)
m = month_to_int(m)
return "{0} {1}".format(SHORT_MONTH_NAMES[m], y)
def is_since(y, sy, m=1, d=1, sm=1, sd=1):
"""
tests whether a date is on or since another date
Parameters
----------
y : int
the year to be tested
sy : int
the since year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
sd: int
the since day. Optional, defaults to 1
Returns
-------
True if the target date is the same as, or more recent than, the since date
"""
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
d = "{}/{}/{}".format(d, month_to_int(m), y)
since = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return since <= date
def is_before(y, by, m=12, d=None, bm=12, bd=None):
"""
tests whether a date is on or before another date
Parameters
----------
y : int
the year to be tested
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Dec
d : int
the day to be tested. Defaults to last day of the month
bm : int or str
the before month. Optional, defaults to Dec
bd: int
the before day. Optional, defaults to last day of the month
Returns
-------
True if the target date is the same as, or earlier than, the before date
"""
if not d:
d = monthrange(y, month_to_int(m))[1]
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
b = "{}/{}/{}".format(bd, month_to_int(bm), by)
d = "{}/{}/{}".format(d, month_to_int(m), y)
before = time.mktime(datetime.strptime(b, "%d/%m/%Y").timetuple())
date = time.mktime(datetime.strptime(d, "%d/%m/%Y").timetuple())
return before >= date
def is_between(y, sy, by, m=1, d=1, sm=1, sd=1, bm=12, bd=None):
"""
tests whether a date is on or between two other dates
returns true if the target date is between the since date and the before
date, inclusive.
Parameters
----------
y : int
the year to be tested
sy : int
the since year
by : int
the before year
m : int or str
the month to be tested. Optional, defaults to Jan
d : int
the day to be tested. Defaults to 1
sm : int or str
the since month. Optional, defaults to Jan
bm : int or str
the before month. Optional, defaults to Dec
sd: int
the since day. Optional, defaults to 1
bd: int
        the before day. Optional, defaults to the last day of the month
Returns
-------
True if the target date is between the since date and the before date,
inclusive (i.e., returns true if the target date is the same as either the
since date or the before date)
"""
if not bd:
bd = monthrange(by, month_to_int(bm))[1]
return is_since(y, sy, m=m, d=d, sm=sm, sd=sd) and is_before(
y, by, m=m, d=d, bm=bm, bd=bd
)
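# For example (sketch): is_between(2019, 2018, 2020) is True, while
# is_between(2021, 2018, 2020) is False.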
def has_started(sy, sm=None, sd=None):
"""
true if today is after the dates given, inclusive
Parameters
----------
sy : int
the year to check today against
sm : int or str.
the month to check today against. Should be integer or in regolith MONTHS.
default is 1
sd : int.
the day to check today against. Default is 1
Returns
-------
bool
true if today is after dates given
"""
if not sm:
sm = 1
if not sd:
sd = 1
s = "{}/{}/{}".format(sd, month_to_int(sm), sy)
start = time.mktime(datetime.strptime(s, "%d/%m/%Y").timetuple())
return start <= time.time()
def has_finished(ey, em=None, ed=None):
"""
    true if the given end date has passed, i.e. today is on or after it
Parameters
----------
ey : int
end year, the year to check today against
em : int or str.
end month, the month to check today against. Should be integer or in regolith MONTHS.
        default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is on or after the end date given
"""
if not em:
em = 12
if not ed:
ed = monthrange(ey, month_to_int(em))[1]
e = "{}/{}/{}".format(ed, month_to_int(em), ey)
end = time.mktime(datetime.strptime(e, "%d/%m/%Y").timetuple())
return end <= time.time()
def is_current(sy, ey, sm=None, sd=None, em=None, ed=None):
"""
true if today is between the dates given, inclusive
Parameters
----------
sy : int
start year, the year to check today is after
ey : int
end year, the year to check today is before
sm : int or str
start month, the month to check today is after. Should be integer or in
regolith MONTHS. Default is 1
sd : int
start day, the day to check today after. Default is 1
em : int or str.
end month, the month to check today against. Should be integer or in
regolith MONTHS. Default is 12
ed : int.
end-day, the day to check today against. Default is last day of the month
Returns
-------
bool
        true if today is between the start and end dates given, inclusive
"""
return has_started(sy, sm, sd) and not has_finished(ey, em, ed)
def filter_publications(citations, authors, reverse=False, bold=True):
"""Filter publications by the author(s)/editor(s)
Parameters
----------
citations : list of dict
The publication citations
authors : set of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
bold : bool, optional
If True put latex bold around the author(s) in question
"""
pubs = []
for pub in citations:
if (
len((set(pub.get("author", [])) | set(
pub.get("editor", []))) & authors)
== 0
):
continue
pub = deepcopy(pub)
if bold:
bold_self = []
for a in pub["author"]:
if a in authors:
bold_self.append("\\textbf{" + a + "}")
else:
bold_self.append(a)
pub["author"] = bold_self
else:
pub = deepcopy(pub)
pubs.append(pub)
pubs.sort(key=doc_date_key, reverse=reverse)
return pubs
def filter_projects(projects, authors, reverse=False):
"""Filter projects by the author(s)
Parameters
----------
projects : list of dict
The publication citations
authors : set of list of str
The authors to be filtered against
reverse : bool, optional
If True reverse the order, defaults to False
"""
projs = []
for proj in projects:
team_names = set(gets(proj["team"], "name"))
if len(team_names & authors) == 0:
continue
# FIXME delete these lines if not required. I think they are wrong (SJLB)
# proj = dict(proj)
# proj["team"] = [x for x in proj["team"] if x["name"] in authors]
projs.append(proj)
projs.sort(key=id_key, reverse=reverse)
return projs
def filter_grants(input_grants, names, pi=True, reverse=True, multi_pi=False):
"""Filter grants by those involved
Parameters
----------
input_grants : list of dict
The grants to filter
names : set of str
The authors to be filtered against
pi : bool, optional
If True add the grant amount to that person's total amount
reverse : bool, optional
If True reverse the order, defaults to False
multi_pi : bool, optional
If True compute sub-awards for multi PI grants, defaults to False
"""
grants = []
total_amount = 0.0
subaward_amount = 0.0
for grant in input_grants:
team_names = set(gets(grant["team"], "name"))
if len(team_names & names) == 0:
continue
grant = deepcopy(grant)
person = [x for x in grant["team"] if x["name"] in names][0]
if pi:
if person["position"].lower() == "pi":
total_amount += grant["amount"]
else:
continue
elif multi_pi:
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["multi_pi"] = any(gets(grant["team"], "subaward_amount"))
else:
if person["position"].lower() == "pi":
continue
else:
total_amount += grant["amount"]
subaward_amount += person.get("subaward_amount", 0.0)
grant["subaward_amount"] = person.get("subaward_amount", 0.0)
grant["pi"] = [
x for x in grant["team"] if x["position"].lower() == "pi"
][0]
grant["me"] = person
grants.append(grant)
grants.sort(key=ene_date_key, reverse=reverse)
return grants, total_amount, subaward_amount
def awards_grants_honors(p):
"""Make sorted awards grants and honors list.
Parameters
----------
p : dict
The person entry
"""
aghs = []
for x in p.get("funding", ()):
d = {
"description": "{0} ({1}{2:,})".format(
latex_safe(x["name"]),
x.get("currency", "$").replace("$", "\$"),
x["value"],
),
"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0)),
}
aghs.append(d)
for x in p.get("service", []) + p.get("honors", []):
d = {"description": latex_safe(x["name"])}
if "year" in x:
d.update(
{"year": x["year"],
"_key": date_to_float(x["year"], x.get("month", 0))}
)
elif "begin_year" in x and "end_year" in x:
d.update(
{
"year": "{}-{}".format(x["begin_year"], x["end_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
elif "begin_year" in x:
d.update(
{
"year": "{}".format(x["begin_year"]),
"_key": date_to_float(x["begin_year"], x.get("month", 0)),
}
)
aghs.append(d)
aghs.sort(key=(lambda x: x.get("_key", 0.0)), reverse=True)
return aghs
HTTP_RE = re.compile(
r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,4}\b([-a-zA-Z0-9@:%_\+.~#?&//=]*)"
)
def latex_safe_url(s):
"""Makes a string that is a URL latex safe."""
return s.replace("#", r"\#")
def latex_safe(s, url_check=True, wrapper="url"):
"""Make string latex safe
Parameters
----------
s : str
url_check : bool, optional
If True check for URLs and wrap them, if False check for URL but don't
wrap, defaults to True
wrapper : str, optional
The wrapper for wrapping urls defaults to url
"""
if not s:
return s
if url_check:
# If it looks like a URL make it a latex URL
url_search = HTTP_RE.search(s)
if url_search:
url = r"{start}\{wrapper}{{{s}}}{end}".format(
start=(latex_safe(s[: url_search.start()])),
end=(latex_safe(s[url_search.end():])),
wrapper=wrapper,
s=latex_safe_url(s[url_search.start(): url_search.end()]),
)
return url
return (
s.replace("&", r"\&")
.replace("$", r"\$")
.replace("#", r"\#")
.replace("_", r"\_")
)
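# For example (sketch): latex_safe("AT&T #1") returns "AT\&T \#1", while a
# string containing a URL such as "https://example.com/a_b" gets the URL
# wrapped in \url{...} with only '#' escaped inside it.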
def make_bibtex_file(pubs, pid, person_dir="."):
"""Make a bibtex file given the publications
Parameters
----------
pubs : list of dict
The publications
pid : str
The person id
person_dir : str, optional
The person's directory
"""
if not HAVE_BIBTEX_PARSER:
return None
skip_keys = {"ID", "ENTRYTYPE", "author"}
bibdb = BibDatabase()
bibwriter = BibTexWriter()
bibdb.entries = ents = []
for pub in pubs:
ent = dict(pub)
ent["ID"] = ent.pop("_id")
ent["ENTRYTYPE"] = ent.pop("entrytype")
for n in ["author", "editor"]:
if n in ent:
ent[n] = " and ".join(ent[n])
for key in ent.keys():
if key in skip_keys:
continue
ent[key] = latex_safe(str(ent[key]))
ents.append(ent)
fname = os.path.join(person_dir, pid) + ".bib"
with open(fname, "w", encoding="utf-8") as f:
f.write(bibwriter.write(bibdb))
return fname
def document_by_value(documents, address, value):
"""Get a specific document by one of its values
Parameters
----------
documents: generator
Generator which yields the documents
address: str or tuple
The address of the data in the document
value: any
The expected value for the document
Returns
-------
dict:
The first document which matches the request
"""
if isinstance(address, str):
address = (address,)
for g_doc in documents:
doc = deepcopy(g_doc)
for add in address:
doc = doc[add]
if doc == value:
return g_doc
def fuzzy_retrieval(documents, sources, value, case_sensitive=True):
"""Retrieve a document from the documents where value is compared against
multiple potential sources
Parameters
----------
documents: generator
The documents
sources: iterable
The potential data sources
value:
The value to compare against to find the document of interest
case_sensitive: Bool
When true will match case (Default = True)
Returns
-------
dict:
The document
Examples
--------
>>> fuzzy_retrieval(people, ['aka', 'name'], 'pi_name', case_sensitive = False)
This would get the person entry for which either the alias or the name was
``pi_name``.
"""
for doc in documents:
returns = []
for k in sources:
ret = doc.get(k, [])
if not isinstance(ret, list):
ret = [ret]
returns.extend(ret)
if not case_sensitive:
returns = [reti.lower() for reti in returns if
isinstance(reti, str)]
if isinstance(value, str):
if value.lower() in frozenset(returns):
return doc
else:
if value in frozenset(returns):
return doc
def number_suffix(number):
"""returns the suffix that adjectivises a number (st, nd, rd, th)
    Parameters
    ----------
number: integer
The number. If number is not an integer, returns an empty string
Returns
-------
suffix: string
The suffix (st, nd, rd, th)
"""
if not isinstance(number, (int, float)):
return ""
if 10 < number < 20:
suffix = "th"
else:
suffix = {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th")
return suffix
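# For example (sketch): number_suffix(1) -> 'st', number_suffix(2) -> 'nd',
# number_suffix(11) -> 'th', number_suffix(23) -> 'rd'.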
def dereference_institution(input_record, institutions):
"""Tool for replacing placeholders for institutions with the actual
institution data. Note that the replacement is done inplace
Parameters
----------
input_record : dict
The record to dereference
institutions : iterable of dicts
The institutions
"""
inst = input_record.get("institution") or input_record.get("organization")
if not inst:
error = input_record.get("position") or input_record.get("degree")
print("WARNING: no institution or organization but found {}".format(
error))
db_inst = fuzzy_retrieval(institutions, ["name", "_id", "aka"], inst)
if db_inst:
input_record["institution"] = db_inst["name"]
input_record["organization"] = db_inst["name"]
if db_inst.get("country") == "USA":
state_country = db_inst.get("state")
else:
state_country = db_inst.get("country")
input_record["location"] = "{}, {}".format(db_inst["city"],
state_country)
if not db_inst.get("departments"):
print("WARNING: no departments in {}. {} sought".format(
db_inst.get("_id"), inst))
if "department" in input_record and db_inst.get("departments"):
input_record["department"] = fuzzy_retrieval(
[db_inst["departments"]], ["name", "aka"],
input_record["department"]
)
else:
input_record["department"] = inst
def merge_collections(a, b, target_id):
"""
merge two collections into a single merged collection
for keys that are in both collections, the value in b will be kept
Parameters
----------
a the inferior collection (will lose values of shared keys)
b the superior collection (will keep values of shared keys)
target_id str the name of the key used in b to dereference ids in a
Returns
-------
the combined collection. Note that it returns a collection only containing
merged items from a and b that are dereferenced in b, i.e., the merged
intercept. If you want the union you can update the returned collection
with a.
Examples
--------
>>> grants = merge_collections(self.gtx["proposals"], self.gtx["grants"], "proposal_id")
This would merge all entries in the proposals collection with entries in the
grants collection for which "_id" in proposals has the value of
"proposal_id" in grants.
"""
adict = {}
for k in a:
adict[k.get("_id")] = k
bdict = {}
for k in b:
bdict[k.get("_id")] = k
b_for_a = {}
for k in adict:
for kk, v in bdict.items():
if v.get(target_id, "") == k:
b_for_a[k] = kk
chained = {}
for k, v in b_for_a.items():
chained[k] = ChainDB(adict[k], bdict[v])
return list(chained.values())
def update_schemas(default_schema, user_schema):
"""
Merging the user schema into the default schema recursively and return the
merged schema. The default schema and user schema will not be modified
during the merging.
Parameters
----------
default_schema : dict
The default schema.
user_schema : dict
The user defined schema.
Returns
-------
updated_schema : dict
The merged schema.
"""
updated_schema = deepcopy(default_schema)
for key in user_schema.keys():
if (key in updated_schema) and isinstance(updated_schema[key],
dict) and isinstance(
user_schema[key], dict):
updated_schema[key] = update_schemas(updated_schema[key],
user_schema[key])
else:
updated_schema[key] = user_schema[key]
return updated_schema
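# For example (sketch):
#     update_schemas({"a": {"x": 1, "y": 2}}, {"a": {"y": 3}, "b": 4})
#     returns {"a": {"x": 1, "y": 3}, "b": 4}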
def is_fully_loaded(appts):
status = True
earliest, latest = date.today(), date.today()
for appt in appts:
dates = get_dates(appt)
begin_date = dates['begin_date']
end_date = dates['end_date']
if latest == date.today():
latest = end_date
appt['begin_date'] = begin_date
appt['end_date'] = end_date
if begin_date < earliest:
earliest = begin_date
if end_date > latest:
latest = end_date
datearray = []
timespan = latest - earliest
for x in range(0, timespan.days):
datearray.append(earliest + timedelta(days=x))
loading = [0] * len(datearray)
for day in datearray:
for appt in appts:
if appt['begin_date'] <= day <= appt["end_date"]:
loading[datearray.index(day)] = loading[datearray.index(day)] + \
appt.get("loading")
if max(loading) > 1.0:
status = False
print("max {} at {}".format(max(loading),
datearray[
list(loading).index(max(loading))]))
elif min(loading) < 1.0:
status = False
print("min {} at {}".format(min(loading),
datearray[list(loading).index(min(loading))]
))
return status
def group(db, by):
"""
Group the document in the database according to the value of the doc[by] in db.
Parameters
----------
db : iterable
The database of documents.
by : basestring
The key to group the documents.
Returns
-------
grouped: dict
A dictionary mapping the feature value of group to the list of docs. All docs in the same generator have
the same value of doc[by].
Examples
--------
Here, we use a tuple of dict as an example of the database.
>>> db = ({"k": "v0"}, {"k": "v1"}, {"k": "v0"})
    >>> group(db, "k")
This will return
>>> {"v0": [{"k": "v0"}, {"k": "v0"}], "v1": [{"k": "v1"}]}
"""
grouped = {}
doc: dict
for doc in db:
key = doc.get(by)
if not key:
print("There is no field {} in {}".format(by, id_key(doc)))
elif key not in grouped:
grouped[key] = [doc]
else:
grouped[key].append(doc)
return grouped
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
def group_member_ids(ppl_coll, grpname):
"""Get a list of all group member ids
Parameters
----------
ppl_coll: collection (list of dicts)
The people collection that should contain the group members
    grpname: string
The id of the group in groups.yml
Returns
-------
set:
The set of ids of the people in the group
Notes
-----
- Groups that are being tracked are listed in the groups.yml collection
with a name and an id.
- People are in a group during an educational or employment period.
- To assign a person to a tracked group during one such period, add
a "group" key to that education/employment item with a value
that is the group id.
- This function takes the group id that is passed and searches
the people collection for all people that have been
      assigned to that group in some period of time, returning the set of their ids.
"""
grpmembers = set()
for person in ppl_coll:
for k in ["education", "employment"]:
for position in person.get(k, {}):
if position.get("group", None) == grpname:
grpmembers.add(person["_id"])
return grpmembers
| 29.21317 | 112 | 0.570162 | [
"CC0-1.0"
] | jc-umana/regolith | regolith/tools.py | 26,175 | Python |
"""
This module is for performance testing of EDA module in github action.
"""
from functools import partial
import pandas as pd
from typing import Any
from ...datasets import load_dataset
from ...eda import create_report
def report_func(df: pd.DataFrame, **kwargs: Any) -> None:
"""
Create report function, used for performance testing.
"""
create_report(df, **kwargs)
def test_create_report(benchmark: Any) -> None:
"""
Performance test of create report on titanic dataset.
"""
df = load_dataset("titanic")
benchmark(partial(report_func), df)
| 24.375 | 70 | 0.705983 | [
"MIT"
] | Bowen0729/dataprep | dataprep/tests/benchmarks/eda.py | 585 | Python |
async def donut_handler(args):
print('This is where we would configure things to properly handle a .donut file request.')
return '', ''
| 36 | 94 | 0.708333 | [
"Apache-2.0"
] | zaphodef/stockpile | app/donut.py | 144 | Python |
import time
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from absl import app, flags, logging
from absl.flags import FLAGS
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from PIL import Image
import cv2
import numpy as np
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
flags.DEFINE_string('framework', 'tf', '(tf, tflite, trt)')
flags.DEFINE_string('weights', './checkpoints/yolov4-416',
'path to weights file')
flags.DEFINE_integer('size', 416, 'resize images to')
flags.DEFINE_boolean('tiny', False, 'yolo or yolo-tiny')
flags.DEFINE_string('model', 'yolov4', 'yolov3 or yolov4')
flags.DEFINE_string('video', './data/road.mp4', 'path to input video')
flags.DEFINE_float('iou', 0.45, 'iou threshold')
flags.DEFINE_float('score', 0.25, 'score threshold')
flags.DEFINE_string('output', None, 'path to output video')
flags.DEFINE_string('output_format', 'XVID', 'codec used in VideoWriter when saving video to file')
flags.DEFINE_boolean('dis_cv2_window', False, 'disable cv2 window during the process') # this is good for the .ipynb
flags.DEFINE_string('class_file_name', './data/classes/coco.names', 'file contain the class names')
def main(_argv):
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES, ANCHORS, NUM_CLASS, XYSCALE = utils.load_config(FLAGS)
input_size = FLAGS.size
video_path = FLAGS.video
print("Video from: ", video_path )
vid = cv2.VideoCapture(video_path)
if FLAGS.framework == 'tflite':
interpreter = tf.lite.Interpreter(model_path=FLAGS.weights)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
else:
saved_model_loaded = tf.saved_model.load(FLAGS.weights, tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
if FLAGS.output:
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
out = cv2.VideoWriter(FLAGS.output, codec, fps, (width, height))
frame_id = 0
while True:
return_value, frame = vid.read()
if return_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
if frame_id == vid.get(cv2.CAP_PROP_FRAME_COUNT):
print("Video processing complete")
break
raise ValueError("No image! Try with another video format")
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
prev_time = time.time()
if FLAGS.framework == 'tflite':
interpreter.set_tensor(input_details[0]['index'], image_data)
interpreter.invoke()
pred = [interpreter.get_tensor(output_details[i]['index']) for i in range(len(output_details))]
if FLAGS.model == 'yolov3' and FLAGS.tiny == True:
boxes, pred_conf = filter_boxes(pred[1], pred[0], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
boxes, pred_conf = filter_boxes(pred[0], pred[1], score_threshold=0.25,
input_shape=tf.constant([input_size, input_size]))
else:
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=FLAGS.iou,
score_threshold=FLAGS.score
)
pred_bbox = [boxes.numpy(), scores.numpy(), classes.numpy(), valid_detections.numpy()]
        image = utils.draw_bbox(frame, pred_bbox, class_file_name=FLAGS.class_file_name)
curr_time = time.time()
exec_time = curr_time - prev_time
result = np.asarray(image)
info = "time: %.2f ms" %(1000*exec_time)
print(info)
result = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if not FLAGS.dis_cv2_window:
cv2.namedWindow("result", cv2.WINDOW_AUTOSIZE)
cv2.imshow("result", result)
if cv2.waitKey(1) & 0xFF == ord('q'): break
if FLAGS.output:
out.write(result)
frame_id += 1
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
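# Example invocation (sketch; paths follow the default flag values above and
# the output path is hypothetical):
#
#     python detectvideo.py --weights ./checkpoints/yolov4-416 --size 416 \
#         --model yolov4 --video ./data/road.mp4 --output ./detections.avi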
| 42.449612 | 116 | 0.646092 | [
"MIT"
] | gaowei0518/tensorflow-yolov4-tflite | detectvideo.py | 5,476 | Python |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import math
import dace
import polybench
N = dace.symbol('N')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{N: 30}, {N: 90}, {N: 250}, {N: 1300}, {N: 2800}]
args = [([N, N], datatype), ([N, N], datatype), ([N], datatype), ([N], datatype), ([N], datatype), ([1], datatype),
([1], datatype)]
outputs = [(4, 'y')]
def init_array(A, B, tmp, x, y, alpha, beta):
n = N.get()
alpha[0] = datatype(1.5)
beta[0] = datatype(1.2)
for i in range(n):
x[i] = datatype(i % n) / n
for j in range(n):
A[i, j] = datatype((i * j + 1) % n) / n
B[i, j] = datatype((i * j + 2) % n) / n
@dace.program(datatype[N, N], datatype[N, N], datatype[N], datatype[N], datatype[N], datatype[1], datatype[1])
def gesummv(A, B, tmp, x, y, alpha, beta):
@dace.map
def compute_ty(i: _[0:N], j: _[0:N]):
ia << A[i, j]
ib << B[i, j]
ix << x[j]
ot >> tmp(1, lambda a, b: a + b, 0)[i]
oy >> y(1, lambda a, b: a + b, 0)[i]
ot = ia * ix
oy = ib * ix
@dace.map
def update_y(i: _[0:N]):
iy << y[i]
ialpha << alpha
ibeta << beta
it << tmp[i]
oy >> y[i]
oy = ialpha * it + ibeta * iy
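# In dense-algebra terms (a sketch of the math, not part of the original file):
# tmp = A @ x and the final result is y = alpha * (A @ x) + beta * (B @ x).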
if __name__ == '__main__':
polybench.main(sizes, args, outputs, init_array, gesummv)
| 25.137931 | 115 | 0.508916 | [
"BSD-3-Clause"
] | Berke-Ates/dace | samples/polybench/gesummv.py | 1,458 | Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 unga <[email protected]>
#
# Distributed under terms of the MIT license.
"""
Monitor a set of parameters in a background thread
stream output over websocket
To start monitor, run this file, or if qcodes is installed as a module:
``% python -m qcodes.monitor.monitor``
Add parameters to monitor in your measurement by creating a new monitor with a
list of parameters to monitor:
``monitor = qcodes.Monitor(param1, param2, param3, ...)``
"""
import sys
import logging
import os
import time
import json
from contextlib import suppress
from typing import Dict, Union, Any, Optional, Sequence, Callable, Awaitable
from collections import defaultdict
import asyncio
from asyncio import CancelledError
from threading import Thread, Event
import socketserver
import webbrowser
import websockets
from qcodes.instrument.parameter import Parameter
if sys.version_info < (3, 7):
all_tasks = asyncio.Task.all_tasks
else:
all_tasks = asyncio.all_tasks
WEBSOCKET_PORT = 5678
SERVER_PORT = 3000
log = logging.getLogger(__name__)
def _get_metadata(*parameters: Parameter) -> Dict[str, Any]:
"""
Return a dictionary that contains the parameter metadata grouped by the
instrument it belongs to.
"""
metadata_timestamp = time.time()
# group metadata by instrument
metas: dict = defaultdict(list)
for parameter in parameters:
# Get the latest value from the parameter,
# respecting the max_val_age parameter
meta: Dict[str, Optional[Union[float, str]]] = {}
meta["value"] = str(parameter.get_latest())
timestamp = parameter.get_latest.get_timestamp()
if timestamp is not None:
meta["ts"] = timestamp.timestamp()
else:
meta["ts"] = None
meta["name"] = parameter.label or parameter.name
meta["unit"] = parameter.unit
# find the base instrument that this parameter belongs to
baseinst = parameter.root_instrument
if baseinst is None:
metas["Unbound Parameter"].append(meta)
else:
metas[str(baseinst)].append(meta)
# Create list of parameters, grouped by instrument
parameters_out = []
for instrument in metas:
temp = {"instrument": instrument, "parameters": metas[instrument]}
parameters_out.append(temp)
state = {"ts": metadata_timestamp, "parameters": parameters_out}
return state
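# The resulting state dict (and hence the JSON sent to listeners) looks like
# this sketch:
#     {"ts": 1590000000.0,
#      "parameters": [{"instrument": "<instrument repr>",
#                      "parameters": [{"value": "...", "ts": ...,
#                                      "name": "...", "unit": "..."}]}]}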
def _handler(parameters: Sequence[Parameter], interval: float) \
-> Callable[[websockets.WebSocketServerProtocol, str], Awaitable[None]]:
"""
Return the websockets server handler.
"""
async def server_func(websocket: websockets.WebSocketServerProtocol, _: str) -> None:
"""
Create a websockets handler that sends parameter values to a listener
every "interval" seconds.
"""
while True:
try:
# Update the parameter values
try:
meta = _get_metadata(*parameters)
except ValueError:
log.exception("Error getting parameters")
break
log.debug("sending.. to %r", websocket)
await websocket.send(json.dumps(meta))
# Wait for interval seconds and then send again
await asyncio.sleep(interval)
except (CancelledError, websockets.exceptions.ConnectionClosed):
log.debug("Got CancelledError or ConnectionClosed",
exc_info=True)
break
log.debug("Closing websockets connection")
return server_func
class Monitor(Thread):
"""
QCodes Monitor - WebSockets server to monitor qcodes parameters.
"""
running = None
def __init__(self, *parameters: Parameter, interval: float = 1):
"""
Monitor qcodes parameters.
Args:
*parameters: Parameters to monitor.
interval: How often one wants to refresh the values.
"""
super().__init__()
# Check that all values are valid parameters
for parameter in parameters:
if not isinstance(parameter, Parameter):
raise TypeError(f"We can only monitor QCodes "
f"Parameters, not {type(parameter)}")
self.loop: Optional[asyncio.AbstractEventLoop] = None
self.server: Optional[websockets.WebSocketServer] = None
self._parameters = parameters
self.loop_is_closed = Event()
self.server_is_started = Event()
self.handler = _handler(parameters, interval=interval)
log.debug("Start monitoring thread")
if Monitor.running:
# stop the old server
log.debug("Stopping and restarting server")
Monitor.running.stop()
self.start()
# Wait until the loop is running
self.server_is_started.wait(timeout=5)
if not self.server_is_started.is_set():
raise RuntimeError("Failed to start server")
Monitor.running = self
def run(self) -> None:
"""
Start the event loop and run forever.
"""
log.debug("Running Websocket server")
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
try:
server_start = websockets.serve(self.handler, '127.0.0.1',
WEBSOCKET_PORT, close_timeout=1)
self.server = self.loop.run_until_complete(server_start)
self.server_is_started.set()
self.loop.run_forever()
except OSError:
# The code above may throw an OSError
# if the socket cannot be bound
log.exception("Server could not be started")
finally:
log.debug("loop stopped")
log.debug("Pending tasks at close: %r",
all_tasks(self.loop))
self.loop.close()
log.debug("loop closed")
self.loop_is_closed.set()
def update_all(self) -> None:
"""
Update all parameters in the monitor.
"""
for parameter in self._parameters:
# call get if it can be called without arguments
with suppress(TypeError):
parameter.get()
def stop(self) -> None:
"""
        Shut down the server, close the event loop and join the thread.
        Sets the active Monitor to ``None``.
"""
self.join()
Monitor.running = None
async def __stop_server(self) -> None:
log.debug("asking server %r to close", self.server)
if self.server is not None:
self.server.close()
log.debug("waiting for server to close")
if self.loop is not None and self.server is not None:
await self.loop.create_task(self.server.wait_closed())
log.debug("stopping loop")
if self.loop is not None:
log.debug("Pending tasks at stop: %r",
all_tasks(self.loop))
self.loop.stop()
def join(self, timeout: Optional[float] = None) -> None:
"""
        Override ``Thread.join`` to make sure the server is stopped before
        joining, avoiding a potential deadlock.
"""
log.debug("Shutting down server")
if not self.is_alive():
            # run this check before scheduling the stop coroutine to prevent a
            # cryptic error message
log.debug("monitor is dead")
return
try:
if self.loop is not None:
asyncio.run_coroutine_threadsafe(self.__stop_server(),
self.loop)
except RuntimeError:
# the above may throw a runtime error if the loop is already
# stopped in which case there is nothing more to do
log.exception("Could not close loop")
self.loop_is_closed.wait(timeout=5)
if not self.loop_is_closed.is_set():
raise RuntimeError("Failed to join loop")
log.debug("Loop reported closed")
super().join(timeout=timeout)
log.debug("Monitor Thread has joined")
@staticmethod
def show() -> None:
"""
Overwrite this method to show/raise your monitor GUI
F.ex.
::
import webbrowser
url = "localhost:3000"
# Open URL in new window, raising the window if possible.
webbrowser.open_new(url)
"""
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
if __name__ == "__main__":
import http.server
# If this file is run, create a simple webserver that serves a simple
# website that can be used to view monitored parameters.
STATIC_DIR = os.path.join(os.path.dirname(__file__), 'dist')
os.chdir(STATIC_DIR)
try:
log.info("Starting HTTP Server at http://localhost:%i", SERVER_PORT)
with socketserver.TCPServer(("", SERVER_PORT),
http.server.SimpleHTTPRequestHandler) as httpd:
log.debug("serving directory %s", STATIC_DIR)
webbrowser.open("http://localhost:{}".format(SERVER_PORT))
httpd.serve_forever()
except KeyboardInterrupt:
log.info("Shutting Down HTTP Server")
| 33.967509 | 89 | 0.610054 | [
"MIT"
] | Akshita07/Qcodes | qcodes/monitor/monitor.py | 9,410 | Python |
#!/usr/bin/env python3
# Copyright (c) 2019 The TradePlus_Coin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# -*- coding: utf-8 -*-
from io import BytesIO
from struct import pack
from random import randint, choice
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import create_coinbase, create_block
from test_framework.key import CECKey
from test_framework.messages import CTransaction, CTxOut, CTxIn, COIN, msg_block
from test_framework.mininode import network_thread_start
from test_framework.test_framework import BitcoinTestFramework
from test_framework.script import CScript, OP_CHECKSIG
from test_framework.util import hash256, bytes_to_hex_str, hex_str_to_bytes, connect_nodes_bi, p2p_port
from .util import TestNode, create_transaction, utxo_to_stakingPrevOuts, dir_size
''' -------------------------------------------------------------------------
TradePlus_Coin_FakeStakeTest CLASS ----------------------------------------------------
General Test Class to be extended by individual tests for each attack test
'''
class TradePlus_Coin_FakeStakeTest(BitcoinTestFramework):
def set_test_params(self):
''' Setup test environment
:param:
:return:
'''
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-staking=1', '-debug=net']]*self.num_nodes
def setup_network(self):
''' Can't rely on syncing all the nodes when staking=1
:param:
:return:
'''
self.setup_nodes()
for i in range(self.num_nodes - 1):
for j in range(i+1, self.num_nodes):
connect_nodes_bi(self.nodes, i, j)
def init_test(self):
''' Initializes test parameters
:param:
:return:
'''
title = "*** Starting %s ***" % self.__class__.__name__
underline = "-" * len(title)
self.log.info("\n\n%s\n%s\n%s\n", title, underline, self.description)
# Global Test parameters (override in run_test)
self.DEFAULT_FEE = 0.1
# Spam blocks to send in current test
self.NUM_BLOCKS = 30
# Setup the p2p connections and start up the network thread.
self.test_nodes = []
for i in range(self.num_nodes):
self.test_nodes.append(TestNode())
self.test_nodes[i].peer_connect('127.0.0.1', p2p_port(i))
network_thread_start() # Start up network handling in another thread
self.node = self.nodes[0]
# Let the test nodes get in sync
for i in range(self.num_nodes):
self.test_nodes[i].wait_for_verack()
def run_test(self):
''' Performs the attack of this test - run init_test first.
:param:
:return:
'''
self.description = ""
self.init_test()
return
    def create_spam_block(self, hashPrevBlock, stakingPrevOuts, height, fStakeDoubleSpent=False, fZPoS=False, spendingPrevOuts=None):
''' creates a block to spam the network with
:param hashPrevBlock: (hex string) hash of previous block
stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as staking inputs) to amount, block_time, nStakeModifier, hashStake
height: (int) block height
fStakeDoubleSpent: (bool) spend the coinstake input inside the block
fZPoS: (bool) stake the block with zerocoin
spendingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints (to be used as tx inputs) to amount, block_time, nStakeModifier, hashStake
:return block: (CBlock) generated block
'''
# If not given inputs to create spam txes, use a copy of the staking inputs
        if not spendingPrevOuts:
spendingPrevOuts = dict(stakingPrevOuts)
# Get current time
current_time = int(time.time())
nTime = current_time & 0xfffffff0
# Create coinbase TX
# Even if PoS blocks have empty coinbase vout, the height is required for the vin script
coinbase = create_coinbase(height)
coinbase.vout[0].nValue = 0
coinbase.vout[0].scriptPubKey = b""
coinbase.nTime = nTime
coinbase.rehash()
# Create Block with coinbase
block = create_block(int(hashPrevBlock, 16), coinbase, nTime)
# Find valid kernel hash - Create a new private key used for block signing.
if not block.solve_stake(stakingPrevOuts):
raise Exception("Not able to solve for any prev_outpoint")
# Sign coinstake TX and add it to the block
signed_stake_tx = self.sign_stake_tx(block, stakingPrevOuts[block.prevoutStake][0], fZPoS)
block.vtx.append(signed_stake_tx)
# Remove coinstake input prevout unless we want to try double spending in the same block.
# Skip for zPoS as the spendingPrevouts are just regular UTXOs
if not fZPoS and not fStakeDoubleSpent:
del spendingPrevOuts[block.prevoutStake]
# remove a random prevout from the list
# (to randomize block creation if the same height is picked two times)
if len(spendingPrevOuts) > 0:
del spendingPrevOuts[choice(list(spendingPrevOuts))]
# Create spam for the block. Sign the spendingPrevouts
for outPoint in spendingPrevOuts:
value_out = int(spendingPrevOuts[outPoint][0] - self.DEFAULT_FEE * COIN)
tx = create_transaction(outPoint, b"", value_out, nTime, scriptPubKey=CScript([self.block_sig_key.get_pubkey(), OP_CHECKSIG]))
# sign txes
signed_tx_hex = self.node.signrawtransaction(bytes_to_hex_str(tx.serialize()))['hex']
signed_tx = CTransaction()
signed_tx.deserialize(BytesIO(hex_str_to_bytes(signed_tx_hex)))
block.vtx.append(signed_tx)
# Get correct MerkleRoot and rehash block
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
# Sign block with coinstake key and return it
block.sign_block(self.block_sig_key)
return block
def spend_utxo(self, utxo, address_list):
''' spend amount from previously unspent output to a provided address
:param utxo: (JSON) returned from listunspent used as input
               address_list: (string list) destination addresses
:return: txhash: (string) tx hash if successful, empty string otherwise
'''
try:
inputs = [{"txid":utxo["txid"], "vout":utxo["vout"]}]
out_amount = (float(utxo["amount"]) - self.DEFAULT_FEE)/len(address_list)
outputs = {}
for address in address_list:
outputs[address] = out_amount
spendingTx = self.node.createrawtransaction(inputs, outputs)
spendingTx_signed = self.node.signrawtransaction(spendingTx)
if spendingTx_signed["complete"]:
txhash = self.node.sendrawtransaction(spendingTx_signed["hex"])
return txhash
else:
self.log.warning("Error: %s" % str(spendingTx_signed["errors"]))
return ""
except JSONRPCException as e:
self.log.error("JSONRPCException: %s" % str(e))
return ""
def spend_utxos(self, utxo_list, address_list = []):
        ''' spend utxos to the provided list of addresses or to 10 newly generated ones.
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient TradePlus_Coin addresses. if not set,
10 new addresses will be generated from the wallet for each tx.
:return: txHashes (string list) tx hashes
'''
txHashes = []
# If not given, get 10 new addresses from self.node wallet
if address_list == []:
for i in range(10):
address_list.append(self.node.getnewaddress())
for utxo in utxo_list:
try:
# spend current utxo to provided addresses
txHash = self.spend_utxo(utxo, address_list)
if txHash != "":
txHashes.append(txHash)
except JSONRPCException as e:
self.log.error("JSONRPCException: %s" % str(e))
continue
return txHashes
def stake_amplification_step(self, utxo_list, address_list = []):
''' spends a list of utxos providing the list of new outputs
:param utxo_list: (JSON list) returned from listunspent used as input
address_list: (string list) [optional] recipient TradePlus_Coin addresses.
:return: new_utxos: (JSON list) list of new (valid) inputs after the spends
'''
self.log.info("--> Stake Amplification step started with %d UTXOs", len(utxo_list))
txHashes = self.spend_utxos(utxo_list, address_list)
num_of_txes = len(txHashes)
new_utxos = []
        if num_of_txes > 0:
self.log.info("Created %d transactions...Mining 2 blocks to include them..." % num_of_txes)
self.node.generate(2)
time.sleep(2)
new_utxos = self.node.listunspent()
self.log.info("Amplification step produced %d new \"Fake Stake\" inputs:" % len(new_utxos))
return new_utxos
def stake_amplification(self, utxo_list, iterations, address_list = []):
''' performs the "stake amplification" which gives higher chances at finding fake stakes
:param utxo_list: (JSON list) returned from listunspent used as input
iterations: (int) amount of stake amplification steps to perform
address_list: (string list) [optional] recipient TradePlus_Coin addresses.
:return: all_inputs: (JSON list) list of all spent inputs
'''
self.log.info("** Stake Amplification started with %d UTXOs", len(utxo_list))
valid_inputs = utxo_list
all_inputs = []
for i in range(iterations):
all_inputs = all_inputs + valid_inputs
old_inputs = valid_inputs
valid_inputs = self.stake_amplification_step(old_inputs, address_list)
self.log.info("** Stake Amplification ended with %d \"fake\" UTXOs", len(all_inputs))
return all_inputs
def sign_stake_tx(self, block, stake_in_value, fZPoS=False):
''' signs a coinstake transaction
:param block: (CBlock) block with stake to sign
stake_in_value: (int) staked amount
fZPoS: (bool) zerocoin stake
:return: stake_tx_signed: (CTransaction) signed tx
'''
self.block_sig_key = CECKey()
if fZPoS:
self.log.info("Signing zPoS stake...")
# Create raw zerocoin stake TX (signed)
raw_stake = self.node.createrawzerocoinstake(block.prevoutStake)
stake_tx_signed_raw_hex = raw_stake["hex"]
# Get stake TX private key to sign the block with
stake_pkey = raw_stake["private-key"]
self.block_sig_key.set_compressed(True)
self.block_sig_key.set_secretbytes(bytes.fromhex(stake_pkey))
else:
# Create a new private key and get the corresponding public key
self.block_sig_key.set_secretbytes(hash256(pack('<I', 0xffff)))
pubkey = self.block_sig_key.get_pubkey()
# Create the raw stake TX (unsigned)
scriptPubKey = CScript([pubkey, OP_CHECKSIG])
outNValue = int(stake_in_value + 2*COIN)
stake_tx_unsigned = CTransaction()
stake_tx_unsigned.nTime = block.nTime
stake_tx_unsigned.vin.append(CTxIn(block.prevoutStake))
stake_tx_unsigned.vin[0].nSequence = 0xffffffff
stake_tx_unsigned.vout.append(CTxOut())
stake_tx_unsigned.vout.append(CTxOut(outNValue, scriptPubKey))
# Sign the stake TX
stake_tx_signed_raw_hex = self.node.signrawtransaction(bytes_to_hex_str(stake_tx_unsigned.serialize()))['hex']
# Deserialize the signed raw tx into a CTransaction object and return it
stake_tx_signed = CTransaction()
stake_tx_signed.deserialize(BytesIO(hex_str_to_bytes(stake_tx_signed_raw_hex)))
return stake_tx_signed
def get_prevouts(self, utxo_list, blockHeight, zpos=False):
''' get prevouts (map) for each utxo in a list
:param utxo_list: <if zpos=False> (JSON list) utxos returned from listunspent used as input
<if zpos=True> (JSON list) mints returned from listmintedzerocoins used as input
blockHeight: (int) height of the previous block
zpos: (bool) type of utxo_list
:return: stakingPrevOuts: ({COutPoint --> (int, int, int, str)} dictionary)
map outpoints to amount, block_time, nStakeModifier, hashStake
'''
zerocoinDenomList = [1, 5, 10, 50, 100, 500, 1000, 5000]
stakingPrevOuts = {}
for utxo in utxo_list:
if zpos:
# get mint checkpoint
checkpointHeight = blockHeight - 200
checkpointBlock = self.node.getblock(self.node.getblockhash(checkpointHeight), True)
checkpoint = int(checkpointBlock['acc_checkpoint'], 16)
# parse checksum and get checksumblock
pos = zerocoinDenomList.index(utxo['denomination'])
checksum = (checkpoint >> (32 * (len(zerocoinDenomList) - 1 - pos))) & 0xFFFFFFFF
checksumBlock = self.node.getchecksumblock(hex(checksum), utxo['denomination'], True)
# get block hash and block time
txBlockhash = checksumBlock['hash']
txBlocktime = checksumBlock['time']
else:
# get raw transaction for current input
utxo_tx = self.node.getrawtransaction(utxo['txid'], 1)
# get block hash and block time
txBlocktime = utxo_tx['blocktime']
txBlockhash = utxo_tx['blockhash']
# get Stake Modifier
stakeModifier = int(self.node.getblock(txBlockhash)['modifier'], 16)
# assemble prevout object
utxo_to_stakingPrevOuts(utxo, stakingPrevOuts, txBlocktime, stakeModifier, zpos)
return stakingPrevOuts
def log_data_dir_size(self):
''' Prints the size of the '/regtest/blocks' directory.
:param:
:return:
'''
init_size = dir_size(self.node.datadir + "/regtest/blocks")
self.log.info("Size of data dir: %s kilobytes" % str(init_size))
def test_spam(self, name, staking_utxo_list,
fRandomHeight=False, randomRange=0, randomRange2=0,
fDoubleSpend=False, fMustPass=False, fZPoS=False,
spending_utxo_list=[]):
''' General method to create, send and test the spam blocks
:param name: (string) chain branch (usually either "Main" or "Forked")
staking_utxo_list: (string list) utxos to use for staking
fRandomHeight: (bool) send blocks at random height
randomRange: (int) if fRandomHeight=True, height is >= current-randomRange
randomRange2: (int) if fRandomHeight=True, height is < current-randomRange2
fDoubleSpend: (bool) if true, stake input is double spent in block.vtx
fMustPass: (bool) if true, the blocks must be stored on disk
fZPoS: (bool) stake the block with zerocoin
spending_utxo_list: (string list) utxos to use for spending
:return: err_msgs: (string list) reports error messages from the test
or an empty list if test is successful
'''
# Create empty error messages list
err_msgs = []
# Log initial datadir size
self.log_data_dir_size()
# Get latest block number and hash
block_count = self.node.getblockcount()
pastBlockHash = self.node.getblockhash(block_count)
randomCount = block_count
self.log.info("Current height: %d" % block_count)
for i in range(0, self.NUM_BLOCKS):
            if i != 0:
self.log.info("Sent %d blocks out of %d" % (i, self.NUM_BLOCKS))
# if fRandomHeight=True get a random block number (in range) and corresponding hash
if fRandomHeight:
randomCount = randint(block_count - randomRange, block_count - randomRange2)
pastBlockHash = self.node.getblockhash(randomCount)
# Get spending prevouts and staking prevouts for the height of current block
current_block_n = randomCount + 1
stakingPrevOuts = self.get_prevouts(staking_utxo_list, randomCount, zpos=fZPoS)
spendingPrevOuts = self.get_prevouts(spending_utxo_list, randomCount)
# Create the spam block
block = self.create_spam_block(pastBlockHash, stakingPrevOuts, current_block_n,
fStakeDoubleSpent=fDoubleSpend, fZPoS=fZPoS, spendingPrevOuts=spendingPrevOuts)
# Log time and size of the block
block_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(block.nTime))
block_size = len(block.serialize())/1000
self.log.info("Sending block %d [%s...] - nTime: %s - Size (kb): %.2f",
current_block_n, block.hash[:7], block_time, block_size)
# Try submitblock
var = self.node.submitblock(bytes_to_hex_str(block.serialize()))
time.sleep(1)
if (not fMustPass and var not in [None, "bad-txns-invalid-ztdps"]) or (fMustPass and var != "inconclusive"):
self.log.error("submitblock [fMustPass=%s] result: %s" % (str(fMustPass), str(var)))
err_msgs.append("submitblock %d: %s" % (current_block_n, str(var)))
# Try sending the message block
msg = msg_block(block)
try:
self.test_nodes[0].handle_connect()
self.test_nodes[0].send_message(msg)
time.sleep(2)
block_ret = self.node.getblock(block.hash)
if not fMustPass and block_ret is not None:
self.log.error("Error, block stored in %s chain" % name)
err_msgs.append("getblock %d: result not None" % current_block_n)
if fMustPass:
if block_ret is None:
self.log.error("Error, block NOT stored in %s chain" % name)
err_msgs.append("getblock %d: result is None" % current_block_n)
else:
self.log.info("Good. Block IS stored on disk.")
except JSONRPCException as e:
exc_msg = str(e)
if exc_msg == "Can't read block from disk (-32603)":
if fMustPass:
self.log.warning("Bad! Block was NOT stored to disk.")
err_msgs.append(exc_msg)
else:
self.log.info("Good. Block was not stored on disk.")
else:
self.log.warning(exc_msg)
err_msgs.append(exc_msg)
except Exception as e:
exc_msg = str(e)
self.log.error(exc_msg)
err_msgs.append(exc_msg)
self.log.info("Sent all %s blocks." % str(self.NUM_BLOCKS))
# Log final datadir size
self.log_data_dir_size()
# Return errors list
return err_msgs
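    # A hypothetical subclass sketch (illustrative only; concrete attack tests are
    # expected to extend this class and drive test_spam roughly like this):
    #
    #     class Test_01(TradePlus_Coin_FakeStakeTest):
    #         def run_test(self):
    #             self.description = "Spam fake-stake blocks on the main chain"
    #             self.init_test()
    #             utxo_list = self.node.listunspent()
    #             err_msgs = self.test_spam("Main", utxo_list)
    #             assert len(err_msgs) == 0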
| 46.678899 | 138 | 0.602201 | [
"MIT"
] | tdpsdevextreme/TradePlusCoin | test/functional/fake_stake/base_test.py | 20,352 | Python |
import asyncio
import sys
import pytest
from aws_lambda_powertools.event_handler import AppSyncResolver
from aws_lambda_powertools.event_handler.appsync import Router
from aws_lambda_powertools.utilities.data_classes import AppSyncResolverEvent
from aws_lambda_powertools.utilities.typing import LambdaContext
from tests.functional.utils import load_event
def test_direct_resolver():
# Check whether we can handle an example appsync direct resolver
mock_event = load_event("appSyncDirectResolver.json")
app = AppSyncResolver()
@app.resolver(field_name="createSomething")
def create_something(id: str): # noqa AA03 VNE003
assert app.lambda_context == {}
return id
# Call the implicit handler
result = app(mock_event, {})
assert result == "my identifier"
def test_amplify_resolver():
# Check whether we can handle an example appsync resolver
mock_event = load_event("appSyncResolverEvent.json")
app = AppSyncResolver()
@app.resolver(type_name="Merchant", field_name="locations")
def get_location(page: int, size: int, name: str):
assert app.current_event is not None
assert isinstance(app.current_event, AppSyncResolverEvent)
assert page == 2
assert size == 1
return name
def handler(event, context):
# Call the explicit resolve function
return app.resolve(event, context)
result = handler(mock_event, {})
assert result == "value"
def test_resolver_no_params():
# GIVEN
app = AppSyncResolver()
@app.resolver(type_name="Query", field_name="noParams")
def no_params():
return "no_params has no params"
event = {"typeName": "Query", "fieldName": "noParams", "arguments": {}}
# WHEN
result = app.resolve(event, LambdaContext())
# THEN
assert result == "no_params has no params"
def test_resolver_value_error():
# GIVEN no defined field resolver
app = AppSyncResolver()
# WHEN
with pytest.raises(ValueError) as exp:
event = {"typeName": "type", "fieldName": "field", "arguments": {}}
app.resolve(event, LambdaContext())
# THEN
assert exp.value.args[0] == "No resolver found for 'type.field'"
def test_resolver_yield():
# GIVEN
app = AppSyncResolver()
mock_event = {"typeName": "Customer", "fieldName": "field", "arguments": {}}
@app.resolver(field_name="field")
def func_yield():
yield "value"
# WHEN
mock_context = LambdaContext()
result = app.resolve(mock_event, mock_context)
# THEN
assert next(result) == "value"
def test_resolver_multiple_mappings():
# GIVEN
app = AppSyncResolver()
@app.resolver(field_name="listLocations")
@app.resolver(field_name="locations")
def get_locations(name: str, description: str = ""):
return name + description
# WHEN
mock_event1 = {"typeName": "Query", "fieldName": "listLocations", "arguments": {"name": "value"}}
mock_event2 = {
"typeName": "Merchant",
"fieldName": "locations",
"arguments": {"name": "value2", "description": "description"},
}
result1 = app.resolve(mock_event1, LambdaContext())
result2 = app.resolve(mock_event2, LambdaContext())
# THEN
assert result1 == "value"
assert result2 == "value2description"
@pytest.mark.skipif(sys.version_info < (3, 8), reason="only for python versions that support asyncio.run")
def test_resolver_async():
# GIVEN
app = AppSyncResolver()
mock_event = {"typeName": "Customer", "fieldName": "field", "arguments": {}}
@app.resolver(field_name="field")
async def get_async():
await asyncio.sleep(0.0001)
return "value"
# WHEN
mock_context = LambdaContext()
result = app.resolve(mock_event, mock_context)
# THEN
assert asyncio.run(result) == "value"
def test_resolve_custom_data_model():
# Check whether we can handle an example appsync direct resolver
mock_event = load_event("appSyncDirectResolver.json")
class MyCustomModel(AppSyncResolverEvent):
@property
def country_viewer(self):
return self.request_headers.get("cloudfront-viewer-country")
app = AppSyncResolver()
@app.resolver(field_name="createSomething")
def create_something(id: str): # noqa AA03 VNE003
return id
# Call the implicit handler
result = app(event=mock_event, context=LambdaContext(), data_model=MyCustomModel)
assert result == "my identifier"
assert app.current_event.country_viewer == "US"
def test_resolver_include_resolver():
# GIVEN
app = AppSyncResolver()
router = Router()
@router.resolver(type_name="Query", field_name="listLocations")
def get_locations(name: str):
return "get_locations#" + name
@app.resolver(field_name="listLocations2")
def get_locations2(name: str):
return "get_locations2#" + name
app.include_router(router)
# WHEN
mock_event1 = {"typeName": "Query", "fieldName": "listLocations", "arguments": {"name": "value"}}
mock_event2 = {"typeName": "Query", "fieldName": "listLocations2", "arguments": {"name": "value"}}
result1 = app.resolve(mock_event1, LambdaContext())
result2 = app.resolve(mock_event2, LambdaContext())
# THEN
assert result1 == "get_locations#value"
assert result2 == "get_locations2#value"
| 28.314136 | 106 | 0.67659 | [
"Apache-2.0",
"MIT-0"
] | BVMiko/aws-lambda-powertools-python | tests/functional/event_handler/test_appsync.py | 5,408 | Python |
#-*- coding: utf-8 -*-
# Import the extension
import isce3.extensions.isceextension as isceextension
# Import the wrappers
def crossmul(**kwds):
"""A factory for Crossmul"""
from .Crossmul import Crossmul
return Crossmul(**kwds)
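# A hypothetical usage sketch (illustrative only):
#
#     from isce3.signal import crossmul
#     xmul = crossmul()   # constructs the Crossmul wrapper with default settings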
# end of file
| 18.5 | 54 | 0.698842 | [
"Apache-2.0"
] | piyushrpt/isce3 | python/packages/isce3/signal/__init__.py | 259 | Python |
from IPython.display import display, Latex
display(Latex('$ d[mRNA_{nuc}]/dt = k_t - k_{exp} * [mRNA_{nuc}]$'))
print("and")
display(Latex('$ d[mRNA_{cyt}]/dt = k_{exp} * [mRNA_{nuc}] - k_{deg} * [mRNA_{cyt}]$'))
print("No. This is because mRNAs are continuously produced (and degraded) at independent rates in the model, so there is no particular reason why their number should be conserved.")
| 50.75 | 182 | 0.682266 | [
"MIT"
] | oercompbiomed/CBM101 | K_Mathematical_Modeling/Section 2/solutionODEsExercise3.py | 406 | Python |
import can
from threading import Thread
import asyncio
import settings
class ThreadModuleAbstract:
thread = None
bus: can.ThreadSafeBus = None
loop: asyncio.AbstractEventLoop = None
    _can_filters: list = None
def __init__(self):
self.bus = self._can = can.ThreadSafeBus(
channel=settings.CAN_INTERFACE,
bustype='socketcan_ctypes',
can_filters=self._can_filters
)
self.__register_notifier()
self.loop = asyncio.new_event_loop()
def run(self):
self.thread = Thread(target=self.execute)
self.thread.daemon = True
self.thread.start()
def execute(self):
pass
def _on_message(self, msg: can.Message):
pass
def __register_notifier(self):
self._notifier = can.Notifier(self.bus, [
self._on_message
], loop=self.loop)
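    # A hypothetical subclass sketch (illustrative only; the CAN id filter and the
    # message handling shown here are not part of this module):
    #
    #     class SpeedModule(ThreadModuleAbstract):
    #         _can_filters = [{"can_id": 0x0FC, "can_mask": 0x7FF}]
    #
    #         def execute(self):
    #             self.loop.run_forever()  # let the Notifier dispatch _on_message
    #
    #         def _on_message(self, msg: can.Message):
    #             print(msg.arbitration_id, msg.data)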
| 23.918919 | 49 | 0.630508 | [
"MIT"
] | Exus1/alfa-blue-me | module/ThreadModuleAbstract.py | 885 | Python |
name = 'codebench'
| 9.5 | 18 | 0.684211 | [
"MIT"
] | li-boxuan/codebench | codebench/__init__.py | 19 | Python |
import pytest
from aiohttp import web
from app import pochta
@pytest.fixture
def app(aiohttp_client):
app = web.Application()
app.router.add_get('/pochta', pochta)
return aiohttp_client(app)
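# Note: the fixture above returns ``aiohttp_client(app)`` un-awaited, so each test
# receives a coroutine and must ``await`` it to obtain the test client.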
async def test_work(aiohttp_client, loop, app):
    client = await app
resp = await client.get('/pochta?from_city=москва&from_street=алтуфьевское&to_city=уфа&to_street=парковая')
assert resp.status == 200
text = await resp.text()
assert '{"pochta": 259.34}' == text
async def test_not_work(aiohttp_client, loop, app):
    client = await app
resp = await client.get('/pochta?from_city=москва&from_street=алтуфьевское&to_city=уфа')
assert resp.status == 200
text = await resp.text()
assert '{"pochta": null}' == text
| 25.862069 | 111 | 0.698667 | [
"MIT"
] | postman17/lks-delivery | test-aiohttp.py | 800 | Python |
import random
import pytest
import numpy as np
from eight_mile.utils import calc_nfeats
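# The three calling conventions exercised below, roughly (values illustrative):
#   calc_nfeats([1, 2, 3], None, None, nfeats=30)  ->  [1, 2, 3], [30, 30, 30]
#   calc_nfeats([(1, 10), (2, 20)])                ->  (1, 2), (10, 20)
#   calc_nfeats([1, 2, 3], 10, 25)                 ->  [1, 2, 3], [10, 20, 25]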
def test_use_nfeats():
filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
input_nfeat = random.randint(1, 100)
gold_nfeats = [input_nfeat] * len(filtsz)
_, nfeat = calc_nfeats(filtsz, None, None, nfeats=input_nfeat)
assert nfeat == gold_nfeats
def test_use_nfeats_filtsz_unchanged():
gold_filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
nfeat = random.randint(1, 100)
filtsz, _ = calc_nfeats(gold_filtsz, None, None, nfeats=nfeat)
assert filtsz == gold_filtsz
def test_use_nfeats_none():
filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
with pytest.raises(AssertionError):
calc_nfeats(filtsz)
def test_use_nfeats_list():
filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
nfeats = [random.randint(1, 10) for _ in range(len(filtsz))]
with pytest.raises(AssertionError):
_, nfeat = calc_nfeats(filtsz, None, None, nfeats=nfeats)
def test_extract_tuple():
filt_feat = [(random.randint(1, 10), random.randint(10, 20)) for _ in range(random.randint(2, 6))]
gold_filtsz = tuple(filter_and_size[0] for filter_and_size in filt_feat)
gold_nfeats = tuple(filter_and_size[1] for filter_and_size in filt_feat)
filtsz, nfeats = calc_nfeats(filt_feat)
assert filtsz == gold_filtsz
assert nfeats == gold_nfeats
def test_feat_factor_manual():
gold_filtsz = [1, 2, 3, 4, 5]
feat_factor = 10
gold_nfeats = [10, 20, 30, 40, 50]
filtsz, nfeats = calc_nfeats(gold_filtsz, feat_factor, float("Infinity"))
assert filtsz == gold_filtsz
assert nfeats == gold_nfeats
def test_feat_factor_capped():
gold_filtsz = [1, 2, 3, 4, 5]
feat_factor = 10
gold_nfeats = [10, 20, 30, 30, 30]
filtsz, nfeats = calc_nfeats(gold_filtsz, feat_factor, 30)
assert filtsz == gold_filtsz
assert nfeats == gold_nfeats
def test_feat_factor():
gold_filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
feat_factor = random.randint(10, 25)
max_feat = random.randint(30, 40)
gold_nfeats = np.minimum(np.array(gold_filtsz) * feat_factor, max_feat)
filtsz, nfeats = calc_nfeats(gold_filtsz, feat_factor, max_feat)
np.testing.assert_equal(filtsz, gold_filtsz)
np.testing.assert_equal(nfeats, gold_nfeats)
def test_feat_factor_max_none():
filtsz = [random.randint(1, 10) for _ in range(random.randint(2, 6))]
feat_factor = 10
with pytest.raises(AssertionError):
calc_nfeats(filtsz, nfeat_factor=feat_factor, max_feat=None)
| 34.597403 | 102 | 0.703829 | [
"Apache-2.0"
] | blester125/baseline | tests/test_calc_feats.py | 2,664 | Python |
from typing import List, Tuple
import pytest
from returns.io import IOFailure, IOResult, IOSuccess
from returns.pipeline import managed
from returns.result import Failure, Result, Success
_acquire_success = IOSuccess('acquire success')
_acquire_failure = IOFailure('acquire failure')
def _use_success(inner_value: str) -> IOResult[str, str]:
return IOSuccess('use success')
def _use_failure(inner_value: str) -> IOResult[str, str]:
return IOFailure('use failure')
class _ReleaseSuccess(object):
def __init__(self, logs: List[Tuple[str, Result[str, str]]]) -> None:
self._logs = logs
def __call__(
self,
inner_value: str,
use_result: Result[str, str],
) -> IOResult[None, str]:
self._logs.append((inner_value, use_result))
return IOSuccess(None)
class _ReleaseFailure(object):
def __init__(self, logs: List[Tuple[str, Result[str, str]]]) -> None:
self._logs = logs
def __call__(
self,
inner_value: str,
use_result: Result[str, str],
) -> IOResult[None, str]:
return IOFailure('release failure')
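# Semantics exercised by the cases below: ``managed(use, release)(acquire)`` runs
# ``use`` only when ``acquire`` succeeds, then calls ``release`` with the acquired
# value and the (possibly failed) result of ``use``; a failing ``release`` overrides
# the pipeline result, and a failed ``acquire`` short-circuits everything.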
@pytest.mark.parametrize(('acquire', 'use', 'release', 'final_result', 'log'), [
# Acquire success:
(
_acquire_success,
_use_success,
_ReleaseSuccess,
IOSuccess('use success'),
[('acquire success', Success('use success'))],
),
(
_acquire_success,
_use_success,
_ReleaseFailure,
IOFailure('release failure'),
[],
),
(
_acquire_success,
_use_failure,
_ReleaseSuccess,
IOFailure('use failure'),
[('acquire success', Failure('use failure'))],
),
(
_acquire_success,
_use_failure,
_ReleaseFailure,
IOFailure('release failure'),
[],
),
# Acquire failure:
(
_acquire_failure,
_use_success,
_ReleaseSuccess,
IOFailure('acquire failure'),
[],
),
(
_acquire_failure,
_use_failure,
_ReleaseSuccess,
IOFailure('acquire failure'),
[],
),
(
_acquire_failure,
_use_success,
_ReleaseFailure,
IOFailure('acquire failure'),
[],
),
(
_acquire_failure,
_use_failure,
_ReleaseFailure,
IOFailure('acquire failure'),
[],
),
])
def test_all_success(acquire, use, release, final_result, log):
"""Ensures that managed works as intended."""
pipeline_logs: List[Tuple[str, Result[str, str]]] = []
pipeline_result = managed(
use,
release(pipeline_logs),
)(acquire)
assert pipeline_result == final_result
assert pipeline_logs == log
def test_full_typing():
"""This test is here to be a case for typing."""
logs: List[Tuple[str, Result[str, str]]] = []
pipeline_result = managed(
_use_success,
_ReleaseSuccess(logs),
)(_acquire_success)
assert pipeline_result == IOSuccess('use success')
assert logs == [('acquire success', Success('use success'))]
| 24.217054 | 80 | 0.601472 | [
"BSD-2-Clause"
] | CucumisSativus/returns | tests/test_pipeline/test_managed/test_managed_ioresult.py | 3,124 | Python |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ split_long_utter_to_short.py ]
# Synopsis [ preprocess long audio / speech to shorter versions ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import torch
import argparse
import torchaudio
from tqdm import tqdm
from pathlib import Path
from joblib import Parallel, delayed
torchaudio.set_audio_backend("sox_io")
#############################
# PREPROCESS CONFIGURATIONS #
#############################
def get_preprocess_args():
parser = argparse.ArgumentParser(description='preprocess arguments for any dataset.')
    parser.add_argument('-i', '--input_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to your dataset directory (LibriLight / LibriSpeech / TIMIT)', required=False)
parser.add_argument('-o', '--output_path', default='/livingrooms/public/LibriLight/', type=str, help='Path to store output', required=False)
parser.add_argument('-s', '--split_size', default=60, type=int, help='Split size in seconds', required=False)
parser.add_argument('-a', '--audio_extension', default='.flac', type=str, help='audio file type (.wav / .flac / .mp3 / etc)', required=False)
    parser.add_argument('-n', '--name', default='-splitted', type=str, help='Name to append to the original directory', required=False)
parser.add_argument('--n_jobs', default=-1, type=int, help='Number of jobs used for computation', required=False)
args = parser.parse_args()
return args
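# Example invocation (paths are illustrative):
#   python split_long_utter_to_short.py -i /path/to/LibriLight/ -o /path/to/LibriLight/ -s 60 -a .flac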
##################
# SPLIT AND SAVE #
##################
def split_and_save(input_file, current_split, args):
wav, sr = torchaudio.load(input_file)
# compute the size of each chunk
chunk_size = args.split_size*sr
quotient, remainder = divmod(wav.size(1), chunk_size)
sections = [chunk_size for _ in range(quotient)]
sections.append(remainder) # the remainder is the last chunk
splitted_wav = torch.split(wav, split_size_or_sections=sections, dim=1)
check_sum = 0
for i, w in enumerate(splitted_wav):
check_sum += w.size(1)
file_name = os.path.basename(input_file).split('.')[0]
new_file_name = file_name.replace(file_name, file_name+'-'+str(i))
new_file_path = input_file.replace(current_split, current_split+args.name)
new_file_path = new_file_path.replace(file_name, new_file_name)
if args.input_path != args.output_path:
new_file_path = new_file_path.replace(args.input_path, args.output_path)
os.makedirs((os.path.dirname(new_file_path)), exist_ok=True)
torchaudio.save(new_file_path, w, sr)
assert check_sum == wav.size(1)
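    # Naming scheme: with -s 60, a 150-second "xxx.flac" becomes "xxx-0.flac" (60 s),
    # "xxx-1.flac" (60 s) and "xxx-2.flac" (30 s), written under the mirrored
    # "<split><name>" directory (e.g. "small-splitted").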
###################
# GENERATE SPLITS #
###################
def generate_splits(args, tr_set, audio_extension):
for i, s in enumerate(tr_set):
if os.path.isdir(os.path.join(args.input_path, s.lower())):
s = s.lower()
elif os.path.isdir(os.path.join(args.input_path, s.upper())):
s = s.upper()
else:
            raise NotImplementedError
print('')
todo = list(Path(os.path.join(args.input_path, s)).rglob('*' + audio_extension)) # '*.flac'
print(f'Preprocessing data in: {s}, {len(todo)} audio files found.')
print('Splitting audio to shorter length...', flush=True)
Parallel(n_jobs=args.n_jobs)(delayed(split_and_save)(str(file), s, args) for file in tqdm(todo))
print('All done, saved at', args.output_path, 'exit.')
########
# MAIN #
########
def main():
# get arguments
args = get_preprocess_args()
if 'librilight' in args.input_path.lower():
SETS = ['small', 'medium', 'large']
elif 'librispeech' in args.input_path.lower():
SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
elif 'timit' in args.input_path.lower():
SETS = ['TRAIN', 'TEST']
else:
raise NotImplementedError
# change the SETS list to match your dataset, for example:
# SETS = ['train', 'dev', 'test']
# SETS = ['TRAIN', 'TEST']
# SETS = ['train-clean-100', 'train-clean-360', 'train-other-500', 'dev-clean', 'dev-other', 'test-clean', 'test-other']
# Select data sets
for idx, s in enumerate(SETS):
print('\t', idx, ':', s)
    tr_set = input('Please enter the indices of the splits you wish to preprocess (separate with space): ')
tr_set = [SETS[int(t)] for t in tr_set.split(' ')]
# Run split
generate_splits(args, tr_set, args.audio_extension)
if __name__ == '__main__':
main()
| 38.354331 | 157 | 0.607678 | [
"MIT"
] | Ethan07902050/s3prl | s3prl/preprocess/split_long_utter_to_short.py | 4,871 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock!=None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock==None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because ttmd should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 43.862385 | 104 | 0.632922 | [
"MIT"
] | mytitanium/Titanium-Core-1.0 | test/functional/interface_http.py | 4,781 | Python |
import time, sys, os
from database_check import database_check
from link_processor import get_link
from link_generator import alphabets_generator, random_address_generator, linear_address_generator, last_link_read_linear_address, mutation_address_generator
def program_exit(link, work_mode):
if work_mode == '1': # 1 = linear
print('Saving last linear link position')
open('last_link', 'w').write(link)
# if work_mode == '3':
# print('Mutation variations of the link ended')
print('Script stoped')
sys.exit(0)
def start_link():
n_letters = input('How many letters in the link might be (at least 5): ')
start_point = 'a' + '1'*(int(n_letters)-1)
open('last_link', 'w').write(start_point)
database_check()
def main():
parser_type = input('''What type of content do you want to parse (input several numbers, if you want to parse any combination of the possible content):
1 - All (Groups/Channel/Users
2 - Channels
3 - Groups
4 - Users
Your choise: ''')[:2].lower()
work_mode = input('''What type of parsing you want to use:
1 - Linear parsing
2 - Random parsing
3 - Mutation parsing
Your choise: ''')[0].lower()
turbo_mode = input('Turn on turbo mod(y/n): ')[0].lower() # work mode with/out delay
output = input('''What type of output do you want:
1 - All output (not False will be only the content, that was choosed to parse)
2 - If something found
3 - No output
Your choise: ''')[0].lower()
alphabet, alphabet1 = alphabets_generator()
if work_mode == '1':
try: # LINK Checking
open('last_link').read()
change_start = input('Do you want to change number of letters in link(y/n): ')[0].lower()
if change_start == 'y':
start_link()
except:
print('Initial setup!')
start_link()
linear_letter_link_ids_array = last_link_read_linear_address(alphabet, alphabet1)
if work_mode == '3':
try:
os.remove('mutated')
except:
pass
mutated_initial_link = input('Input initial word to mutate (length of the word is greater than 5 letters): ').lower()
mutated_array = mutation_address_generator(mutated_initial_link)
total_mutated_rows = len(mutated_array)
print('Total mutation created: ',total_mutated_rows)
mutated_word_id = 0
try:
print('Parser is started!')
while True:
if work_mode =='1': # 1 = linear
link = linear_address_generator(alphabet, alphabet1, linear_letter_link_ids_array)
open('last_link', 'w').write(link)
elif work_mode == '2': # 2 = random
link = random_address_generator(alphabet, alphabet1)
elif work_mode == '3': # 3 = mutation
if total_mutated_rows > mutated_word_id +1:
link = mutated_array[mutated_word_id]
mutated_word_id += 1
else:
program_exit(link, work_mode)
url_get_status = get_link(link, output, parser_type)
if url_get_status == 'connection_error':
program_exit(link, work_mode)
if turbo_mode != 'y':
time.sleep(1.5)
else:
continue
except KeyboardInterrupt:
if work_mode == '3':
print('Mutation checking keyboard interupted')
program_exit(link, work_mode)
main() | 38.468085 | 157 | 0.600111 | [
"Apache-2.0"
] | flexagoon/telegram_parser | telegram_parser_console/main.py | 3,616 | Python |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for the L{twisted.python.failure} module.
"""
from __future__ import division, absolute_import
import re
import sys
import traceback
import pdb
import linecache
from twisted.python.compat import _PY3, NativeStringIO
from twisted.python import reflect
from twisted.python import failure
from twisted.trial.unittest import SkipTest, SynchronousTestCase
try:
from twisted.test import raiser
except ImportError:
raiser = None
def getDivisionFailure(*args, **kwargs):
"""
Make a C{Failure} of a divide-by-zero error.
@param args: Any C{*args} are passed to Failure's constructor.
@param kwargs: Any C{**kwargs} are passed to Failure's constructor.
"""
try:
1/0
except:
f = failure.Failure(*args, **kwargs)
return f
class FailureTests(SynchronousTestCase):
"""
Tests for L{failure.Failure}.
"""
def test_failAndTrap(self):
"""
Trapping a L{Failure}.
"""
try:
raise NotImplementedError('test')
except:
f = failure.Failure()
error = f.trap(SystemExit, RuntimeError)
self.assertEqual(error, RuntimeError)
self.assertEqual(f.type, NotImplementedError)
def test_trapRaisesWrappedException(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises the wrapped
C{Exception}.
"""
if not _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 3.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(ValueError, f.trap, OverflowError)
self.assertIs(exception, untrapped)
def test_trapRaisesSelf(self):
"""
If the wrapped C{Exception} is not a subclass of one of the
expected types, L{failure.Failure.trap} raises itself.
"""
if _PY3:
raise SkipTest(
"""
Only expected behaviour on Python 2.
@see U{http://twisted.readthedocs.io/en/latest/core/howto/python3.html#twisted-python-failure}
"""
)
exception = ValueError()
try:
raise exception
except:
f = failure.Failure()
untrapped = self.assertRaises(failure.Failure, f.trap, OverflowError)
self.assertIs(f, untrapped)
def test_failureValueFromFailure(self):
"""
A L{failure.Failure} constructed from another
L{failure.Failure} instance, has its C{value} property set to
the value of that L{failure.Failure} instance.
"""
exception = ValueError()
f1 = failure.Failure(exception)
f2 = failure.Failure(f1)
self.assertIs(f2.value, exception)
def test_failureValueFromFoundFailure(self):
"""
A L{failure.Failure} constructed without a C{exc_value}
argument, will search for an "original" C{Failure}, and if
found, its value will be used as the value for the new
C{Failure}.
"""
exception = ValueError()
f1 = failure.Failure(exception)
try:
f1.trap(OverflowError)
except:
f2 = failure.Failure()
self.assertIs(f2.value, exception)
def assertStartsWith(self, s, prefix):
"""
Assert that C{s} starts with a particular C{prefix}.
@param s: The input string.
@type s: C{str}
@param prefix: The string that C{s} should start with.
@type prefix: C{str}
"""
self.assertTrue(s.startswith(prefix),
'%r is not the start of %r' % (prefix, s))
def assertEndsWith(self, s, suffix):
"""
Assert that C{s} end with a particular C{suffix}.
@param s: The input string.
@type s: C{str}
@param suffix: The string that C{s} should end with.
@type suffix: C{str}
"""
self.assertTrue(s.endswith(suffix),
'%r is not the end of %r' % (suffix, s))
def assertTracebackFormat(self, tb, prefix, suffix):
"""
Assert that the C{tb} traceback contains a particular C{prefix} and
C{suffix}.
@param tb: The traceback string.
@type tb: C{str}
@param prefix: The string that C{tb} should start with.
@type prefix: C{str}
@param suffix: The string that C{tb} should end with.
@type suffix: C{str}
"""
self.assertStartsWith(tb, prefix)
self.assertEndsWith(tb, suffix)
def assertDetailedTraceback(self, captureVars=False, cleanFailure=False):
"""
Assert that L{printDetailedTraceback} produces and prints a detailed
traceback.
The detailed traceback consists of a header::
*--- Failure #20 ---
The body contains the stacktrace::
/twisted/trial/_synctest.py:1180: _run(...)
/twisted/python/util.py:1076: runWithWarningsSuppressed(...)
--- <exception caught here> ---
/twisted/test/test_failure.py:39: getDivisionFailure(...)
If C{captureVars} is enabled the body also includes a list of
globals and locals::
[ Locals ]
exampleLocalVar : 'xyz'
...
( Globals )
...
Or when C{captureVars} is disabled::
[Capture of Locals and Globals disabled (use captureVars=True)]
When C{cleanFailure} is enabled references to other objects are removed
and replaced with strings.
And finally the footer with the L{Failure}'s value::
exceptions.ZeroDivisionError: float division
*--- End of Failure #20 ---
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
@param cleanFailure: Enables L{Failure.cleanFailure}.
@type cleanFailure: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyz'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
if cleanFailure:
f.cleanFailure()
f.printDetailedTraceback(out)
tb = out.getvalue()
start = "*--- Failure #%d%s---\n" % (f.count,
(f.pickled and ' (pickled) ') or ' ')
end = "%s: %s\n*--- End of Failure #%s ---\n" % (reflect.qual(f.type),
reflect.safe_str(f.value), f.count)
self.assertTracebackFormat(tb, start, end)
# Variables are printed on lines with 2 leading spaces.
linesWithVars = [line for line in tb.splitlines()
if line.startswith(' ')]
if captureVars:
self.assertNotEqual([], linesWithVars)
if cleanFailure:
line = ' exampleLocalVar : "\'xyz\'"'
else:
line = " exampleLocalVar : 'xyz'"
self.assertIn(line, linesWithVars)
else:
self.assertEqual([], linesWithVars)
self.assertIn(' [Capture of Locals and Globals disabled (use '
'captureVars=True)]\n', tb)
def assertBriefTraceback(self, captureVars=False):
"""
Assert that L{printBriefTraceback} produces and prints a brief
traceback.
The brief traceback consists of a header::
Traceback: <type 'exceptions.ZeroDivisionError'>: float division
The body with the stacktrace::
/twisted/trial/_synctest.py:1180:_run
/twisted/python/util.py:1076:runWithWarningsSuppressed
And the footer::
--- <exception caught here> ---
/twisted/test/test_failure.py:39:getDivisionFailure
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'abcde'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure()
out = NativeStringIO()
f.printBriefTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += '%s:%s:%s\n' % (filename, lineno, method)
zde = repr(ZeroDivisionError)
self.assertTracebackFormat(tb,
"Traceback: %s: " % (zde,),
"%s\n%s" % (failure.EXCEPTION_CAUGHT_HERE, stack))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*abcde', tb))
def assertDefaultTraceback(self, captureVars=False):
"""
Assert that L{printTraceback} produces and prints a default traceback.
The default traceback consists of a header::
Traceback (most recent call last):
The body with traceback::
File "/twisted/trial/_synctest.py", line 1180, in _run
runWithWarningsSuppressed(suppress, method)
And the footer::
--- <exception caught here> ---
File "twisted/test/test_failure.py", line 39, in getDivisionFailure
1/0
exceptions.ZeroDivisionError: float division
@param captureVars: Enables L{Failure.captureVars}.
@type captureVars: C{bool}
"""
if captureVars:
exampleLocalVar = 'xyzzy'
# Silence the linter as this variable is checked via
# the traceback.
exampleLocalVar
f = getDivisionFailure(captureVars=captureVars)
out = NativeStringIO()
f.printTraceback(out)
tb = out.getvalue()
stack = ''
for method, filename, lineno, localVars, globalVars in f.frames:
stack += ' File "%s", line %s, in %s\n' % (filename, lineno,
method)
stack += ' %s\n' % (linecache.getline(
filename, lineno).strip(),)
self.assertTracebackFormat(tb,
"Traceback (most recent call last):",
"%s\n%s%s: %s\n" % (failure.EXCEPTION_CAUGHT_HERE, stack,
reflect.qual(f.type), reflect.safe_str(f.value)))
if captureVars:
self.assertIsNone(re.search('exampleLocalVar.*xyzzy', tb))
def test_printDetailedTraceback(self):
"""
L{printDetailedTraceback} returns a detailed traceback including the
L{Failure}'s count.
"""
self.assertDetailedTraceback()
def test_printBriefTraceback(self):
"""
L{printBriefTraceback} returns a brief traceback.
"""
self.assertBriefTraceback()
def test_printTraceback(self):
"""
L{printTraceback} returns a traceback.
"""
self.assertDefaultTraceback()
def test_printDetailedTracebackCapturedVars(self):
"""
L{printDetailedTraceback} captures the locals and globals for its
stack frames and adds them to the traceback, when called on a
L{Failure} constructed with C{captureVars=True}.
"""
self.assertDetailedTraceback(captureVars=True)
def test_printBriefTracebackCapturedVars(self):
"""
L{printBriefTraceback} returns a brief traceback when called on a
L{Failure} constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertBriefTraceback(captureVars=True)
def test_printTracebackCapturedVars(self):
"""
L{printTraceback} returns a traceback when called on a L{Failure}
constructed with C{captureVars=True}.
Local variables on the stack can not be seen in the resulting
traceback.
"""
self.assertDefaultTraceback(captureVars=True)
def test_printDetailedTracebackCapturedVarsCleaned(self):
"""
C{printDetailedTraceback} includes information about local variables on
the stack after C{cleanFailure} has been called.
"""
self.assertDetailedTraceback(captureVars=True, cleanFailure=True)
def test_invalidFormatFramesDetail(self):
"""
L{failure.format_frames} raises a L{ValueError} if the supplied
C{detail} level is unknown.
"""
self.assertRaises(ValueError, failure.format_frames, None, None,
detail='noisia')
def test_ExplictPass(self):
e = RuntimeError()
f = failure.Failure(e)
f.trap(RuntimeError)
self.assertEqual(f.value, e)
def _getInnermostFrameLine(self, f):
try:
f.raiseException()
except ZeroDivisionError:
tb = traceback.extract_tb(sys.exc_info()[2])
return tb[-1][-1]
else:
raise Exception(
"f.raiseException() didn't raise ZeroDivisionError!?")
def test_RaiseExceptionWithTB(self):
f = getDivisionFailure()
innerline = self._getInnermostFrameLine(f)
self.assertEqual(innerline, '1/0')
def test_stringExceptionConstruction(self):
"""
Constructing a C{Failure} with a string as its exception value raises
a C{TypeError}, as this is no longer supported as of Python 2.6.
"""
exc = self.assertRaises(TypeError, failure.Failure, "ono!")
self.assertIn("Strings are not supported by Failure", str(exc))
def test_ConstructionFails(self):
"""
Creating a Failure with no arguments causes it to try to discover the
current interpreter exception state. If no such state exists, creating
the Failure should raise a synchronous exception.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertRaises(failure.NoCurrentExceptionError, failure.Failure)
def test_getTracebackObject(self):
"""
If the C{Failure} has not been cleaned, then C{getTracebackObject}
returns the traceback object that captured in its constructor.
"""
f = getDivisionFailure()
self.assertEqual(f.getTracebackObject(), f.tb)
def test_getTracebackObjectFromCaptureVars(self):
"""
C{captureVars=True} has no effect on the result of
C{getTracebackObject}.
"""
try:
1/0
except ZeroDivisionError:
noVarsFailure = failure.Failure()
varsFailure = failure.Failure(captureVars=True)
self.assertEqual(noVarsFailure.getTracebackObject(), varsFailure.tb)
def test_getTracebackObjectFromClean(self):
"""
If the Failure has been cleaned, then C{getTracebackObject} returns an
object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure()
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertIsNotNone(expected)
self.assertEqual(expected, observed)
def test_getTracebackObjectFromCaptureVarsAndClean(self):
"""
If the Failure was created with captureVars, then C{getTracebackObject}
returns an object that looks the same to L{traceback.extract_tb}.
"""
f = getDivisionFailure(captureVars=True)
expected = traceback.extract_tb(f.getTracebackObject())
f.cleanFailure()
observed = traceback.extract_tb(f.getTracebackObject())
self.assertEqual(expected, observed)
def test_getTracebackObjectWithoutTraceback(self):
"""
L{failure.Failure}s need not be constructed with traceback objects. If
a C{Failure} has no traceback information at all, C{getTracebackObject}
just returns None.
None is a good value, because traceback.extract_tb(None) -> [].
"""
f = failure.Failure(Exception("some error"))
self.assertIsNone(f.getTracebackObject())
def test_tracebackFromExceptionInPython3(self):
"""
If a L{failure.Failure} is constructed with an exception but no
traceback in Python 3, the traceback will be extracted from the
exception's C{__traceback__} attribute.
"""
try:
1/0
except:
klass, exception, tb = sys.exc_info()
f = failure.Failure(exception)
self.assertIs(f.tb, tb)
def test_cleanFailureRemovesTracebackInPython3(self):
"""
L{failure.Failure.cleanFailure} sets the C{__traceback__} attribute of
the exception to L{None} in Python 3.
"""
f = getDivisionFailure()
self.assertIsNotNone(f.tb)
self.assertIs(f.value.__traceback__, f.tb)
f.cleanFailure()
self.assertIsNone(f.value.__traceback__)
if getattr(BaseException, "__traceback__", None) is None:
test_tracebackFromExceptionInPython3.skip = "Python 3 only."
test_cleanFailureRemovesTracebackInPython3.skip = "Python 3 only."
def test_repr(self):
"""
The C{repr} of a L{failure.Failure} shows the type and string
representation of the underlying exception.
"""
f = getDivisionFailure()
typeName = reflect.fullyQualifiedName(ZeroDivisionError)
self.assertEqual(
repr(f),
'<twisted.python.failure.Failure '
'%s: division by zero>' % (typeName,))
class BrokenStr(Exception):
"""
An exception class the instances of which cannot be presented as strings via
C{str}.
"""
def __str__(self):
# Could raise something else, but there's no point as yet.
raise self
class BrokenExceptionMetaclass(type):
"""
A metaclass for an exception type which cannot be presented as a string via
C{str}.
"""
def __str__(self):
raise ValueError("You cannot make a string out of me.")
class BrokenExceptionType(Exception, object):
"""
    The aforementioned exception type which cannot be presented as a string via
C{str}.
"""
__metaclass__ = BrokenExceptionMetaclass
class GetTracebackTests(SynchronousTestCase):
"""
Tests for L{Failure.getTraceback}.
"""
def _brokenValueTest(self, detail):
"""
Construct a L{Failure} with an exception that raises an exception from
its C{__str__} method and then call C{getTraceback} with the specified
detail and verify that it returns a string.
"""
x = BrokenStr()
f = failure.Failure(x)
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenValueBriefDetail(self):
"""
A L{Failure} might wrap an exception with a C{__str__} method which
raises an exception. In this case, calling C{getTraceback} on the
failure with the C{"brief"} detail does not raise an exception.
"""
self._brokenValueTest("brief")
def test_brokenValueDefaultDetail(self):
"""
Like test_brokenValueBriefDetail, but for the C{"default"} detail case.
"""
self._brokenValueTest("default")
def test_brokenValueVerboseDetail(self):
"""
        Like test_brokenValueBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenValueTest("verbose")
def _brokenTypeTest(self, detail):
"""
Construct a L{Failure} with an exception type that raises an exception
from its C{__str__} method and then call C{getTraceback} with the
specified detail and verify that it returns a string.
"""
f = failure.Failure(BrokenExceptionType())
traceback = f.getTraceback(detail=detail)
self.assertIsInstance(traceback, str)
def test_brokenTypeBriefDetail(self):
"""
A L{Failure} might wrap an exception the type object of which has a
C{__str__} method which raises an exception. In this case, calling
C{getTraceback} on the failure with the C{"brief"} detail does not raise
an exception.
"""
self._brokenTypeTest("brief")
def test_brokenTypeDefaultDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"default"} detail case.
"""
self._brokenTypeTest("default")
def test_brokenTypeVerboseDetail(self):
"""
Like test_brokenTypeBriefDetail, but for the C{"verbose"} detail case.
"""
self._brokenTypeTest("verbose")
class FindFailureTests(SynchronousTestCase):
"""
Tests for functionality related to L{Failure._findFailure}.
"""
def test_findNoFailureInExceptionHandler(self):
"""
Within an exception handler, _findFailure should return
L{None} in case no Failure is associated with the current
exception.
"""
try:
1/0
except:
self.assertIsNone(failure.Failure._findFailure())
else:
self.fail("No exception raised from 1/0!?")
def test_findNoFailure(self):
"""
Outside of an exception handler, _findFailure should return None.
"""
if sys.version_info < (3, 0):
sys.exc_clear()
self.assertIsNone(sys.exc_info()[-1]) #environment sanity check
self.assertIsNone(failure.Failure._findFailure())
def test_findFailure(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by raiseException).
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
self.assertEqual(failure.Failure._findFailure(), f)
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
raiseException, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
try:
f.raiseException()
except:
newF = failure.Failure()
self.assertEqual(f.getTraceback(), newF.getTraceback())
else:
self.fail("No exception raised from raiseException!?")
def test_failureConstructionWithMungedStackSucceeds(self):
"""
Pyrex and Cython are known to insert fake stack frames so as to give
more Python-like tracebacks. These stack frames with empty code objects
should not break extraction of the exception.
"""
try:
raiser.raiseException()
except raiser.RaiserException:
f = failure.Failure()
self.assertTrue(f.check(raiser.RaiserException))
else:
self.fail("No exception raised from extension?!")
if raiser is None:
skipMsg = "raiser extension not available"
test_failureConstructionWithMungedStackSucceeds.skip = skipMsg
# On Python 3.5, extract_tb returns "FrameSummary" objects, which are almost
# like the old tuples. This being different does not affect the actual tests
# as we are testing that the input works, and that extract_tb returns something
# reasonable.
if sys.version_info < (3, 5):
_tb = lambda fn, lineno, name, text: (fn, lineno, name, text)
else:
from traceback import FrameSummary
_tb = lambda fn, lineno, name, text: FrameSummary(fn, lineno, name)
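# Illustrative note (not executed): on Python < 3.5, _tb('foo.py', 10, 'bar', None)
# is just the tuple ('foo.py', 10, 'bar', None); on 3.5+ it is a FrameSummary built
# from the same filename/lineno/name, which is the type traceback.extract_tb() now
# returns, so the assertions below stay version-independent.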
class FormattableTracebackTests(SynchronousTestCase):
"""
Whitebox tests that show that L{failure._Traceback} constructs objects that
can be used by L{traceback.extract_tb}.
If the objects can be used by L{traceback.extract_tb}, then they can be
formatted using L{traceback.format_tb} and friends.
"""
def test_singleFrame(self):
"""
A C{_Traceback} object constructed with a single frame should be able
to be passed to L{traceback.extract_tb}, and we should get a singleton
list containing a (filename, lineno, methodname, line) tuple.
"""
tb = failure._Traceback([['method', 'filename.py', 123, {}, {}]])
# Note that we don't need to test that extract_tb correctly extracts
# the line's contents. In this case, since filename.py doesn't exist,
# it will just use None.
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method', None)])
def test_manyFrames(self):
"""
A C{_Traceback} object constructed with multiple frames should be able
to be passed to L{traceback.extract_tb}, and we should get a list
containing a tuple for each frame.
"""
tb = failure._Traceback([
['method1', 'filename.py', 123, {}, {}],
['method2', 'filename.py', 235, {}, {}]])
self.assertEqual(traceback.extract_tb(tb),
[_tb('filename.py', 123, 'method1', None),
_tb('filename.py', 235, 'method2', None)])
class FrameAttributesTests(SynchronousTestCase):
"""
_Frame objects should possess some basic attributes that qualify them as
fake python Frame objects.
"""
def test_fakeFrameAttributes(self):
"""
L{_Frame} instances have the C{f_globals} and C{f_locals} attributes
bound to C{dict} instance. They also have the C{f_code} attribute
bound to something like a code object.
"""
frame = failure._Frame("dummyname", "dummyfilename")
self.assertIsInstance(frame.f_globals, dict)
self.assertIsInstance(frame.f_locals, dict)
self.assertIsInstance(frame.f_code, failure._Code)
class DebugModeTests(SynchronousTestCase):
"""
Failure's debug mode should allow jumping into the debugger.
"""
def setUp(self):
"""
Override pdb.post_mortem so we can make sure it's called.
"""
# Make sure any changes we make are reversed:
post_mortem = pdb.post_mortem
origInit = failure.Failure.__init__
def restore():
pdb.post_mortem = post_mortem
failure.Failure.__init__ = origInit
self.addCleanup(restore)
self.result = []
pdb.post_mortem = self.result.append
failure.startDebugMode()
def test_regularFailure(self):
"""
If startDebugMode() is called, calling Failure() will first call
pdb.post_mortem with the traceback.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure()
self.assertEqual(self.result, [tb])
self.assertFalse(f.captureVars)
def test_captureVars(self):
"""
If startDebugMode() is called, passing captureVars to Failure() will
not blow up.
"""
try:
1/0
except:
typ, exc, tb = sys.exc_info()
f = failure.Failure(captureVars=True)
self.assertEqual(self.result, [tb])
self.assertTrue(f.captureVars)
class ExtendedGeneratorTests(SynchronousTestCase):
"""
Tests C{failure.Failure} support for generator features added in Python 2.5
"""
def _throwIntoGenerator(self, f, g):
try:
f.throwExceptionIntoGenerator(g)
except StopIteration:
pass
else:
self.fail("throwExceptionIntoGenerator should have raised "
"StopIteration")
def test_throwExceptionIntoGenerator(self):
"""
It should be possible to throw the exception that a Failure
represents into a generator.
"""
stuff = []
def generator():
try:
yield
except:
stuff.append(sys.exc_info())
else:
self.fail("Yield should have yielded exception.")
g = generator()
f = getDivisionFailure()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(stuff[0][0], ZeroDivisionError)
self.assertIsInstance(stuff[0][1], ZeroDivisionError)
self.assertEqual(traceback.extract_tb(stuff[0][2])[-1][-1], "1/0")
def test_findFailureInGenerator(self):
"""
Within an exception handler, it should be possible to find the
original Failure that caused the current exception (if it was
caused by throwExceptionIntoGenerator).
"""
f = getDivisionFailure()
f.cleanFailure()
foundFailures = []
def generator():
try:
yield
except:
foundFailures.append(failure.Failure._findFailure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(foundFailures, [f])
def test_failureConstructionFindsOriginalFailure(self):
"""
When a Failure is constructed in the context of an exception
handler that is handling an exception raised by
throwExceptionIntoGenerator, the new Failure should be chained to that
original Failure.
"""
f = getDivisionFailure()
f.cleanFailure()
newFailures = []
def generator():
try:
yield
except:
newFailures.append(failure.Failure())
else:
self.fail("No exception sent to generator")
g = generator()
next(g)
self._throwIntoGenerator(f, g)
self.assertEqual(len(newFailures), 1)
self.assertEqual(newFailures[0].getTraceback(), f.getTraceback())
def test_ambiguousFailureInGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} inside the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
try:
yield
except:
[][1]
except:
self.assertIsInstance(failure.Failure().value, IndexError)
g = generator()
next(g)
f = getDivisionFailure()
self._throwIntoGenerator(f, g)
def test_ambiguousFailureFromGenerator(self):
"""
When a generator reraises a different exception,
L{Failure._findFailure} above the generator should find the reraised
exception rather than original one.
"""
def generator():
try:
yield
except:
[][1]
g = generator()
next(g)
f = getDivisionFailure()
try:
self._throwIntoGenerator(f, g)
except:
self.assertIsInstance(failure.Failure().value, IndexError)
| 31.460697 | 110 | 0.607281 | [
"Apache-2.0"
] | XZH950926/meizitu | fang/Twisted-18.4.0/src/twisted/test/test_failure.py | 31,618 | Python |
from sendbee_api.models import Model
from sendbee_api.fields import TextField, BooleanField
class RateLimitError(Model):
"""Data model for rate limit error"""
_detail = TextField(index='detail', desc='Message detail')
_error = BooleanField(index='error', desc='Error or not')
_type = TextField(index='type', desc='Message type')
31.636364 | 62 | 0.729885 | ["MIT"] | sendbee/sendbee-python-client | sendbee_api/rate_limit/models.py | 348 | Python
# Generated by Django 3.1 on 2020-10-07 00:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0016_job_auth_token'),
]
operations = [
migrations.AlterField(
model_name='report',
name='logs',
field=models.CharField(max_length=16384),
),
]
20.157895 | 53 | 0.597911 | ["MIT"] | Quving/jobby | api/resources/migrations/0017_auto_20201007_0022.py | 383 | Python
"""Module to handle all events within AppDaemon."""
import uuid
from copy import deepcopy
import traceback
import datetime
from appdaemon.appdaemon import AppDaemon
import appdaemon.utils as utils
class Events:
"""Encapsulate event handling."""
def __init__(self, ad: AppDaemon):
"""Constructor.
Args:
ad: Reference to the AppDaemon object
"""
self.AD = ad
self.logger = ad.logging.get_child("_events")
#
# Events
#
async def add_event_callback(self, name, namespace, cb, event, **kwargs):
"""Adds a callback for an event which is called internally by apps.
Args:
name (str): Name of the app.
namespace (str): Namespace of the event.
cb: Callback function.
event (str): Name of the event.
**kwargs: List of values to filter on, and additional arguments to pass to the callback.
Returns:
``None`` or the reference to the callback handle.
"""
if self.AD.threading.validate_pin(name, kwargs) is True:
if "pin" in kwargs:
pin_app = kwargs["pin_app"]
else:
pin_app = self.AD.app_management.objects[name]["pin_app"]
if "pin_thread" in kwargs:
pin_thread = kwargs["pin_thread"]
pin_app = True
else:
pin_thread = self.AD.app_management.objects[name]["pin_thread"]
async with self.AD.callbacks.callbacks_lock:
if name not in self.AD.callbacks.callbacks:
self.AD.callbacks.callbacks[name] = {}
handle = uuid.uuid4().hex
self.AD.callbacks.callbacks[name][handle] = {
"name": name,
"id": self.AD.app_management.objects[name]["id"],
"type": "event",
"function": cb,
"namespace": namespace,
"event": event,
"pin_app": pin_app,
"pin_thread": pin_thread,
"kwargs": kwargs,
}
if "timeout" in kwargs:
exec_time = await self.AD.sched.get_now() + datetime.timedelta(seconds=int(kwargs["timeout"]))
kwargs["__timeout"] = await self.AD.sched.insert_schedule(
name, exec_time, None, False, None, __event_handle=handle,
)
await self.AD.state.add_entity(
"admin",
"event_callback.{}".format(handle),
"active",
{
"app": name,
"event_name": event,
"function": cb.__name__,
"pinned": pin_app,
"pinned_thread": pin_thread,
"fired": 0,
"executed": 0,
"kwargs": kwargs,
},
)
return handle
else:
return None
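    # Illustrative sketch (hypothetical app and callback names, not part of the
    # module itself): registering a handler for a custom event and keeping the
    # returned uuid4 handle so it can later be cancelled.
    #
    #   handle = await self.add_event_callback(
    #       "hello_app", "default", on_mode_change, "MODE_CHANGE", timeout=60)
    #   ...
    #   await self.cancel_event_callback("hello_app", handle)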
async def cancel_event_callback(self, name, handle):
"""Cancels an event callback.
Args:
name (str): Name of the app or module.
handle: Previously supplied callback handle for the callback.
Returns:
None.
"""
executed = False
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
del self.AD.callbacks.callbacks[name][handle]
await self.AD.state.remove_entity("admin", "event_callback.{}".format(handle))
executed = True
if name in self.AD.callbacks.callbacks and self.AD.callbacks.callbacks[name] == {}:
del self.AD.callbacks.callbacks[name]
if not executed:
self.logger.warning(
"Invalid callback handle '{}' in cancel_event_callback() from app {}".format(handle, name)
)
return executed
async def info_event_callback(self, name, handle):
"""Gets the information of an event callback.
Args:
name (str): Name of the app or subsystem.
handle: Previously supplied handle for the callback.
Returns:
            The event name and a copy of the callback's keyword arguments, or raise a ``ValueError`` if an invalid handle is provided.
"""
async with self.AD.callbacks.callbacks_lock:
if name in self.AD.callbacks.callbacks and handle in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][handle]
return callback["event"], callback["kwargs"].copy()
else:
raise ValueError("Invalid handle: {}".format(handle))
async def fire_event(self, namespace, event, **kwargs):
"""Fires an event.
If the namespace does not have a plugin associated with it, the event will be fired locally.
If a plugin is associated, the firing of the event will be delegated to the plugin, under the
understanding that when the event is fired, the plugin will notify appdaemon that it occurred,
usually via the system the plugin is communicating with.
Args:
namespace (str): Namespace for the event to be fired in.
event (str): Name of the event.
**kwargs: Arguments to associate with the event.
Returns:
None.
"""
self.logger.debug("fire_plugin_event() %s %s %s", namespace, event, kwargs)
plugin = await self.AD.plugins.get_plugin_object(namespace)
if hasattr(plugin, "fire_plugin_event"):
# We assume that the event will come back to us via the plugin
await plugin.fire_plugin_event(event, namespace, **kwargs)
else:
# Just fire the event locally
await self.AD.events.process_event(namespace, {"event_type": event, "data": kwargs})
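    # Illustrative sketch (hypothetical namespace/event names): a namespace whose
    # plugin implements fire_plugin_event delegates to it; otherwise the event is
    # dispatched locally via process_event():
    #
    #   await self.fire_event("rules", "MODE_CHANGE", mode="evening")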
async def process_event(self, namespace, data):
"""Processes an event that has been received either locally or from a plugin.
Args:
namespace (str): Namespace the event was fired in.
data: Data associated with the event.
Returns:
None.
"""
try:
# if data["event_type"] == "__AD_ENTITY_REMOVED":
# print("process event")
self.logger.debug("Event type:%s:", data["event_type"])
self.logger.debug(data["data"])
            # Kick the scheduler so it updates its clock
if self.AD.sched is not None and self.AD.sched.realtime is False and namespace != "admin":
await self.AD.sched.kick()
if data["event_type"] == "state_changed":
if "entity_id" in data["data"] and "new_state" in data["data"]:
if data["data"]["new_state"] is None:
# most likely it is a deleted entity
return
entity_id = data["data"]["entity_id"]
self.AD.state.set_state_simple(namespace, entity_id, data["data"]["new_state"])
if self.AD.apps is True and namespace != "admin":
await self.AD.state.process_state_callbacks(namespace, data)
else:
self.logger.warning("Malformed 'state_changed' event: %s", data["data"])
return
# Check for log callbacks and exit to prevent loops
if data["event_type"] == "__AD_LOG_EVENT":
if await self.has_log_callback(data["data"]["app_name"]):
self.logger.debug("Discarding event for loop avoidance")
return
await self.AD.logging.process_log_callbacks(namespace, data)
if self.AD.apps is True: # and namespace != "admin":
# Process callbacks
await self.process_event_callbacks(namespace, data)
#
# Send to the stream
#
if self.AD.http is not None:
if data["event_type"] == "state_changed":
if data["data"]["new_state"] == data["data"]["old_state"]:
# Nothing changed so don't send
return
                # take a copy without "ts" if present, as it breaks deepcopy and json
if "ts" in data["data"]:
ts = data["data"].pop("ts")
mydata = deepcopy(data)
data["data"]["ts"] = ts
else:
mydata = deepcopy(data)
await self.AD.http.stream_update(namespace, mydata)
except Exception:
self.logger.warning("-" * 60)
self.logger.warning("Unexpected error during process_event()")
self.logger.warning("-" * 60)
self.logger.warning(traceback.format_exc())
self.logger.warning("-" * 60)
async def has_log_callback(self, name):
"""Returns ``True`` if the app has a log callback, ``False`` otherwise.
Used to prevent callback loops. In the calling logic, if this function returns
``True`` the resulting logging event will be suppressed.
Args:
name (str): Name of the app.
"""
has_log_callback = False
if name == "AppDaemon._stream":
has_log_callback = True
else:
async with self.AD.callbacks.callbacks_lock:
for callback in self.AD.callbacks.callbacks:
for _uuid in self.AD.callbacks.callbacks[callback]:
cb = self.AD.callbacks.callbacks[callback][_uuid]
if cb["name"] == name and cb["type"] == "event" and cb["event"] == "__AD_LOG_EVENT":
has_log_callback = True
elif cb["name"] == name and cb["type"] == "log":
has_log_callback = True
return has_log_callback
async def process_event_callbacks(self, namespace, data):
"""Processes a pure event callback.
Locate any callbacks that may be registered for this event, check for filters and if appropriate,
dispatch the event for further checking and eventual action.
Args:
namespace (str): Namespace of the event.
data: Data associated with the event.
Returns:
None.
"""
self.logger.debug("process_event_callbacks() %s %s", namespace, data)
removes = []
async with self.AD.callbacks.callbacks_lock:
for name in self.AD.callbacks.callbacks.keys():
for uuid_ in self.AD.callbacks.callbacks[name]:
callback = self.AD.callbacks.callbacks[name][uuid_]
if callback["namespace"] == namespace or callback["namespace"] == "global" or namespace == "global":
#
# Check for either a blank event (for all events)
# Or the event is a match
# But don't allow a global listen for any system events (events that start with __)
#
if "event" in callback and (
(callback["event"] is None and data["event_type"][:2] != "__")
or data["event_type"] == callback["event"]
):
# Check any filters
_run = True
for key in callback["kwargs"]:
if key in data["data"] and callback["kwargs"][key] != data["data"][key]:
_run = False
if data["event_type"] == "__AD_LOG_EVENT":
if (
"log" in callback["kwargs"]
and callback["kwargs"]["log"] != data["data"]["log_type"]
):
_run = False
if _run:
if name in self.AD.app_management.objects:
executed = await self.AD.threading.dispatch_worker(
name,
{
"id": uuid_,
"name": name,
"objectid": self.AD.app_management.objects[name]["id"],
"type": "event",
"event": data["event_type"],
"function": callback["function"],
"data": data["data"],
"pin_app": callback["pin_app"],
"pin_thread": callback["pin_thread"],
"kwargs": callback["kwargs"],
},
)
# Remove the callback if appropriate
if executed is True:
remove = callback["kwargs"].get("oneshot", False)
if remove is True:
removes.append({"name": name, "uuid": uuid_})
for remove in removes:
await self.cancel_event_callback(remove["name"], remove["uuid"])
async def event_services(self, namespace, domain, service, kwargs):
if "event" in kwargs:
event = kwargs["event"]
del kwargs["event"]
await self.fire_event(namespace, event, **kwargs)
else:
self.logger.warning("Malformed 'fire_event' service call, as no event given")
@staticmethod
def sanitize_event_kwargs(app, kwargs):
kwargs_copy = kwargs.copy()
return utils._sanitize_kwargs(kwargs_copy, ["__silent"])
39.029973 | 120 | 0.507121 | ["Apache-2.0"] | DTTerastar/appdaemon | appdaemon/events.py | 14,324 | Python
"""
Test lldb breakpoint setting by source regular expression.
This test just tests the source file & function restrictions.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSourceRegexBreakpoints(TestBase):
mydir = TestBase.compute_mydir(__file__)
def test_location(self):
self.build()
self.source_regex_locations()
def test_restrictions(self):
self.build()
self.source_regex_restrictions()
def source_regex_locations(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("a.c"))
func_names = lldb.SBStringList()
func_names.AppendString("a_func")
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 1,
"a.c in a_func should give one breakpoint, got %d." %
(num_locations))
loc = main_break.GetLocationAtIndex(0)
self.assertTrue(loc.IsValid(), "Got a valid location.")
address = loc.GetAddress()
self.assertTrue(
address.IsValid(),
"Got a valid address from the location.")
a_func_line = line_number("a.c", "Set A breakpoint here")
line_entry = address.GetLineEntry()
self.assertTrue(line_entry.IsValid(), "Got a valid line entry.")
self.assertTrue(line_entry.line == a_func_line,
"Our line number matches the one lldbtest found.")
def source_regex_restrictions(self):
""" Test that restricting source expressions to files & to functions. """
# Create a target by the debugger.
exe = os.path.join(os.getcwd(), "a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# First look just in main:
target_files = lldb.SBFileSpecList()
target_files.Append(lldb.SBFileSpec("main.c"))
source_regex = "Set . breakpoint here"
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main.c should have 2 matches, got %d." %
(num_locations))
# Now look in both files:
target_files.Append(lldb.SBFileSpec("a.c"))
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, lldb.SBStringList())
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 4,
"main.c and a.c should have 4 matches, got %d." %
(num_locations))
# Now restrict it to functions:
func_names = lldb.SBStringList()
func_names.AppendString("main_func")
main_break = target.BreakpointCreateBySourceRegex(
source_regex, lldb.SBFileSpecList(), target_files, func_names)
num_locations = main_break.GetNumLocations()
self.assertTrue(
num_locations == 2,
"main_func in main.c and a.c should have 2 matches, got %d." %
(num_locations))
35.588785 | 83 | 0.645221 | ["Apache-2.0"] | Polidea/SiriusObfuscator | SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/source_regexp/TestSourceRegexBreakpoints.py | 3,808 | Python
import queue
from ..workers import Worker
from ..codes import WORKER_PROPERTIES
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
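# Illustrative sketch (hypothetical class name): every class using this metaclass
# hands back one shared instance, no matter how often it is instantiated.
#
#   class Config(metaclass=Singleton):
#       pass
#
#   assert Config() is Config()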
class SocketHandler(metaclass=Singleton):
""" Socket Handler is a sigleton class used to handle/manage websocket connections. """
def __init__(self):
self.connections = {}
def new_connection(self, workerId: str, socket):
""" Create a mapping structure to establish a bond between a workerId and a socket descriptor.
Args:
workerId: UUID string used to identify workers.
socket: Socket descriptor that will be used to send/receive messages from this client.
Returns:
Worker: a worker instance with the corresponding workerId
"""
if workerId not in self.connections:
self.connections[workerId] = Worker(workerId, socket)
else:
worker = self.connections[workerId]
if worker.status == WORKER_PROPERTIES.OFFLINE:
worker._socket = socket
return self.connections[workerId]
def send_msg(self, workerId: str, message: str):
""" Find the socket descriptor mapped by workerId and send them a message.
Args:
workerId: UUID string used to identify and retrieve a worker.
message: Message to be send.
"""
socket = self.connections.get(workerId, None)
if socket:
socket.send(message)
def get(self, query):
"""Retrieve a worker by its UUID string or its socket descriptor."""
if isinstance(query, str):
return self.connections.get(query, None)
else:
return self.__retrieve_worker_by_socket(query)
def remove(self, socket) -> str:
""" Remove a socket descriptor from mapping structure. It will be used when the socket connection is closed.
Args:
socket: socket descriptor used to send/receive messages.
Returns:
workerId: Worker id linked to that connection.
"""
worker = self.__retrieve_worker_by_socket(socket)
if worker:
self.connections[worker._id]._socket = None
self.connections[worker._id].connected_nodes = []
return worker._id
def __retrieve_worker_by_socket(self, socket):
for worker_id, worker in self.connections.items():
if worker._socket == socket:
return self.connections[worker_id]
@property
def nodes(self) -> list:
"""Return all the connected nodes as a list of tuples of (worker_id, worker)"""
return list(self.connections.items())
def __len__(self) -> int:
""" Number of connections handled by this server.
Returns:
length: number of connections handled by this server.
"""
return len(self.connections)
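# Illustrative usage sketch (hypothetical worker id and socket objects):
#
#   handler = SocketHandler()                 # same instance everywhere (Singleton)
#   worker = handler.new_connection("worker-1", ws)
#   handler.send_msg("worker-1", '{"type": "ping"}')
#   handler.remove(ws)                        # detaches the socket, returns "worker-1"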
34.666667 | 116 | 0.620192 | ["Apache-2.0"] | kuronosec/PyGridNetwork | gridnetwork/events/socket_handler.py | 3,120 | Python
from polyphony import testbench
def fib(n):
if n <= 0: return 0
if n == 1: return 1
r0 = 0
r1 = 1
for i in range(n-1):
prev_r1 = r1
r1 = r0 + r1
r0 = prev_r1
return r1
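# Illustrative: the loop keeps the last two Fibonacci numbers in r0/r1, so
# fib(10) -> 55 and fib(15) -> 610, matching `expect` in the testbench below.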
@testbench
def test():
expect = [0,1,1,2,3,5,8,13,21,34,55,89,144,233,377,610]
for i in range(len(expect)):
result = fib(i)
assert expect[i] == result
print(i, "=>", result)
test()
18.782609 | 59 | 0.518519 | ["MIT"] | ryos36/polyphony-tutorial | Tutorial_2/fib.py | 432 | Python
# <pep8-80 compliant>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Nutti <[email protected]>"
__status__ = "production"
__version__ = "6.0"
__date__ = "26 Jan 2019"
import math
from math import atan2, cos, sqrt, sin, fabs
import bpy
from bpy.props import BoolProperty
import bmesh
from mathutils import Vector
from .. import common
from ..utils.bl_class_registry import BlClassRegistry
from ..utils.property_class_registry import PropertyClassRegistry
from ..utils import compatibility as compat
def _get_vco(verts_orig, loop):
"""
Get vertex original coordinate from loop
"""
for vo in verts_orig:
if vo["vidx"] == loop.vert.index and vo["moved"] is False:
return vo["vco"]
return loop.vert.co
def _get_link_loops(vert):
"""
Get loop linked to vertex
"""
link_loops = []
for f in vert.link_faces:
adj_loops = []
for loop in f.loops:
# self loop
if loop.vert == vert:
l = loop
# linked loop
else:
for e in loop.vert.link_edges:
if e.other_vert(loop.vert) == vert:
adj_loops.append(loop)
if len(adj_loops) < 2:
return None
link_loops.append({"l": l, "l0": adj_loops[0], "l1": adj_loops[1]})
return link_loops
def _get_ini_geom(link_loop, uv_layer, verts_orig, v_orig):
"""
    Get initial geometry
(Get interior angle of face in vertex/UV space)
"""
u = link_loop["l"][uv_layer].uv
v0 = _get_vco(verts_orig, link_loop["l0"])
u0 = link_loop["l0"][uv_layer].uv
v1 = _get_vco(verts_orig, link_loop["l1"])
u1 = link_loop["l1"][uv_layer].uv
# get interior angle of face in vertex space
v0v1 = v1 - v0
v0v = v_orig["vco"] - v0
v1v = v_orig["vco"] - v1
theta0 = v0v1.angle(v0v)
theta1 = v0v1.angle(-v1v)
if (theta0 + theta1) > math.pi:
theta0 = v0v1.angle(-v0v)
theta1 = v0v1.angle(v1v)
# get interior angle of face in UV space
u0u1 = u1 - u0
u0u = u - u0
u1u = u - u1
phi0 = u0u1.angle(u0u)
phi1 = u0u1.angle(-u1u)
if (phi0 + phi1) > math.pi:
phi0 = u0u1.angle(-u0u)
phi1 = u0u1.angle(u1u)
# get direction of linked UV coordinate
# this will be used to judge whether angle is more or less than 180 degree
dir0 = u0u1.cross(u0u) > 0
dir1 = u0u1.cross(u1u) > 0
return {
"theta0": theta0,
"theta1": theta1,
"phi0": phi0,
"phi1": phi1,
"dir0": dir0,
"dir1": dir1}
def _get_target_uv(link_loop, uv_layer, verts_orig, v, ini_geom):
"""
Get target UV coordinate
"""
v0 = _get_vco(verts_orig, link_loop["l0"])
lo0 = link_loop["l0"]
v1 = _get_vco(verts_orig, link_loop["l1"])
lo1 = link_loop["l1"]
# get interior angle of face in vertex space
v0v1 = v1 - v0
v0v = v.co - v0
v1v = v.co - v1
theta0 = v0v1.angle(v0v)
theta1 = v0v1.angle(-v1v)
if (theta0 + theta1) > math.pi:
theta0 = v0v1.angle(-v0v)
theta1 = v0v1.angle(v1v)
# calculate target interior angle in UV space
phi0 = theta0 * ini_geom["phi0"] / ini_geom["theta0"]
phi1 = theta1 * ini_geom["phi1"] / ini_geom["theta1"]
uv0 = lo0[uv_layer].uv
uv1 = lo1[uv_layer].uv
# calculate target vertex coordinate from target interior angle
tuv0, tuv1 = _calc_tri_vert(uv0, uv1, phi0, phi1)
# target UV coordinate depends on direction, so judge using direction of
# linked UV coordinate
u0u1 = uv1 - uv0
u0u = tuv0 - uv0
u1u = tuv0 - uv1
dir0 = u0u1.cross(u0u) > 0
dir1 = u0u1.cross(u1u) > 0
if (ini_geom["dir0"] != dir0) or (ini_geom["dir1"] != dir1):
return tuv1
return tuv0
def _calc_tri_vert(v0, v1, angle0, angle1):
"""
    Calculate the remaining triangle vertex from the other two vertices and their interior angles
"""
angle = math.pi - angle0 - angle1
alpha = atan2(v1.y - v0.y, v1.x - v0.x)
d = (v1.x - v0.x) / cos(alpha)
a = d * sin(angle0) / sin(angle)
b = d * sin(angle1) / sin(angle)
s = (a + b + d) / 2.0
if fabs(d) < 0.0000001:
xd = 0
yd = 0
else:
r = s * (s - a) * (s - b) * (s - d)
if r < 0:
xd = 0
yd = 0
else:
xd = (b * b - a * a + d * d) / (2 * d)
yd = 2 * sqrt(r) / d
x1 = xd * cos(alpha) - yd * sin(alpha) + v0.x
y1 = xd * sin(alpha) + yd * cos(alpha) + v0.y
x2 = xd * cos(alpha) + yd * sin(alpha) + v0.x
y2 = xd * sin(alpha) - yd * cos(alpha) + v0.y
return Vector((x1, y1)), Vector((x2, y2))
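# Illustrative check of _calc_tri_vert (values rounded): with v0=(0, 0), v1=(1, 0)
# and both interior angles at 45 degrees (math.pi / 4), the apex must sit at
# (0.5, 0.5) or its mirror (0.5, -0.5), which is exactly the candidate pair the
# function returns.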
def _is_valid_context(context):
obj = context.object
# only edit mode is allowed to execute
if obj is None:
return False
if obj.type != 'MESH':
return False
if context.object.mode != 'EDIT':
return False
# only 'VIEW_3D' space is allowed to execute
for space in context.area.spaces:
if space.type == 'VIEW_3D':
break
else:
return False
return True
@PropertyClassRegistry()
class _Properties:
idname = "texture_lock"
@classmethod
def init_props(cls, scene):
class Props():
verts_orig = None
scene.muv_props.texture_lock = Props()
def get_func(_):
return MUV_OT_TextureLock_Intr.is_running(bpy.context)
def set_func(_, __):
pass
def update_func(_, __):
bpy.ops.uv.muv_ot_texture_lock_intr('INVOKE_REGION_WIN')
scene.muv_texture_lock_enabled = BoolProperty(
name="Texture Lock Enabled",
description="Texture Lock is enabled",
default=False
)
scene.muv_texture_lock_lock = BoolProperty(
name="Texture Lock Locked",
description="Texture Lock is locked",
default=False,
get=get_func,
set=set_func,
update=update_func
)
scene.muv_texture_lock_connect = BoolProperty(
name="Connect UV",
default=True
)
@classmethod
def del_props(cls, scene):
del scene.muv_props.texture_lock
del scene.muv_texture_lock_enabled
del scene.muv_texture_lock_lock
del scene.muv_texture_lock_connect
@BlClassRegistry()
class MUV_OT_TextureLock_Lock(bpy.types.Operator):
"""
Operation class: Lock Texture
"""
bl_idname = "uv.muv_ot_texture_lock_lock"
bl_label = "Lock Texture"
bl_description = "Lock Texture"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
# we can not get area/space/region from console
if common.is_console_mode():
return True
return _is_valid_context(context)
@classmethod
def is_ready(cls, context):
sc = context.scene
props = sc.muv_props.texture_lock
if props.verts_orig:
return True
return False
def execute(self, context):
props = context.scene.muv_props.texture_lock
obj = bpy.context.active_object
bm = bmesh.from_edit_mesh(obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if not bm.loops.layers.uv:
self.report({'WARNING'}, "Object must have more than one UV map")
return {'CANCELLED'}
props.verts_orig = [
{"vidx": v.index, "vco": v.co.copy(), "moved": False}
for v in bm.verts if v.select]
return {'FINISHED'}
@BlClassRegistry()
@compat.make_annotations
class MUV_OT_TextureLock_Unlock(bpy.types.Operator):
"""
Operation class: Unlock Texture
"""
bl_idname = "uv.muv_ot_texture_lock_unlock"
bl_label = "Unlock Texture"
bl_description = "Unlock Texture"
bl_options = {'REGISTER', 'UNDO'}
connect = BoolProperty(
name="Connect UV",
default=True
)
@classmethod
def poll(cls, context):
# we can not get area/space/region from console
if common.is_console_mode():
return True
sc = context.scene
props = sc.muv_props.texture_lock
if not props.verts_orig:
return False
if not MUV_OT_TextureLock_Lock.is_ready(context):
return False
if not _is_valid_context(context):
return False
return True
def execute(self, context):
sc = context.scene
props = sc.muv_props.texture_lock
obj = bpy.context.active_object
bm = bmesh.from_edit_mesh(obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if not bm.loops.layers.uv:
self.report({'WARNING'}, "Object must have more than one UV map")
return {'CANCELLED'}
uv_layer = bm.loops.layers.uv.verify()
verts = [v.index for v in bm.verts if v.select]
verts_orig = props.verts_orig
# move UV followed by vertex coordinate
for vidx, v_orig in zip(verts, verts_orig):
if vidx != v_orig["vidx"]:
self.report({'ERROR'}, "Internal Error")
return {"CANCELLED"}
v = bm.verts[vidx]
link_loops = _get_link_loops(v)
result = []
for ll in link_loops:
ini_geom = _get_ini_geom(ll, uv_layer, verts_orig, v_orig)
target_uv = _get_target_uv(
ll, uv_layer, verts_orig, v, ini_geom)
result.append({"l": ll["l"], "uv": target_uv})
# connect other face's UV
if self.connect:
ave = Vector((0.0, 0.0))
for r in result:
ave = ave + r["uv"]
ave = ave / len(result)
for r in result:
r["l"][uv_layer].uv = ave
else:
for r in result:
r["l"][uv_layer].uv = r["uv"]
v_orig["moved"] = True
bmesh.update_edit_mesh(obj.data)
props.verts_orig = None
return {'FINISHED'}
@BlClassRegistry()
class MUV_OT_TextureLock_Intr(bpy.types.Operator):
"""
Operation class: Texture Lock (Interactive mode)
"""
bl_idname = "uv.muv_ot_texture_lock_intr"
bl_label = "Texture Lock (Interactive mode)"
bl_description = "Internal operation for Texture Lock (Interactive mode)"
__timer = None
@classmethod
def poll(cls, context):
# we can not get area/space/region from console
if common.is_console_mode():
return False
return _is_valid_context(context)
@classmethod
def is_running(cls, _):
return 1 if cls.__timer else 0
@classmethod
def handle_add(cls, ops_obj, context):
if cls.__timer is None:
cls.__timer = context.window_manager.event_timer_add(
0.10, window=context.window)
context.window_manager.modal_handler_add(ops_obj)
@classmethod
def handle_remove(cls, context):
if cls.__timer is not None:
context.window_manager.event_timer_remove(cls.__timer)
cls.__timer = None
def __init__(self):
self.__intr_verts_orig = []
self.__intr_verts = []
def __sel_verts_changed(self, context):
obj = context.active_object
bm = bmesh.from_edit_mesh(obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
prev = set(self.__intr_verts)
now = set([v.index for v in bm.verts if v.select])
return prev != now
def __reinit_verts(self, context):
obj = context.active_object
bm = bmesh.from_edit_mesh(obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
self.__intr_verts_orig = [
{"vidx": v.index, "vco": v.co.copy(), "moved": False}
for v in bm.verts if v.select]
self.__intr_verts = [v.index for v in bm.verts if v.select]
def __update_uv(self, context):
"""
Update UV when vertex coordinates are changed
"""
obj = context.active_object
bm = bmesh.from_edit_mesh(obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if not bm.loops.layers.uv:
self.report({'WARNING'}, "Object must have more than one UV map")
return
uv_layer = bm.loops.layers.uv.verify()
verts = [v.index for v in bm.verts if v.select]
verts_orig = self.__intr_verts_orig
for vidx, v_orig in zip(verts, verts_orig):
if vidx != v_orig["vidx"]:
self.report({'ERROR'}, "Internal Error")
return
v = bm.verts[vidx]
link_loops = _get_link_loops(v)
result = []
for ll in link_loops:
ini_geom = _get_ini_geom(ll, uv_layer, verts_orig, v_orig)
target_uv = _get_target_uv(
ll, uv_layer, verts_orig, v, ini_geom)
result.append({"l": ll["l"], "uv": target_uv})
# UV connect option is always true, because it raises
# unexpected behavior
ave = Vector((0.0, 0.0))
for r in result:
ave = ave + r["uv"]
ave = ave / len(result)
for r in result:
r["l"][uv_layer].uv = ave
v_orig["moved"] = True
bmesh.update_edit_mesh(obj.data)
common.redraw_all_areas()
self.__intr_verts_orig = [
{"vidx": v.index, "vco": v.co.copy(), "moved": False}
for v in bm.verts if v.select]
def modal(self, context, event):
if not _is_valid_context(context):
MUV_OT_TextureLock_Intr.handle_remove(context)
return {'FINISHED'}
if not MUV_OT_TextureLock_Intr.is_running(context):
return {'FINISHED'}
if context.area:
context.area.tag_redraw()
if event.type == 'TIMER':
if self.__sel_verts_changed(context):
self.__reinit_verts(context)
else:
self.__update_uv(context)
return {'PASS_THROUGH'}
def invoke(self, context, _):
if not _is_valid_context(context):
return {'CANCELLED'}
if not MUV_OT_TextureLock_Intr.is_running(context):
MUV_OT_TextureLock_Intr.handle_add(self, context)
return {'RUNNING_MODAL'}
else:
MUV_OT_TextureLock_Intr.handle_remove(context)
if context.area:
context.area.tag_redraw()
return {'FINISHED'}
29.702602 | 78 | 0.584293 | ["Unlicense"] | byteinc/Phasor | engine/2.80/scripts/addons/magic_uv/op/texture_lock.py | 15,980 | Python
from LAMARCK_ML.models.interface import ModellUtil
class DataSaverInterface(ModellUtil):
def __init__(self, **kwargs):
super(DataSaverInterface, self).__init__(**kwargs)
def get_individual_by_name(self, name):
raise NotImplementedError()
def get_ancestry_for_ind(self, ind_name):
raise NotImplementedError()
def get_ancestries(self):
raise NotImplementedError()
def get_individual_names(self):
    raise NotImplementedError()
25.388889 | 54 | 0.770241 | ["Apache-2.0", "BSD-3-Clause"] | JonasDHomburg/LAMARCK | LAMARCK_ML/utils/dataSaver/interface.py | 457 | Python
# Original code from https://github.com/araffin/robotics-rl-srl
# Authors: Antonin Raffin, René Traoré, Ashley Hill
import argparse
import cv2 # pytype: disable=import-error
import numpy as np
from ae.autoencoder import Autoencoder
def create_figure_and_sliders(name, state_dim):
"""
Creating a window for the latent space visualization,
and another one for the sliders to control it.
:param name: name of model (str)
:param state_dim: (int)
:return:
"""
# opencv gui setup
cv2.namedWindow(name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(name, 500, 500)
cv2.namedWindow("slider for " + name)
# add a slider for each component of the latent space
for i in range(state_dim):
# the sliders MUST be between 0 and max, so we placed max at 100, and start at 50
        # So that when we subtract 50 and divide by 10 we get [-5,5] for each component
cv2.createTrackbar(str(i), "slider for " + name, 50, 100, (lambda a: None))
def main():
parser = argparse.ArgumentParser(description="latent space enjoy")
parser.add_argument("--log-dir", default="", type=str, help="directory to load model")
parser.add_argument("-ae", "--ae-path", help="Path to saved AE", type=str, default="")
args = parser.parse_args()
autoencoder = Autoencoder.load(args.ae_path)
fig_name = "Decoder for the AE"
# TODO: load data to infer bounds
bound_min = -10
bound_max = 10
create_figure_and_sliders(fig_name, autoencoder.z_size)
should_exit = False
while not should_exit:
# stop if escape is pressed
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
state = []
for i in range(autoencoder.z_size):
state.append(cv2.getTrackbarPos(str(i), "slider for " + fig_name))
# Rescale the values to fit the bounds of the representation
state = (np.array(state) / 100) * (bound_max - bound_min) + bound_min
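        # e.g. with bound_min=-10 and bound_max=10, a slider at 0 maps to -10.0,
        # at 50 to 0.0 and at 100 to +10.0 for that latent component.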
reconstructed_image = autoencoder.decode(state[None])[0]
# stop if user closed a window
if (cv2.getWindowProperty(fig_name, 0) < 0) or (cv2.getWindowProperty("slider for " + fig_name, 0) < 0):
should_exit = True
break
cv2.imshow(fig_name, reconstructed_image)
# gracefully close
cv2.destroyAllWindows()
if __name__ == "__main__":
main()
31.6 | 112 | 0.656962 | ["MIT"] | araffin/aae-train-donkeycar | ae/enjoy_latent.py | 2,372 | Python
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ppdet.core.workspace import register
from ppdet.modeling.bbox_utils import nonempty_bbox, rbox2poly
from ppdet.modeling.layers import TTFBox
from .transformers import bbox_cxcywh_to_xyxy
try:
from collections.abc import Sequence
except Exception:
from collections import Sequence
__all__ = [
'BBoxPostProcess', 'MaskPostProcess', 'FCOSPostProcess',
'S2ANetBBoxPostProcess', 'JDEBBoxPostProcess', 'CenterNetPostProcess',
'DETRBBoxPostProcess', 'SparsePostProcess'
]
@register
class BBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=80, decode=None, nms=None):
super(BBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.fake_bboxes = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, head_out, rois, im_shape, scale_factor):
"""
Decode the bbox and do NMS if needed.
Args:
head_out (tuple): bbox_pred and cls_prob of bbox_head output.
rois (tuple): roi and rois_num of rpn_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox coordinates correspond
                to the input image, and the bboxes may be used in other branches.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
"""
if self.nms is not None:
bboxes, score = self.decode(head_out, rois, im_shape, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score, self.num_classes)
else:
bbox_pred, bbox_num = self.decode(head_out, rois, im_shape,
scale_factor)
return bbox_pred, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Notes:
        Currently only supports bs = 1.
Args:
bboxes (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
pred_result (Tensor): The final prediction results with shape [N, 6]
including labels, scores and bboxes.
"""
if bboxes.shape[0] == 0:
bboxes = self.fake_bboxes
bbox_num = self.fake_bbox_num
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([scale_x, scale_y, scale_x, scale_y])
expand_scale = paddle.expand(scale, [bbox_num[i], 4])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
self.origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 6], label, score, bbox
pred_label = bboxes[:, 0:1]
pred_score = bboxes[:, 1:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
scaled_bbox = pred_bbox / scale_factor_list
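        # e.g. a box predicted at (200, 100, 400, 300) on an input resized with
        # scale_factor 2.0 maps back to (100, 50, 200, 150) on the original image.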
origin_h = self.origin_shape_list[:, 0]
origin_w = self.origin_shape_list[:, 1]
zeros = paddle.zeros_like(origin_h)
# clip bbox to [0, original_size]
x1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 0], origin_w), zeros)
y1 = paddle.maximum(paddle.minimum(scaled_bbox[:, 1], origin_h), zeros)
x2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 2], origin_w), zeros)
y2 = paddle.maximum(paddle.minimum(scaled_bbox[:, 3], origin_h), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2], axis=-1)
# filter empty bbox
keep_mask = nonempty_bbox(pred_bbox, return_mask=True)
keep_mask = paddle.unsqueeze(keep_mask, [1])
pred_label = paddle.where(keep_mask, pred_label,
paddle.ones_like(pred_label) * -1)
pred_result = paddle.concat([pred_label, pred_score, pred_bbox], axis=1)
return pred_result
def get_origin_shape(self, ):
return self.origin_shape_list
@register
class MaskPostProcess(object):
"""
refer to:
https://github.com/facebookresearch/detectron2/layers/mask_ops.py
Get Mask output according to the output from model
"""
def __init__(self, binary_thresh=0.5):
super(MaskPostProcess, self).__init__()
self.binary_thresh = binary_thresh
def paste_mask(self, masks, boxes, im_h, im_w):
"""
Paste the mask prediction to the original image.
"""
x0, y0, x1, y1 = paddle.split(boxes, 4, axis=1)
masks = paddle.unsqueeze(masks, [0, 1])
img_y = paddle.arange(0, im_h, dtype='float32') + 0.5
img_x = paddle.arange(0, im_w, dtype='float32') + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
img_x = paddle.unsqueeze(img_x, [1])
img_y = paddle.unsqueeze(img_y, [2])
N = boxes.shape[0]
gx = paddle.expand(img_x, [N, img_y.shape[1], img_x.shape[2]])
gy = paddle.expand(img_y, [N, img_y.shape[1], img_x.shape[2]])
grid = paddle.stack([gx, gy], axis=3)
img_masks = F.grid_sample(masks, grid, align_corners=False)
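        # The normalised img_x / img_y grids map every pixel centre of the target
        # image into the [-1, 1] frame of the mask inside its box, so grid_sample
        # resizes and pastes each mask in a single pass.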
return img_masks[:, 0]
def __call__(self, mask_out, bboxes, bbox_num, origin_shape):
"""
Decode the mask_out and paste the mask to the origin image.
Args:
mask_out (Tensor): mask_head output with shape [N, 28, 28].
bbox_pred (Tensor): The output bboxes with shape [N, 6] after decode
and NMS, including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction boxes of each batch with
shape [1], and is N.
origin_shape (Tensor): The origin shape of the input image, the tensor
shape is [N, 2], and each row is [h, w].
Returns:
pred_result (Tensor): The final prediction mask results with shape
[N, h, w] in binary mask style.
"""
num_mask = mask_out.shape[0]
origin_shape = paddle.cast(origin_shape, 'int32')
# TODO: support bs > 1 and mask output dtype is bool
pred_result = paddle.zeros(
[num_mask, origin_shape[0][0], origin_shape[0][1]], dtype='int32')
if bbox_num == 1 and bboxes[0][0] == -1:
return pred_result
# TODO: optimize chunk paste
pred_result = []
for i in range(bboxes.shape[0]):
im_h, im_w = origin_shape[i][0], origin_shape[i][1]
pred_mask = self.paste_mask(mask_out[i], bboxes[i:i + 1, 2:], im_h,
im_w)
pred_mask = pred_mask >= self.binary_thresh
pred_mask = paddle.cast(pred_mask, 'int32')
pred_result.append(pred_mask)
pred_result = paddle.concat(pred_result)
return pred_result
@register
class FCOSPostProcess(object):
__inject__ = ['decode', 'nms']
def __init__(self, decode=None, nms=None):
super(FCOSPostProcess, self).__init__()
self.decode = decode
self.nms = nms
def __call__(self, fcos_head_outs, scale_factor):
"""
Decode the bbox and do NMS in FCOS.
"""
locations, cls_logits, bboxes_reg, centerness = fcos_head_outs
bboxes, score = self.decode(locations, cls_logits, bboxes_reg,
centerness, scale_factor)
bbox_pred, bbox_num, _ = self.nms(bboxes, score)
return bbox_pred, bbox_num
@register
class S2ANetBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['nms']
def __init__(self, num_classes=15, nms_pre=2000, min_bbox_size=0, nms=None):
super(S2ANetBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.nms_pre = paddle.to_tensor(nms_pre)
self.min_bbox_size = min_bbox_size
self.nms = nms
self.origin_shape_list = []
self.fake_pred_cls_score_bbox = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]],
dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
def forward(self, pred_scores, pred_bboxes):
"""
pred_scores : [N, M] score
pred_bboxes : [N, 5] xc, yc, w, h, a
im_shape : [N, 2] im_shape
scale_factor : [N, 2] scale_factor
"""
pred_ploys0 = rbox2poly(pred_bboxes)
pred_ploys = paddle.unsqueeze(pred_ploys0, axis=0)
# pred_scores [NA, 16] --> [16, NA]
pred_scores0 = paddle.transpose(pred_scores, [1, 0])
pred_scores = paddle.unsqueeze(pred_scores0, axis=0)
pred_cls_score_bbox, bbox_num, _ = self.nms(pred_ploys, pred_scores,
self.num_classes)
# Prevent empty bbox_pred from decode or NMS.
# Bboxes and score before NMS may be empty due to the score threshold.
if pred_cls_score_bbox.shape[0] <= 0 or pred_cls_score_bbox.shape[
1] <= 1:
pred_cls_score_bbox = self.fake_pred_cls_score_bbox
bbox_num = self.fake_bbox_num
pred_cls_score_bbox = paddle.reshape(pred_cls_score_bbox, [-1, 10])
return pred_cls_score_bbox, bbox_num
def get_pred(self, bboxes, bbox_num, im_shape, scale_factor):
"""
Rescale, clip and filter the bbox from the output of NMS to
get final prediction.
Args:
bboxes(Tensor): bboxes [N, 10]
bbox_num(Tensor): bbox_num
im_shape(Tensor): [1 2]
scale_factor(Tensor): [1 2]
Returns:
bbox_pred(Tensor): The output is the prediction with shape [N, 8]
                               including labels, scores and bboxes. The bbox
                               coordinates correspond to the original image.
"""
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
origin_shape_list = []
scale_factor_list = []
# scale_factor: scale_y, scale_x
for i in range(bbox_num.shape[0]):
expand_shape = paddle.expand(origin_shape[i:i + 1, :],
[bbox_num[i], 2])
scale_y, scale_x = scale_factor[i][0], scale_factor[i][1]
scale = paddle.concat([
scale_x, scale_y, scale_x, scale_y, scale_x, scale_y, scale_x,
scale_y
])
expand_scale = paddle.expand(scale, [bbox_num[i], 8])
origin_shape_list.append(expand_shape)
scale_factor_list.append(expand_scale)
origin_shape_list = paddle.concat(origin_shape_list)
scale_factor_list = paddle.concat(scale_factor_list)
# bboxes: [N, 10], label, score, bbox
pred_label_score = bboxes[:, 0:2]
pred_bbox = bboxes[:, 2:]
# rescale bbox to original image
pred_bbox = pred_bbox.reshape([-1, 8])
scaled_bbox = pred_bbox / scale_factor_list
origin_h = origin_shape_list[:, 0]
origin_w = origin_shape_list[:, 1]
bboxes = scaled_bbox
zeros = paddle.zeros_like(origin_h)
x1 = paddle.maximum(paddle.minimum(bboxes[:, 0], origin_w - 1), zeros)
y1 = paddle.maximum(paddle.minimum(bboxes[:, 1], origin_h - 1), zeros)
x2 = paddle.maximum(paddle.minimum(bboxes[:, 2], origin_w - 1), zeros)
y2 = paddle.maximum(paddle.minimum(bboxes[:, 3], origin_h - 1), zeros)
x3 = paddle.maximum(paddle.minimum(bboxes[:, 4], origin_w - 1), zeros)
y3 = paddle.maximum(paddle.minimum(bboxes[:, 5], origin_h - 1), zeros)
x4 = paddle.maximum(paddle.minimum(bboxes[:, 6], origin_w - 1), zeros)
y4 = paddle.maximum(paddle.minimum(bboxes[:, 7], origin_h - 1), zeros)
pred_bbox = paddle.stack([x1, y1, x2, y2, x3, y3, x4, y4], axis=-1)
pred_result = paddle.concat([pred_label_score, pred_bbox], axis=1)
return pred_result
@register
class JDEBBoxPostProcess(nn.Layer):
__shared__ = ['num_classes']
__inject__ = ['decode', 'nms']
def __init__(self, num_classes=1, decode=None, nms=None, return_idx=True):
super(JDEBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.decode = decode
self.nms = nms
self.return_idx = return_idx
self.fake_bbox_pred = paddle.to_tensor(
np.array(
[[-1, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype='float32'))
self.fake_bbox_num = paddle.to_tensor(np.array([1], dtype='int32'))
self.fake_nms_keep_idx = paddle.to_tensor(
np.array(
[[0]], dtype='int32'))
self.fake_yolo_boxes_out = paddle.to_tensor(
np.array(
[[[0.0, 0.0, 0.0, 0.0]]], dtype='float32'))
self.fake_yolo_scores_out = paddle.to_tensor(
np.array(
[[[0.0]]], dtype='float32'))
self.fake_boxes_idx = paddle.to_tensor(np.array([[0]], dtype='int64'))
def forward(self, head_out, anchors):
"""
Decode the bbox and do NMS for JDE model.
Args:
head_out (list): Bbox_pred and cls_prob of bbox_head output.
anchors (list): Anchors of JDE model.
Returns:
boxes_idx (Tensor): The index of kept bboxes after decode 'JDEBox'.
bbox_pred (Tensor): The output is the prediction with shape [N, 6]
including labels, scores and bboxes.
bbox_num (Tensor): The number of prediction of each batch with shape [N].
nms_keep_idx (Tensor): The index of kept bboxes after NMS.
"""
boxes_idx, yolo_boxes_scores = self.decode(head_out, anchors)
if len(boxes_idx) == 0:
boxes_idx = self.fake_boxes_idx
yolo_boxes_out = self.fake_yolo_boxes_out
yolo_scores_out = self.fake_yolo_scores_out
else:
yolo_boxes = paddle.gather_nd(yolo_boxes_scores, boxes_idx)
# TODO: only support bs=1 now
yolo_boxes_out = paddle.reshape(
yolo_boxes[:, :4], shape=[1, len(boxes_idx), 4])
yolo_scores_out = paddle.reshape(
yolo_boxes[:, 4:5], shape=[1, 1, len(boxes_idx)])
boxes_idx = boxes_idx[:, 1:]
if self.return_idx:
bbox_pred, bbox_num, nms_keep_idx = self.nms(
yolo_boxes_out, yolo_scores_out, self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
nms_keep_idx = self.fake_nms_keep_idx
return boxes_idx, bbox_pred, bbox_num, nms_keep_idx
else:
bbox_pred, bbox_num, _ = self.nms(yolo_boxes_out, yolo_scores_out,
self.num_classes)
if bbox_pred.shape[0] == 0:
bbox_pred = self.fake_bbox_pred
bbox_num = self.fake_bbox_num
return _, bbox_pred, bbox_num, _
@register
class CenterNetPostProcess(TTFBox):
"""
Postprocess the model outputs to get final prediction:
1. Do NMS for heatmap to get top `max_per_img` bboxes.
2. Decode bboxes using center offset and box size.
3. Rescale decoded bboxes reference to the origin image shape.
Args:
        max_per_img(int): the maximum number of predicted objects in an image,
            500 by default.
down_ratio(int): the down ratio from images to heatmap, 4 by default.
regress_ltrb (bool): whether to regress left/top/right/bottom or
width/height for a box, true by default.
for_mot (bool): whether return other features used in tracking model.
"""
__shared__ = ['down_ratio', 'for_mot']
def __init__(self,
max_per_img=500,
down_ratio=4,
regress_ltrb=True,
for_mot=False):
super(TTFBox, self).__init__()
self.max_per_img = max_per_img
self.down_ratio = down_ratio
self.regress_ltrb = regress_ltrb
self.for_mot = for_mot
def __call__(self, hm, wh, reg, im_shape, scale_factor):
heat = self._simple_nms(hm)
scores, inds, topk_clses, ys, xs = self._topk(heat)
scores = paddle.tensor.unsqueeze(scores, [1])
clses = paddle.tensor.unsqueeze(topk_clses, [1])
reg_t = paddle.transpose(reg, [0, 2, 3, 1])
# Like TTFBox, batch size is 1.
# TODO: support batch size > 1
reg = paddle.reshape(reg_t, [-1, paddle.shape(reg_t)[-1]])
reg = paddle.gather(reg, inds)
xs = paddle.cast(xs, 'float32')
ys = paddle.cast(ys, 'float32')
xs = xs + reg[:, 0:1]
ys = ys + reg[:, 1:2]
wh_t = paddle.transpose(wh, [0, 2, 3, 1])
wh = paddle.reshape(wh_t, [-1, paddle.shape(wh_t)[-1]])
wh = paddle.gather(wh, inds)
if self.regress_ltrb:
x1 = xs - wh[:, 0:1]
y1 = ys - wh[:, 1:2]
x2 = xs + wh[:, 2:3]
y2 = ys + wh[:, 3:4]
else:
x1 = xs - wh[:, 0:1] / 2
y1 = ys - wh[:, 1:2] / 2
x2 = xs + wh[:, 0:1] / 2
y2 = ys + wh[:, 1:2] / 2
n, c, feat_h, feat_w = hm.shape[:]
padw = (feat_w * self.down_ratio - im_shape[0, 1]) / 2
padh = (feat_h * self.down_ratio - im_shape[0, 0]) / 2
x1 = x1 * self.down_ratio
y1 = y1 * self.down_ratio
x2 = x2 * self.down_ratio
y2 = y2 * self.down_ratio
x1 = x1 - padw
y1 = y1 - padh
x2 = x2 - padw
y2 = y2 - padh
bboxes = paddle.concat([x1, y1, x2, y2], axis=1)
scale_y = scale_factor[:, 0:1]
scale_x = scale_factor[:, 1:2]
scale_expand = paddle.concat(
[scale_x, scale_y, scale_x, scale_y], axis=1)
boxes_shape = paddle.shape(bboxes)
boxes_shape.stop_gradient = True
scale_expand = paddle.expand(scale_expand, shape=boxes_shape)
bboxes = paddle.divide(bboxes, scale_expand)
if self.for_mot:
results = paddle.concat([bboxes, scores, clses], axis=1)
return results, inds, topk_clses
else:
results = paddle.concat([clses, scores, bboxes], axis=1)
return results, paddle.shape(results)[0:1], topk_clses
@register
class DETRBBoxPostProcess(object):
__shared__ = ['num_classes', 'use_focal_loss']
__inject__ = []
def __init__(self,
num_classes=80,
num_top_queries=100,
use_focal_loss=False):
super(DETRBBoxPostProcess, self).__init__()
self.num_classes = num_classes
self.num_top_queries = num_top_queries
self.use_focal_loss = use_focal_loss
def __call__(self, head_out, im_shape, scale_factor):
"""
Decode the bbox.
Args:
head_out (tuple): bbox_pred, cls_logit and masks of bbox_head output.
im_shape (Tensor): The shape of the input image.
scale_factor (Tensor): The scale factor of the input image.
Returns:
            bbox_pred (Tensor): The output prediction with shape [N, 6], including
                labels, scores and bboxes. The bbox sizes correspond to the input
                image, and the bboxes may be used in another branch.
            bbox_num (Tensor): The number of predicted boxes for each batch element,
                with shape [bs]; the values sum to N.
"""
bboxes, logits, masks = head_out
bbox_pred = bbox_cxcywh_to_xyxy(bboxes)
origin_shape = paddle.floor(im_shape / scale_factor + 0.5)
img_h, img_w = origin_shape.unbind(1)
origin_shape = paddle.stack(
[img_w, img_h, img_w, img_h], axis=-1).unsqueeze(0)
bbox_pred *= origin_shape
scores = F.sigmoid(logits) if self.use_focal_loss else F.softmax(
logits)[:, :, :-1]
if not self.use_focal_loss:
scores, labels = scores.max(-1), scores.argmax(-1)
if scores.shape[1] > self.num_top_queries:
scores, index = paddle.topk(
scores, self.num_top_queries, axis=-1)
labels = paddle.stack(
[paddle.gather(l, i) for l, i in zip(labels, index)])
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
else:
scores, index = paddle.topk(
scores.reshape([logits.shape[0], -1]),
self.num_top_queries,
axis=-1)
labels = index % logits.shape[2]
index = index // logits.shape[2]
bbox_pred = paddle.stack(
[paddle.gather(b, i) for b, i in zip(bbox_pred, index)])
bbox_pred = paddle.concat(
[
labels.unsqueeze(-1).astype('float32'), scores.unsqueeze(-1),
bbox_pred
],
axis=-1)
bbox_num = paddle.to_tensor(
bbox_pred.shape[1], dtype='int32').tile([bbox_pred.shape[0]])
bbox_pred = bbox_pred.reshape([-1, 6])
return bbox_pred, bbox_num
@register
class SparsePostProcess(object):
__shared__ = ['num_classes']
def __init__(self, num_proposals, num_classes=80):
super(SparsePostProcess, self).__init__()
self.num_classes = num_classes
self.num_proposals = num_proposals
def __call__(self, box_cls, box_pred, scale_factor_wh, img_whwh):
"""
Arguments:
box_cls (Tensor): tensor of shape (batch_size, num_proposals, K).
The tensor predicts the classification probability for each proposal.
box_pred (Tensor): tensors of shape (batch_size, num_proposals, 4).
The tensor predicts 4-vector (x,y,w,h) box
regression values for every proposal
            scale_factor_wh (Tensor): tensors of shape [batch_size, 2], the scale factor (w, h) of each image
img_whwh (Tensor): tensors of shape [batch_size, 4]
Returns:
bbox_pred (Tensor): tensors of shape [num_boxes, 6] Each row has 6 values:
[label, confidence, xmin, ymin, xmax, ymax]
bbox_num (Tensor): tensors of shape [batch_size] the number of RoIs in each image.
"""
assert len(box_cls) == len(scale_factor_wh) == len(img_whwh)
img_wh = img_whwh[:, :2]
scores = F.sigmoid(box_cls)
labels = paddle.arange(0, self.num_classes). \
unsqueeze(0).tile([self.num_proposals, 1]).flatten(start_axis=0, stop_axis=1)
classes_all = []
scores_all = []
boxes_all = []
for i, (scores_per_image,
box_pred_per_image) in enumerate(zip(scores, box_pred)):
scores_per_image, topk_indices = scores_per_image.flatten(
0, 1).topk(
self.num_proposals, sorted=False)
labels_per_image = paddle.gather(labels, topk_indices, axis=0)
box_pred_per_image = box_pred_per_image.reshape([-1, 1, 4]).tile(
[1, self.num_classes, 1]).reshape([-1, 4])
box_pred_per_image = paddle.gather(
box_pred_per_image, topk_indices, axis=0)
classes_all.append(labels_per_image)
scores_all.append(scores_per_image)
boxes_all.append(box_pred_per_image)
bbox_num = paddle.zeros([len(scale_factor_wh)], dtype="int32")
boxes_final = []
for i in range(len(scale_factor_wh)):
classes = classes_all[i]
boxes = boxes_all[i]
scores = scores_all[i]
boxes[:, 0::2] = paddle.clip(
boxes[:, 0::2], min=0, max=img_wh[i][0]) / scale_factor_wh[i][0]
boxes[:, 1::2] = paddle.clip(
boxes[:, 1::2], min=0, max=img_wh[i][1]) / scale_factor_wh[i][1]
boxes_w, boxes_h = (boxes[:, 2] - boxes[:, 0]).numpy(), (
boxes[:, 3] - boxes[:, 1]).numpy()
keep = (boxes_w > 1.) & (boxes_h > 1.)
if (keep.sum() == 0):
bboxes = paddle.zeros([1, 6]).astype("float32")
else:
boxes = paddle.to_tensor(boxes.numpy()[keep]).astype("float32")
classes = paddle.to_tensor(classes.numpy()[keep]).astype(
"float32").unsqueeze(-1)
scores = paddle.to_tensor(scores.numpy()[keep]).astype(
"float32").unsqueeze(-1)
bboxes = paddle.concat([classes, scores, boxes], axis=-1)
boxes_final.append(bboxes)
bbox_num[i] = bboxes.shape[0]
bbox_pred = paddle.concat(boxes_final)
return bbox_pred, bbox_num
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return dets[[], :]
scores = dets[:, 0]
x1 = dets[:, 1]
y1 = dets[:, 2]
x2 = dets[:, 3]
y2 = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int32)  # np.int is deprecated/removed in recent NumPy
# nominal indices
# _i, _j
# sorted indices
# i, j
# temp variables for box i's (the box currently under consideration)
# ix1, iy1, ix2, iy2, iarea
# variables for computing overlap with box j (lower scoring box)
# xx1, yy1, xx2, yy2
# w, h
# inter, ovr
for _i in range(ndets):
i = order[_i]
if suppressed[i] == 1:
continue
ix1 = x1[i]
iy1 = y1[i]
ix2 = x2[i]
iy2 = y2[i]
iarea = areas[i]
for _j in range(_i + 1, ndets):
j = order[_j]
if suppressed[j] == 1:
continue
xx1 = max(ix1, x1[j])
yy1 = max(iy1, y1[j])
xx2 = min(ix2, x2[j])
yy2 = min(iy2, y2[j])
w = max(0.0, xx2 - xx1 + 1)
h = max(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (iarea + areas[j] - inter)
if ovr >= thresh:
suppressed[j] = 1
keep = np.where(suppressed == 0)[0]
dets = dets[keep, :]
return dets
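# Minimal usage sketch for nms() above (illustrative, not part of the original
# module). Rows of `dets` are [score, x1, y1, x2, y2]; the second box overlaps
# the first with IoU ~0.83 > 0.5 and is therefore suppressed:
#
#   import numpy as np
#   dets = np.array([[0.9, 10, 10, 50, 50],
#                    [0.8, 12, 12, 52, 52],
#                    [0.7, 100, 100, 140, 140]], dtype=np.float32)
#   kept = nms(dets, thresh=0.5)  # expected to keep rows 0 and 2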
| 39.44958 | 94 | 0.582242 | [
"Apache-2.0"
] | gbstack/PaddleDetection | ppdet/modeling/post_process.py | 28,167 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceProviderOperationDefinition(Model):
"""Describes the Resource Provider Operation.
:param name: Resource provider operation name.
:type name: str
:param display: Resource provider display properties.
:type display: ~microsoft.vsonline.models.ResourceProviderOperationDisplay
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
}
def __init__(self, **kwargs):
super(ResourceProviderOperationDefinition, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
| 36.363636 | 82 | 0.618333 | [
"MIT"
] | MyronFanQiu/azure-cli-extensions | src/codespaces/azext_codespaces/vendored_sdks/vsonline/models/resource_provider_operation_definition.py | 1,200 | Python |
JONG_COMP = {
'ㄱ': {
'ㄱ': 'ㄲ',
'ㅅ': 'ㄳ',
},
'ㄴ': {
'ㅈ': 'ㄵ',
'ㅎ': 'ㄶ',
},
'ㄹ': {
'ㄱ': 'ㄺ',
'ㅁ': 'ㄻ',
'ㅂ': 'ㄼ',
'ㅅ': 'ㄽ',
'ㅌ': 'ㄾ',
'ㅍ': 'ㄿ',
'ㅎ': 'ㅀ',
}
}
DEFAULT_COMPOSE_SEPARATOR = u'ᴥ'
################################################################################
# Hangul Automata functions by [email protected]
################################################################################
def decompose(text, latin_filter=True, separator=DEFAULT_COMPOSE_SEPARATOR):
from . import letter
result = ""
for c in list(text):
if letter.is_hangul(c):
result += "".join(letter.decompose(c)) + separator
else:
result = result + c
return result
STATUS_CHO = 0
STATUS_JOONG = 1
STATUS_JONG1 = 2
STATUS_JONG2 = 3
def compose(text, compose_code=DEFAULT_COMPOSE_SEPARATOR):
from .const import ONSET, NUCLEUS, CODA
from . import letter
res_text = ""
status = STATUS_CHO
for c in text:
if status == STATUS_CHO:
if c in ONSET:
chosung = c
status = STATUS_JOONG
else:
if c != compose_code:
res_text = res_text + c
elif status == STATUS_JOONG:
if c != compose_code and c in NUCLEUS:
joongsung = c
status = STATUS_JONG1
else:
res_text = res_text + chosung
if c in ONSET:
chosung = c
status = STATUS_JOONG
else:
if c != compose_code:
res_text = res_text + c
status = STATUS_CHO
elif status == STATUS_JONG1:
if c != compose_code and c in CODA:
jongsung = c
if c in JONG_COMP:
status = STATUS_JONG2
else:
res_text = res_text + letter.compose(chosung, joongsung, jongsung)
status = STATUS_CHO
else:
res_text = res_text + letter.compose(chosung, joongsung)
if c in ONSET:
chosung = c
status = STATUS_JOONG
else:
if c != compose_code:
res_text = res_text + c
status = STATUS_CHO
elif status == STATUS_JONG2:
if c != compose_code and c in JONG_COMP[jongsung]:
jongsung = JONG_COMP[jongsung][c]
                c = compose_code  # prevent the final consonant (jongseong) from being output again
res_text = res_text + letter.compose(chosung, joongsung, jongsung)
if c != compose_code:
res_text = res_text + c
status = STATUS_CHO
return res_text
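# Round-trip sketch for decompose()/compose() above (illustrative, not part of
# the original module), assuming the companion `letter` module splits and
# re-assembles syllables as referenced here:
#
#   decompose(u'한글')          # -> u'ㅎㅏㄴᴥㄱㅡㄹᴥ'
#   compose(u'ㅎㅏㄴᴥㄱㅡㄹᴥ')    # -> u'한글'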
| 24.294118 | 86 | 0.439294 | [
"Apache-2.0"
] | CodePsy-2001/hanshift | hanshift/text.py | 2,957 | Python |
import curses
import time # only for debugging
stdscr = curses.initscr()
class Listdisplay:
    def __init__(self, lst, start_x, start_y, height, width, headers=None) -> None:
        """lst is 2-D: the i-th list in lst is the content of tab i+1.
        Each string in lst should not be longer than width.
        Scrolling is available only in the vertical direction.
        """
self.start_x = start_x
self.start_y = start_y
self.height = height
self.width = width
self.currentpos = 0
self.currenttab = 0
self.lst = lst
curses.curs_set(0)
newwin = curses.newwin(height, width, start_y, start_x)
newwin.border(0)
newwin.refresh()
self.display()
def display(self, tab=0):
self.pad = curses.newpad(1 + len(self.lst[tab]) + 1, self.width)
for n in range(len(self.lst[tab])):
self.pad.addstr(n, 1, self.lst[tab][n])
self.refresh_pad()
def scrollup(self):
self.currentpos += 1
self.currentpos = min(
len(self.lst[self.currenttab]) - self.height + 3, self.currentpos
)
self.refresh_pad()
def scrolldown(self):
self.currentpos -= 1
self.currentpos = max(0, self.currentpos)
self.refresh_pad()
def switchtab_right(self):
self.currentpos = 0
self.currenttab += 1
        self.currenttab = min(len(self.lst) - 1, self.currenttab)  # clamp to the last valid tab index
self.pad.erase()
self.refresh_pad()
self.display(self.currenttab)
def switchtab_left(self):
self.currentpos = 0
self.currenttab -= 1
self.currenttab = max(0, self.currenttab)
self.pad.erase()
self.refresh_pad()
self.display(self.currenttab)
def refresh_pad(self):
self.pad.refresh(
self.currentpos,
0,
self.start_y + 1,
self.start_x + 1,
self.height - 1,
self.width - 1,
)
def main(stdscr):
curses.init_color(1, 0, 0, 0)
lst = [
["hii", "hello", "dkfjalkfjkjlkjlkj", "lajdflkdf"],
["tab2", "tambs"],
] # for debugging
stdscr.clear()
a = Listdisplay(lst, 1, 1, 5, 10)
time.sleep(2)
a.scrollup()
time.sleep(2)
a.scrollup()
time.sleep(2)
a.scrolldown()
time.sleep(2)
a.switchtab_right()
time.sleep(2)
a.switchtab_left()
time.sleep(2)
if __name__ == "__main__":
curses.wrapper(main)
| 25.680412 | 83 | 0.570454 | [
"MIT"
] | Tubular-Terriers/code-jam | src/client/ui/widget/displaylist.py | 2,491 | Python |
from django.conf.urls import url
from django.contrib.auth import login
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.urls import include, path, re_path
from .. import views
def repath_view(request):
return HttpResponse(status=200)
def path_view(request):
return HttpResponse(status=200)
def authenticated_view(request):
"""
This view can be used to test requests with an authenticated user. Create a
user with a default username, save it and then use this user to log in.
Always returns a 200.
"""
user = User(username="Jane Doe")
user.save()
login(request, user)
return HttpResponse(status=200)
urlpatterns = [
url(r"^$", views.index),
url(r"^simple/$", views.BasicView.as_view()),
url(r"^users/$", views.UserList.as_view(), name="users-list"),
url(r"^cached-template/$", views.TemplateCachedUserList.as_view(), name="cached-template-list"),
url(r"^cached-users/$", cache_page(60)(views.UserList.as_view()), name="cached-users-list"),
url(r"^fail-view/$", views.ForbiddenView.as_view(), name="forbidden-view"),
url(r"^authenticated/$", authenticated_view, name="authenticated-view"),
url(r"^static-method-view/$", views.StaticMethodView.as_view(), name="static-method-view"),
url(r"^fn-view/$", views.function_view, name="fn-view"),
url(r"^feed-view/$", views.FeedView(), name="feed-view"),
url(r"^partial-view/$", views.partial_view, name="partial-view"),
url(r"^lambda-view/$", views.lambda_view, name="lambda-view"),
url(r"^error-500/$", views.error_500, name="error-500"),
re_path(r"re-path.*/", repath_view),
path("path/", path_view),
path("include/", include("tests.contrib.django.django_app.extra_urls")),
]
| 36.68 | 100 | 0.694656 | [
"Apache-2.0",
"BSD-3-Clause"
] | AlexandreYang/dd-trace-py | tests/contrib/django/django_app/urls.py | 1,834 | Python |
import requests
import csv
import sys
import os
import json
from time_converter import date_weather_format, current_day_weather
def get_all_json_keys(keys_array, json):
for key in json.keys():
if not isinstance(json[key], str):
_ = get_all_json_keys(keys_array, json[key][0])
else:
keys_array.append(key)
return keys_array
def get_all_json_values(values_array, json):
for key in json.keys():
if not isinstance(json[key], str):
_ = get_all_json_values(values_array, json[key][0])
else:
values_array.append(json[key])
return values_array
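# Flattening sketch for the two helpers above (illustrative, not part of the
# original script). For a nested record such as
#   {'date': '2021-01-01', 'astronomy': [{'sunrise': '07:05'}]}
# get_all_json_keys([], record) returns ['date', 'sunrise'] and
# get_all_json_values([], record) returns ['2021-01-01', '07:05'].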
# CONSTRUCTS THE API URL
def url_constructor(system):
request_url = url
request_url += "&q=" + system['lat_long']
request_url += "&format=json"
request_url += "&date=" + system['start_date']
request_url += "&enddate=" + current_day_weather()
request_url += "&includelocation=yes"
request_url += "&tp=24"
return request_url
if len(sys.argv) < 2:
print("Please, inform the data path. EX: san_francisco")
exit()
location_dir = sys.argv[1]
if not os.path.exists(location_dir+'/weathers'):
os.mkdir(location_dir+'/weathers')
# RETRIEVES INFORMATION OF ALL FAVORITE SYSTEMS
system_list = []
with open(location_dir+"/favorite_systems.csv", "r") as systems:
reader = csv.reader(systems, delimiter=",")
next(reader)
for line in reader:
system_dict = {}
system_dict['id'] = line[0]
system_dict['start_date'] = date_weather_format(line[13])
system_dict['end_date'] = "&enddate=" + current_day_weather()
system_dict['lat_long'] = line[14] + "," + line[15]
system_list.append(system_dict)
# ARRAY TO USE IN CASE THE PROGRAM FAILS IN THE MIDDLE BECAUSE OF THE LIMITED REQUEST QUOTA
ignore = ['52375', '29577', '55722', '70687', '8438', '41397', '13255',
'54158', '72735', '65154', '176', '52412', '72288', '48885', '32239',
'55434', '70830', '38742', '76398', '70775', '66542', '64779',
'71919', '41921']
# BASE WEATHER API URL
url = "http://api.worldweatheronline.com/premium/v1/past-weather.ashx?"
url += "key=912ce443276a4e86811154829220904"
for system in system_list:
if system['id'] in ignore:
continue
print("Fetching weather data for system:", system['id'])
weather_list = []
while system['start_date'] != current_day_weather():
request_url = url_constructor(system)
response = requests.get(request_url)
if response.status_code != 200:
print(response.content)
print(ignore)
exit()
content = response.content
content = content.decode()
content = json.loads(content)
        # REVERSES THE RESULT SO THE NEWEST RECORDS ARE ON TOP
weather_list.insert(0, reversed(content['data']['weather']))
system['start_date'] = content['data']['weather'][-1]['date']
ignore.append(system['id'])
# SAVES DATA INTO A FILE REFERING WITH THE SYSTEM ID
file_name = location_dir + "/weathers/system_" + system['id'] + ".csv"
with open(file_name, 'w') as test_daily:
# FLAT THE JSON HEADERS
header = ','.join(get_all_json_keys([], content['data']['weather'][0]))
print(header, file=test_daily)
# FLAT THE JSON VALUES
for data_pack in weather_list:
for daily_data in data_pack:
row = ','.join(get_all_json_values([], daily_data))
print(row, file=test_daily)
| 31.657658 | 79 | 0.63432 | [
"Apache-2.0"
] | lucio-lpf/SEPA | data/weather_data.py | 3,514 | Python |
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import (TFPreTrainedModel, TFConv1D, TFSharedEmbeddings,
TFSequenceSummary, shape_list, get_initializer)
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-tf_model.h5"}
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
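    # Quick sanity check (illustrative): gelu(0.) == 0., and the activation
    # approaches x for large positive inputs and 0 for large negative inputs.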
def swish(x):
return x * tf.math.sigmoid(x)
ACT_FNS = {"gelu": tf.keras.layers.Activation(gelu),
"relu": tf.keras.activations.relu,
"swish": tf.keras.layers.Activation(swish)}
class TFAttention(tf.keras.layers.Layer):
def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
super(TFAttention, self).__init__(**kwargs)
self.output_attentions = config.output_attentions
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.n_ctx = n_ctx
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name='c_attn')
self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_proj')
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
pass
@staticmethod
def causal_attention_mask(nd, ns, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
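        # Illustrative example (not from the original source): for nd=2, ns=3 the
        # returned mask is
        #   [[1., 1., 0.],
        #    [1., 1., 1.]]
        # i.e. the final query position may attend to every key, the one before it
        # to all but the last key.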
def _attn(self, inputs, training=False):
q, k, v, attention_mask, head_mask = inputs
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
if self.scale:
dk = tf.cast(shape_list(k)[-1], tf.float32) # scale attention_scores
w = w / tf.math.sqrt(dk)
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = self.causal_attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w * b - 1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = tf.nn.softmax(w, axis=-1)
w = self.attn_dropout(w, training=training)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [tf.matmul(w, v)]
if self.output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = tf.transpose(x, [0, 2, 1, 3])
x_shape = shape_list(x)
new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]]
return tf.reshape(x, new_x_shape)
def split_heads(self, x):
x_shape = shape_list(x)
new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head]
x = tf.reshape(x, new_x_shape)
return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
x = self.c_attn(x)
query, key, value = tf.split(x, 3, axis=2)
query = self.split_heads(query)
key = self.split_heads(key)
value = self.split_heads(value)
attn_outputs = self._attn([query, key, value, attention_mask, head_mask], training=training)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a, training=training)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class TFMLP(tf.keras.layers.Layer):
def __init__(self, n_state, config, **kwargs):
super(TFMLP, self).__init__(**kwargs)
nx = config.n_embd
self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name='c_fc')
self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name='c_proj')
self.act = gelu
self.dropout = tf.keras.layers.Dropout(config.resid_pdrop)
def call(self, x, training=False):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
h2 = self.dropout(h2, training=training)
return h2
class TFBlock(tf.keras.layers.Layer):
def __init__(self, n_ctx, config, scale=False, **kwargs):
super(TFBlock, self).__init__(**kwargs)
nx = config.n_embd
self.attn = TFAttention(nx, n_ctx, config, scale, name='attn')
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_1')
self.mlp = TFMLP(4 * nx, config, name='mlp')
self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name='ln_2')
def call(self, inputs, training=False):
x, attention_mask, head_mask = inputs
output_attn = self.attn([x, attention_mask, head_mask], training=training)
a = output_attn[0] # output_attn: a, (attentions)
n = self.ln_1(x + a)
m = self.mlp(n, training=training)
h = self.ln_2(n + m)
outputs = [h] + output_attn[1:]
return outputs # x, (attentions)
class TFOpenAIGPTMainLayer(tf.keras.layers.Layer):
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTMainLayer, self).__init__(config, *inputs, **kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.num_hidden_layers = config.n_layer
self.vocab_size = config.vocab_size
self.n_embd = config.n_embd
self.tokens_embed = TFSharedEmbeddings(config.vocab_size,
config.n_embd,
initializer_range=config.initializer_range,
name='tokens_embed')
self.positions_embed = tf.keras.layers.Embedding(config.n_positions,
config.n_embd,
embeddings_initializer=get_initializer(config.initializer_range),
name='positions_embed')
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFBlock(config.n_ctx,
config,
scale=True,
name='h_._{}'.format(i)) for i in range(config.n_layer)]
def get_input_embeddings(self):
return self.tokens_embed
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
assert len(inputs) <= 6, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
inputs_embeds = inputs.get('inputs_embeds', inputs_embeds)
assert len(inputs) <= 6, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
position_ids = tf.range(input_shape[-1], dtype=tf.int32)[tf.newaxis, :]
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
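            # e.g. an input mask of [1, 1, 0] becomes [0.0, 0.0, -10000.0] here,
            # so padded positions are effectively excluded from the softmax.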
else:
attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids, mode='embedding')
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.tokens_embed(token_type_ids, mode='embedding')
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
all_attentions = []
all_hidden_states = ()
for i, block in enumerate(self.h):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block([hidden_states, attention_mask, head_mask[i]], training=training)
hidden_states = outputs[0]
if self.output_attentions:
all_attentions.append(outputs[1])
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden_states), (attentions)
class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
pretrained_model_archive_map = TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
OPENAI_GPT_START_DOCSTRING = r""" OpenAI GPT model was proposed in
`Improving Language Understanding by Generative Pre-Training`_
by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
It's a causal (unidirectional) transformer pre-trained using language modeling on a large
    corpus with long range dependencies, the Toronto Book Corpus.
This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
refer to the TF 2.0 documentation for all matter related to general usage and behavior.
.. _`Improving Language Understanding by Generative Pre-Training`:
https://openai.com/blog/language-unsupervised/
.. _`tf.keras.Model`:
https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model
Note on the model inputs:
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :
    - a single Tensor with input_ids only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated with the input names given in the docstring:
`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r""" Inputs:
**input_ids**: ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
GPT is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
            Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
A parallel sequence of tokens (can be used to indicate various portions of the inputs).
The embeddings from these tokens will be summed with the respective token embeddings.
Indices are selected in the vocabulary (unlike BERT which has a specific vocabulary for segment indices)
**position_ids**: (`optional`) ```Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings("The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the last layer of the model.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
def call(self, inputs, **kwargs):
outputs = self.transformer(inputs, **kwargs)
return outputs
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1
outputs = model(input_ids)
logits = outputs[0]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTLMHeadModel, self).__init__(config, *inputs, **kwargs)
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
def get_output_embeddings(self):
return self.transformer.tokens_embed
def call(self, inputs, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, (all hidden_states), (attentions)
@add_start_docstrings("""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""", OPENAI_GPT_START_DOCSTRING, OPENAI_GPT_INPUTS_DOCSTRING)
class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel):
r"""
**mc_token_ids**: (`optional`, default to index of the last token of the input) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, num_choices)``:
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1[``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**mc_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)``
            Prediction scores of the multiple-choice classification head (scores for each choice before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
import tensorflow as tf
from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
# Add a [CLS] to the vocabulary (we should train it also!)
# This option is currently not implemented in TF 2.0
raise NotImplementedError
tokenizer.add_special_tokens({'cls_token': '[CLS]'})
model.resize_token_embeddings(len(tokenizer)) # Update the model embeddings with the new vocabulary size
        print(tokenizer.cls_token_id, len(tokenizer)) # The newly added token is the last token of the vocabulary
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = tf.constant([tokenizer.encode(s) for s in choices])[None, :] # Batch size 1, 2 choices
mc_token_ids = tf.constant([input_ids.size(-1), input_ids.size(-1)])[None, :] # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
def __init__(self, config, *inputs, **kwargs):
super(TFOpenAIGPTDoubleHeadsModel, self).__init__(config, *inputs, **kwargs)
config.num_labels = 1
self.transformer = TFOpenAIGPTMainLayer(config, name='transformer')
self.multiple_choice_head = TFSequenceSummary(config, initializer_range=config.initializer_range, name='multiple_choice_head')
def get_output_embeddings(self):
return self.transformer.tokens_embed
def call(self, inputs, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, training=False):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
attention_mask = inputs[1] if len(inputs) > 1 else attention_mask
token_type_ids = inputs[2] if len(inputs) > 2 else token_type_ids
position_ids = inputs[3] if len(inputs) > 3 else position_ids
head_mask = inputs[4] if len(inputs) > 4 else head_mask
inputs_embeds = inputs[5] if len(inputs) > 5 else inputs_embeds
mc_token_ids = inputs[6] if len(inputs) > 6 else mc_token_ids
assert len(inputs) <= 7, "Too many inputs."
elif isinstance(inputs, dict):
input_ids = inputs.get('input_ids')
attention_mask = inputs.get('attention_mask', attention_mask)
token_type_ids = inputs.get('token_type_ids', token_type_ids)
position_ids = inputs.get('position_ids', position_ids)
head_mask = inputs.get('head_mask', head_mask)
inputs_embeds = inputs.get('inputs_embeds', inputs_embeds)
mc_token_ids = inputs.get('mc_token_ids', mc_token_ids)
assert len(inputs) <= 7, "Too many inputs."
else:
input_ids = inputs
if input_ids is not None:
input_shapes = shape_list(input_ids)
else:
input_shapes = shape_list(inputs_embeds)[:-1]
seq_length = input_shapes[-1]
flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
flat_inputs = [flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, inputs_embeds]
transformer_outputs = self.transformer(flat_inputs, training=training)
hidden_states = transformer_outputs[0]
hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
mc_logits = self.multiple_choice_head([hidden_states, mc_token_ids], training=training)
mc_logits = tf.squeeze(mc_logits, axis=-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
return outputs # lm logits, mc logits, (all hidden_states), (attentions)
| 50.586265 | 193 | 0.668675 | [
"MIT"
] | richardbaihe/semantic_unwritten | transformers/modeling_tf_openai.py | 30,200 | Python |
from singlecellmultiomics.bamProcessing.pileup import pileup_truncated
def get_pileup_vect(alignments, contig, pos, ref, alt):
"""Create feature vector for selected variant
Args:
alignments(pysam.AlignmentFile) : Handle to alignmentfile
contig(str) : contig to perform pileup
        pos(int) : zero-based position of the variant to pile up
ref(str) : reference base
alt(str) : alternative base
Returns
total(int) : Total amount of bases overlapping with selected location
ref_calls : Total amount of bases matching ref
alt_calls : Total amount of bases matching alt
other_calls : Total amount of bases matching neither ref nor alt
"""
total = 0
ref_calls = 0
alt_calls = 0
other_calls = 0
start=pos
stop = pos+1
for pileupcolumn in pileup_truncated(alignments,contig,start,stop,stepper='all'):
for i,pileupread in enumerate(pileupcolumn.pileups):
if not pileupread.is_del and not pileupread.is_refskip:
call = pileupread.alignment.query_sequence[pileupread.query_position]
if call==ref:
ref_calls += 1
elif call==alt:
alt_calls += 1
else:
other_calls += 1
            total += 1  # count every read overlapping the selected position
return total, ref_calls, alt_calls, other_calls
# Create mapping quality feature vector:
def get_mapping_q_vect(alignments_handle, contig, pos, radius=150):
    """Obtain a histogram of mapping qualities, clipped at 60
Args:
alignments(pysam.AlignmentFile) : Handle to alignmentfile
contig(str) : contig
        pos(int) : zero-based position of the location to check mapping qualities
radius(int) : radius to check around selected location
Returns:
mapping_qualities(list) : Histogram with 7 bins (0 to highest mapping quality)
"""
mapping_qualities = [0]*7
for read in alignments_handle.fetch(contig, pos-radius, pos+radius):
mapping_qualities[min(60,int(read.mapping_quality/10))]+=1
return mapping_qualities
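# Usage sketch (illustrative, not part of the original module); the BAM path,
# contig and coordinates below are hypothetical placeholders:
#
#   import pysam
#   with pysam.AlignmentFile('cells.bam') as alignments:
#       total, ref_calls, alt_calls, other_calls = get_pileup_vect(
#           alignments, contig='1', pos=1234567, ref='A', alt='T')
#       mapq_hist = get_mapping_q_vect(alignments, contig='1', pos=1234567)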
| 37.192982 | 86 | 0.659434 | [
"MIT"
] | BuysDB/SingleCellMultiOmics | singlecellmultiomics/bamProcessing/bamFeatures.py | 2,120 | Python |
#=============================================================================
# Copyright 2017 FLIR Integrated Imaging Solutions, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ('Confidential Information'). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
#=============================================================================
import PyCapture2
def print_build_info():
lib_ver = PyCapture2.getLibraryVersion()
print('PyCapture2 library version: %d %d %d %d' % (lib_ver[0], lib_ver[1], lib_ver[2], lib_ver[3]))
print()
def print_camera_info(cam):
cam_info = cam.getCameraInfo()
print('\n*** CAMERA INFORMATION ***\n')
print('Serial number - %d' % cam_info.serialNumber)
print('Camera model - %s' % cam_info.modelName)
print('Camera vendor - %s' % cam_info.vendorName)
print('Sensor - %s' % cam_info.sensorInfo)
print('Resolution - %s' % cam_info.sensorResolution)
print('Firmware version - %s' % cam_info.firmwareVersion)
print('Firmware build time - %s' % cam_info.firmwareBuildTime)
print()
def enable_embedded_timestamp(cam, enable_timestamp):
embedded_info = cam.getEmbeddedImageInfo()
if embedded_info.available.timestamp:
cam.setEmbeddedImageInfo(timestamp = enable_timestamp)
if enable_timestamp :
print('\nTimeStamp is enabled.\n')
else:
print('\nTimeStamp is disabled.\n')
def grab_images(cam, num_images_to_grab):
prev_ts = None
for i in range(num_images_to_grab):
try:
image = cam.retrieveBuffer()
except PyCapture2.Fc2error as fc2Err:
print('Error retrieving buffer : %s' % fc2Err)
continue
ts = image.getTimeStamp()
if prev_ts:
diff = (ts.cycleSeconds - prev_ts.cycleSeconds) * 8000 + (ts.cycleCount - prev_ts.cycleCount)
print('Timestamp [ %d %d ] - %d' % (ts.cycleSeconds, ts.cycleCount, diff))
prev_ts = ts
newimg = image.convert(PyCapture2.PIXEL_FORMAT.BGR)
print('Saving the last image to fc2TestImage.png')
newimg.save('fc2TestImage.png'.encode('utf-8'), PyCapture2.IMAGE_FILE_FORMAT.PNG)
#
# Example Main
#
# Print PyCapture2 Library Information
print_build_info()
# Ensure sufficient cameras are found
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
print('Number of cameras detected: ', num_cams)
if not num_cams:
print('Insufficient number of cameras. Exiting...')
exit()
# Select camera on 0th index
c = PyCapture2.Camera()
uid = bus.getCameraFromIndex(0)
c.connect(uid)
print_camera_info(c)
# Enable camera embedded timestamp
enable_embedded_timestamp(c, True)
print('Starting image capture...')
c.startCapture()
grab_images(c, 100)
c.stopCapture()
# Disable camera embedded timestamp
enable_embedded_timestamp(c, False)
c.disconnect()
input('Done! Press Enter to exit...\n') | 36.897959 | 106 | 0.663717 | [
"MIT"
] | sjtu-automatic-maritime-system/PengZhenghao | PyCapture2-2.13.31/examples/python3/FlyCapture2Test.py | 3,616 | Python |
import json
def process_data(data):
member_dict = {}
def process_post(post):
if 'comments' in post:
for comment in post['comments']['data']:
commenter_id = comment['from']['id']
if commenter_id not in member_dict:
member_dict[commenter_id] = comment['from']
member_dict[commenter_id]['comment_count'] = 1
member_dict[commenter_id]['like_count'] = comment['like_count']
else:
member_dict[commenter_id]['comment_count'] += 1
member_dict[commenter_id]['like_count'] += comment['like_count']
process_post(comment)
for post in data:
process_post(post)
activity_dict = {}
for member_id in member_dict:
member = member_dict[member_id]
activity_score = 0
if 'comment_count' in member:
activity_score += member['comment_count']
if 'like_count' in member:
activity_score += 2 * member['like_count']
activity_dict[member['id']] = {
'facebook_id': member_id,
'activity_score': activity_score,
'name': member['name'],
'like_count': member['like_count'],
'comment_count': member['comment_count'],
}
return activity_dict
def top_k_active_members(data, k):
members_list = [value for (key, value) in data.items()]
members_list.sort(key=lambda x: x['activity_score'], reverse=True)
return members_list[:k]
def read_files(files):
combined_data = []
for file in files:
with open('./data/' + file + '.json') as data_file:
data = json.load(data_file)
combined_data.extend(data['data'])
return combined_data
def process():
combined_data = read_files(['2015-05-04', '2015-05-05', '2015-05-06', '2015-05-07', '2015-05-08', '2015-05-09', '2015-05-10'])
activity_dict = process_data(combined_data)
active_members = top_k_active_members(activity_dict, 50)
open('./leaderboard/may.json', 'w').write(json.dumps(active_members))
process()
| 30.171875 | 128 | 0.662351 | [
"MIT"
] | yangshun/nuswhispers-leaderboard | process-whispers.py | 1,931 | Python |
# Generated by Django 2.2.7 on 2019-11-21 01:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0004_remove_user_register_time'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(max_length=100, primary_key=True, serialize=False),
),
]
| 22.421053 | 86 | 0.622066 | [
"MIT"
] | darkliang/JudeeBE | user/migrations/0005_auto_20191121_0120.py | 426 | Python |
# Copyright (c) 2022 RWTH Aachen - Werkzeugmaschinenlabor (WZL)
# Contact: Simon Cramer, [email protected]
from sherpa import Client
from sherpa.schedulers import Scheduler, _JobStatus
import requests
import json
import logging as logg
import numpy as np
import socket
from time import sleep
import os
from absl import logging
from s3_smart_open import read_json, to_s3, get_filenames, generate_s3_strings, generate_s3_session, delete_s3_objects
logger = logg.getLogger(__name__)
class ArgoScheduler(Scheduler):
"""Argo Scheduler submit, update, kill jobs and send metrics for sherpa hpo
Args:
Scheduler (class): shepra.schedulers
"""
def __init__(self, default_parameter,trial_run_parameter,lower_is_better,objective,filename_objective,argo_ip,argo_port,k8_namespace,storage_strategy='keep',output_dir=''):
"""Set init values
Args:
            default_parameter (dict): Parameters that will be submitted with the Argo workflow as input flags.
            trial_run_parameter (dict): Parameters that were generated when creating the hyperparameters for sherpa.
lower_is_better (bool): whether to minimize or maximize the objective
objective (str): Name of the objective that will be optimized for. Must be a key/name from the metrics that were generated within a trial run.
filename_objective (str): Filename of the file that contains the objective value which was created within a trial run.
argo_ip (str): Argo server ip
argp_port (str): Argo server port
k8_namespace (str): Name of the kubernetes namespace where the trial container should be executed.
            storage_strategy (str, optional): whether to keep all files, delete all files, or keep only the files from the best run. Defaults to 'keep'.
output_dir (str): needed for sherpa api
"""
# Load Argo Api Token from env variable (set by k8 secret)
if 'api_exec_token' in os.environ:
api_token = 'Bearer ' + os.environ['api_exec_token']
else:
logging.error('No Authorization Token detected. Check Kubernetes Secrets and Argo Template!')
logging.info('Default Parameter: {}'.format(default_parameter))
self.submit_url = 'https://' + argo_ip + ':' + argo_port + '/api/v1/workflows/' + k8_namespace + '/submit'
self.status_url = 'https://' + argo_ip + ':' + argo_port + '/api/v1/workflows/' + k8_namespace + '/'
self.delete_url = self.status_url
self.client = Client()
self.best_metric = {"job_id": None, "metric": None}
self.headers = {'Authorization': api_token }
self.killed_jobs = []
self.output_dir = output_dir
self.default_parameter = default_parameter
self.trial_run_parameter = trial_run_parameter
self.storage_strategy = storage_strategy
self.hostname = socket.gethostname()
self.trials = {}
self.run_name = self.default_parameter['run_name']
self.metrics_filename = filename_objective
self.objective = objective
self.lower_is_better = lower_is_better
self.output_path = self.default_parameter['output_path']
self.decode_status = {'Succeeded': _JobStatus.finished,
'Running': _JobStatus.running,
'Pending': _JobStatus.queued,
'Failed': _JobStatus.failed,
'Stopped': _JobStatus.killed,
'Other': _JobStatus.other}
def make_request(self,request,data,max_wait=600,step=5,wait=0):
"""Sends a get, post or delete request every step seconds until the request was successful or wait exceeds max_wait.
Args:
request (str): Define which kind of request to execute.
data (str): Submit information or sherpas job_id for a status request or job_id for deleting a trial.
max_wait (int, optional): Time in seconds after which the requests repetition will be stopped. Defaults to 600.
step (int, optional): Time in seconds after which a faulty request is repeated. Defaults to 5.
wait (int, optional): Variable to which the step time is added and compared to max_wait. Defaults to 0.
Returns:
[class]: Response
"""
proxies = {"http": None, "https": None}
if request == 'GET':
response = requests.get(self.status_url+data, headers=self.headers, proxies=proxies, verify=False)
elif request == 'POST':
response = requests.post(self.submit_url, headers=self.headers, data=data, proxies=proxies, verify=False)
elif request == 'DELETE':
response = requests.delete(self.status_url+data, headers=self.headers, proxies=proxies, verify=False)
else:
logging.error('Request argument is none of ["GET","POST","DELETE"].')
if response.status_code == 200 or wait > max_wait:
if wait > max_wait:
logging.warning("Request has failed for {} seconds with status code: {}:{}".format(max_wait, response.status_code, response.reason))
return response
else:
sleep(step)
logging.error("Request has failed for {} times with reason {}:{}".format(1+int((max_wait/step)-((max_wait/step)-(wait/step))), response.status_code, response.reason))
return self.make_request(request=request,data=data,max_wait=max_wait,step=step,wait=wait+step)
def file_strategy(self,job_id,metrics):
"""Delete all trial files which were generated through a hpo trial
It deletes all files in the output_path related to the job_id
Args:
job_id (str): Sherpa Job_ID / Argo trial workflow name
metrics (dict): metrics to compare
"""
if job_id in self.trials:
trial = self.trials[job_id]
if 'output_path' in trial:
if self.storage_strategy == 'delete':
delete_s3_objects(trial['output_path'])
elif self.storage_strategy == 'best':
if self.best_metric['metric'] == None:
self.best_metric['metric'] = metrics[self.objective]
self.best_metric['job_id'] = job_id
elif self.lower_is_better == True and metrics[self.objective] < self.best_metric['metric']:
delete_s3_objects(self.trials[self.best_metric['job_id']]['output_path'])
self.best_metric['metric'] = metrics[self.objective]
self.best_metric['job_id'] = job_id
logging.info('New best trial {} with metric {}'.format(self.best_metric['job_id'],
self.best_metric['metric']))
elif self.lower_is_better == False and metrics[self.objective] > self.best_metric['metric']:
delete_s3_objects(self.trials[self.best_metric['job_id']]['output_path'])
self.best_metric['metric'] = metrics[self.objective]
self.best_metric['job_id'] = job_id
logging.info('New best trial {} with metric {}'.format(self.best_metric['job_id'],
self.best_metric['metric']))
else:
delete_s3_objects(trial['output_path'])
def submit_job(self,command, env={}, job_name=''):
"""Submits a new hpo trial to argo in order to start a workflow template
Args:
            command (list[str]): List that contains ['Argo WorkflowTemplate', 'Entrypoint of that Argo WorkflowTemplate']
env (dict, optional): Dictionary that contains env variables, mainly the sherpa_trial_id. Defaults to {}.
job_name (str, optional): Not needed for Argo scheduler. Defaults to ''.
Returns:
[str]: Sherpa Job_ID / Name of the workflow that was started by Argo
"""
os.environ['SHERPA_TRIAL_ID'] = env['SHERPA_TRIAL_ID']
# Get new trial from the DB
trial = self.client.get_trial()
tp = trial.parameters
WorkflowTemplate = command[0]
entrypoint = command[1]
# Set output path for next trial by using sherpa trial ID, Trial ID --> 0,1,...max_num_trials-1 or trial parameters 'save_to', 'load_from' for PBT and ASHA.
default_parameter = self.default_parameter
if 'save_to' in tp:
default_parameter['output_path'] = os.path.join(self.output_path,str(tp['save_to']),'')
else:
default_parameter['output_path'] = os.path.join(self.output_path,str(env['SHERPA_TRIAL_ID']),'')
if 'load_from' in tp and tp['load_from'] != '':
default_parameter['model_input_path'] = os.path.join(self.output_path,str(tp['load_from']),'')
WorkflowTemplate = eval(self.trial_run_parameter)['WorkflowTemplateContinue']
entrypoint = eval(self.trial_run_parameter)['EntrypointContinue']
else:
default_parameter['model_input_path'] = ''
# Merge trial parameter with the default parameter dict
# Eval trial_run_parameter string
merged_parameter = eval(self.trial_run_parameter)
for k, v in default_parameter.items():
merged_parameter[k] = v
# Get epochs to save them in trials in order to load epochs when sending metrics to the DB (self.get_status())
epochs = merged_parameter.get('epochs',0)
# Convert the parameter dictionary to a list format for the input parameter for argo
parameters_list = []
for key, val in merged_parameter.items():
parameters_list.append("{}={}".format(key,val))
# The data that will be submitted to argo. The Label will make it easier to filter the workflows matching to the sherpa Workflow
data = json.dumps( {"resourceKind": "WorkflowTemplate",
"resourceName": WorkflowTemplate,
"submitOptions": {"entrypoint": entrypoint,
"labels" : "sherpa_run="+self.hostname+",run_name="+self.run_name,
"parameters" : parameters_list }})
# Submits the WorkflowTemplate with the data to Argo
response_submit = self.make_request(request='POST',data=data)
# A successfully submitted workflow will have a response_status_code == 200
if response_submit.status_code == 200:
job_id = json.loads(response_submit.content)['metadata']['name']
logging.info('Submitted trial {} with job_id {}'.format(env['SHERPA_TRIAL_ID'],job_id))
else:
job_id = 'failed_trial_id_' + str(env['SHERPA_TRIAL_ID'])
            logging.warning('Failed to submit job with Trial_ID {} to argo.'.format(env['SHERPA_TRIAL_ID']))
# Save some trial information which is needed in self.get_status
self.trials[job_id] = {'trial': trial,'epochs':epochs,'output_path':default_parameter['output_path'],'model_input_path':default_parameter['model_input_path'],'status':0,'finished':False}
# return the Argo workflow name to sherpa
return job_id
def get_status(self, job_id):
"""Obtains the current status of the job.
Sends objective values/metrics to the DB when a trial succeeded.
        Compares objective values and decides whether to delete or keep files.
Args:
job_id (str): Sherpa Job_ID / Name of the workflow that was started by Argo
Returns:
sherpa.schedulers._JobStatus: the job-status.
"""
response_status = self.make_request(request='GET',data=job_id)
if response_status.status_code == 200:
status = json.loads(response_status.content)['status']['phase']
# sends metric to DB when dag has finished
if status == 'Succeeded':
                # When the Argo trial dag has succeeded, load metrics and keep/delete files in s3 storage. Set the finished flag to true afterwards in order to return Succeeded status to sherpa in the next runner_loop()
if self.trials[job_id]['finished'] == True:
logging.info('Set status to finished for trial : {}'.format(self.trials[job_id]['trial'].id))
else:
filename = self.metrics_filename
input_path = self.trials[job_id]['output_path']
metrics = read_json(input_path,filename)
logging.info('Send metrics for trial: {}'.format(self.trials[job_id]['trial'].id))
self.client.send_metrics(trial=self.trials[job_id]['trial'], iteration=self.trials[job_id]['epochs'],objective=metrics[self.objective],context=metrics)
status = 'Running'
# Set finished Flag to true after client send metrics to the DB
self.trials[job_id]['finished'] = True
# Delete all files / keep all files / keep the files of the best run in s3 storage
self.file_strategy(job_id,metrics)
elif status == 'Failed':
delete_s3_objects(self.trials[job_id]['output_path'])
elif job_id in self.killed_jobs:
status = 'Stopped'
else:
status = 'Other'
# Decode Job status
s = self.decode_status.get(status, _JobStatus.other)
# info when job status has changed
if s != self.trials[job_id]['status']:
logging.info('Jobstatus: {} for Job {}'.format(status,job_id))
self.trials[job_id]['status'] = s
return s
def kill_job(self, job_id):
"""Kill a job by deleting the argo workflow completly
Args:
job_id (str): Sherpa Job_ID / Name of the workflow that was started by Argo
"""
# Delete Argo Trial Dag / Workflow
response_kill = self.make_request(request='DELETE',data=job_id)
if response_kill.status_code == 200:
            # Append the killed workflow's job_id to the list in order to return status killed in get_status because the request will not succeed (the workflow does not exist anymore)
self.killed_jobs.append(str(job_id))
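# Illustrative usage sketch (an assumption, not part of the original module):
# a sherpa study would typically hand this scheduler to sherpa's optimize loop,
# roughly along these lines. All values are placeholders and the exact
# sherpa.optimize signature should be checked against the sherpa documentation:
#
#     scheduler = ArgoScheduler(default_parameter, trial_run_parameter,
#                               lower_is_better=True, objective='val_loss',
#                               filename_objective='metrics.json',
#                               argo_ip='10.0.0.1', argo_port='2746',
#                               k8_namespace='hpo-trials')
#     sherpa.optimize(parameters=parameters, algorithm=algorithm,
#                     lower_is_better=True, scheduler=scheduler,
#                     command=['my-workflow-template', 'main'])
#
# The two-element command list maps to [WorkflowTemplate, entrypoint] as
# consumed by ArgoScheduler.submit_job above.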
| 53.767528 | 209 | 0.620891 | [
"MIT"
] | predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | 14,571 | Python |
# Generated by Django 2.2.7 on 2020-01-10 19:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('chatapp', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='id',
),
migrations.AlterField(
model_name='message',
name='receiver',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='receiver', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='message',
name='sender',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL),
),
]
| 32.057143 | 148 | 0.63369 | [
"MIT"
] | milad-r/django_project | ChatProject/chatapp/migrations/0002_auto_20200110_2253.py | 1,122 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities"""
import importlib
import operator
import platform
import sys
from importlib.util import find_spec
import torch
from packaging.version import Version
from pkg_resources import DistributionNotFound
def _module_available(module_path: str) -> bool:
"""
Check if a path is available in your environment
>>> _module_available('os')
True
>>> _module_available('bla.bla')
False
"""
try:
return find_spec(module_path) is not None
except AttributeError:
# Python 3.6
return False
except ModuleNotFoundError:
# Python 3.7+
return False
def _compare_version(package: str, op, version) -> bool:
"""
Compare package version with some requirements
>>> _compare_version("torch", operator.ge, "0.1")
True
"""
try:
pkg = importlib.import_module(package)
except (ModuleNotFoundError, DistributionNotFound):
return False
try:
pkg_version = Version(pkg.__version__)
except TypeError:
        # this is mocked by sphinx, so it shall return True to generate all summaries
return True
return op(pkg_version, Version(version))
_IS_WINDOWS = platform.system() == "Windows"
_IS_INTERACTIVE = hasattr(sys, "ps1") # https://stackoverflow.com/a/64523765
_TORCH_LOWER_EQUAL_1_4 = _compare_version("torch", operator.le, "1.5.0")
_TORCH_GREATER_EQUAL_1_6 = _compare_version("torch", operator.ge, "1.6.0")
_TORCH_GREATER_EQUAL_1_7 = _compare_version("torch", operator.ge, "1.7.0")
_TORCH_GREATER_EQUAL_1_8 = _compare_version("torch", operator.ge, "1.8.0")
_TORCH_GREATER_EQUAL_1_8_1 = _compare_version("torch", operator.ge, "1.8.1")
_TORCH_GREATER_EQUAL_1_9 = _compare_version("torch", operator.ge, "1.9.0")
_APEX_AVAILABLE = _module_available("apex.amp")
_BOLTS_AVAILABLE = _module_available('pl_bolts')
_DEEPSPEED_AVAILABLE = not _IS_WINDOWS and _module_available('deepspeed')
_FAIRSCALE_AVAILABLE = _TORCH_GREATER_EQUAL_1_6 and not _IS_WINDOWS and _module_available('fairscale.nn')
_FAIRSCALE_PIPE_AVAILABLE = _FAIRSCALE_AVAILABLE and _compare_version("fairscale", operator.le, "0.1.3")
_GROUP_AVAILABLE = not _IS_WINDOWS and _module_available('torch.distributed.group')
_HOROVOD_AVAILABLE = _module_available("horovod.torch")
_HYDRA_AVAILABLE = _module_available("hydra")
_HYDRA_EXPERIMENTAL_AVAILABLE = _module_available("hydra.experimental")
_KINETO_AVAILABLE = _TORCH_GREATER_EQUAL_1_8_1 and torch.profiler.kineto_available()
_NATIVE_AMP_AVAILABLE = _module_available("torch.cuda.amp") and hasattr(torch.cuda.amp, "autocast")
_OMEGACONF_AVAILABLE = _module_available("omegaconf")
_RPC_AVAILABLE = not _IS_WINDOWS and _module_available('torch.distributed.rpc')
_TORCH_QUANTIZE_AVAILABLE = bool([eg for eg in torch.backends.quantized.supported_engines if eg != 'none'])
_TORCHTEXT_AVAILABLE = _module_available("torchtext")
_TORCHVISION_AVAILABLE = _module_available('torchvision')
_TORCHMETRICS_LOWER_THAN_0_3 = _compare_version("torchmetrics", operator.lt, "0.3.0")
_TORCHMETRICS_GREATER_EQUAL_0_3 = _compare_version("torchmetrics", operator.ge, "0.3.0")
_XLA_AVAILABLE = _module_available("torch_xla")
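# Illustrative note (not part of the original module): downstream code usually
# guards optional dependencies with the flags above, for example
#
#     if _APEX_AVAILABLE:
#         from apex import amp
#
# The concrete import is only an example of the guard pattern; any of the
# availability flags can be used the same way.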
from pytorch_lightning.utilities.xla_device import XLADeviceUtils # noqa: E402
_TPU_AVAILABLE = XLADeviceUtils.tpu_device_exists()
| 40.416667 | 107 | 0.765206 | [
"Apache-2.0"
] | Queuecumber/pytorch-lightning | pytorch_lightning/utilities/imports.py | 3,880 | Python |
from ._ffmpeg_normalize import FFmpegNormalize
from ._media_file import MediaFile
from ._version import __version__
| 23.4 | 46 | 0.863248 | [
"MIT"
] | MCeddy/ffmpeg-normalize | ffmpeg_normalize/__init__.py | 117 | Python |
#!/usr/bin/env python
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="Taerbit",
version="0.0.1",
author="Finn Torbet",
author_email="[email protected]",
description="Package to process images through interpretability methods and then measure them against a binary mask segmentation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Taerbit/EXP",
packages=['src'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
zip_safe=False, install_requires=['cv2', 'numpy', 'keras', 'pandas', 'matplotlib', 'seaborn', 'shap', 'pathlib']) | 34.875 | 134 | 0.669056 | [
"MIT"
] | Taerbit/EXP | setup.py | 837 | Python |
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(self, plotly_name="range", parent_name="layout.xaxis", **kwargs):
super(RangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop("anim", True),
edit_type=kwargs.pop("edit_type", "axrange"),
implied_edits=kwargs.pop("implied_edits", {"autorange": False}),
items=kwargs.pop(
"items",
[
{
"anim": True,
"editType": "axrange",
"impliedEdits": {"^autorange": False},
"valType": "any",
},
{
"anim": True,
"editType": "axrange",
"impliedEdits": {"^autorange": False},
"valType": "any",
},
],
),
**kwargs
)
| 35.419355 | 82 | 0.435337 | [
"MIT"
] | AWS-BugBust-054/plotly.py | packages/python/plotly/plotly/validators/layout/xaxis/_range.py | 1,098 | Python |
"""
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from mo.front.common.replacement import FrontReplacementOp
from mo.graph.graph import Graph, Node
from extensions.ops.MatMul import MatMul
from extensions.ops.elementwise import Add
class Linear(FrontReplacementOp):
op = 'Linear'
enabled = True
def replace_op(self, graph: Graph, node: Node):
matmul = MatMul(graph, dict(name=node.name, transpose_b=True)).create_node([node.in_node(0), node.in_node(1)])
# Bias
if len(node.in_nodes()) > 2:
matmul = Add(graph, dict(name=node.name + '/bias')).create_node([matmul, node.in_node(2)])
return [matmul.id]
| 33.805556 | 118 | 0.727198 | [
"Apache-2.0"
] | IvanNovoselov/openvino_contrib | modules/mo_pytorch/mo_extensions/front/pytorch/linear.py | 1,217 | Python |
#@+leo-ver=5-thin
#@+node:ekr.20160928073518.1: * @file ../plugins/pyplot_backend.py
'''
A helper for the viewrendered plugin.
This is *NOT* a real plugin.
'''
#@+<< pyplot_backend imports >>
#@+node:ekr.20160928074801.1: ** << pyplot_backend imports >>
from leo.core import leoGlobals as g
from leo.plugins import viewrendered as vr
from leo.core.leoQt import isQt5, isQt6, QtCore, QtWidgets
try:
if isQt5 or isQt6:
import matplotlib.backends.backend_qt5agg as backend_qt5agg
FigureCanvasQTAgg = backend_qt5agg.FigureCanvasQTAgg
else:
import matplotlib.backends.backend_qt4agg as backend_qt4agg
FigureCanvasQTAgg = backend_qt4agg.FigureCanvasQTAgg
# Common imports
import matplotlib.backends.backend_qt5 as backend_qt5
import matplotlib.backend_bases as backend_bases
from matplotlib.figure import Figure
FigureManagerBase = backend_bases.FigureManagerBase
except ImportError:
g.es_exception()
#@-<< pyplot_backend imports >>
#@+others
#@+node:ekr.20160928073605.1: ** init
def init():
'''Return True if the plugin has loaded successfully.'''
g.trace('pyplot_backend.py is not a plugin.')
return False
#@+node:ekr.20160928082006.1: ** Leo backend
#@+node:ekr.20160928074615.2: *3* new_figure_manager
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
#@+node:ekr.20160928074615.3: *3* new_figure_manager_given_figure
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQTAgg(figure)
return LeoFigureManagerQT(canvas, num)
#@+node:ekr.20160929050151.1: *3* class LeoFigureManagerQT
# From backend_qt5.py
# pylint: disable=no-member
# matplotlib.backends.backend_qt5.FigureManager probably does exist. See:
# https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/backends/backend_qt5.py
class LeoFigureManagerQT(backend_qt5.FigureManager):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The qt.QToolBar
window : The qt.QMainWindow (not set)
"""
#@+others
#@+node:ekr.20160929050151.2: *4* __init__ (LeoFigureManagerQt)
# Do NOT call the base class ctor. It creates a Qt MainWindow.
# pylint: disable=super-init-not-called
# pylint: disable=non-parent-init-called
def __init__(self, canvas, num):
'''Ctor for the LeoFigureManagerQt class.'''
self.c = c = g.app.log.c
super().__init__(canvas, num)
self.canvas = canvas
# New code for Leo: embed the canvas in the viewrendered area.
self.vr_controller = vc = vr.controllers.get(c.hash())
self.splitter = c.free_layout.get_top_splitter()
self.frame = w = QtWidgets.QFrame()
w.setLayout(QtWidgets.QVBoxLayout())
w.layout().addWidget(self.canvas)
if vc:
vc.embed_widget(w)
class DummyWindow:
def __init__(self, c):
self.c = c
self._destroying = None
def windowTitle(self):
return self.c.p.h
self.window = DummyWindow(c)
# See comments in the base class ctor, in backend_qt5.py.
FocusPolicy = QtCore.Qt.FocusPolicy if isQt6 else QtCore.Qt
self.canvas.setFocusPolicy(FocusPolicy.StrongFocus)
self.canvas.setFocus()
self.canvas._destroying = False
self.toolbar = self._get_toolbar(self.canvas, self.frame)
if self.toolbar is not None:
# The toolbar is a backend_qt5.NavigationToolbar2QT.
layout = self.frame.layout()
layout.addWidget(self.toolbar)
# add text label to status bar
self.statusbar_label = QtWidgets.QLabel()
layout.addWidget(self.statusbar_label)
# pylint: disable=no-member
if isQt5 or isQt6:
pass # The status bar doesn't work yet.
else:
self.toolbar.message.connect(self._show_message)
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
#@+node:ekr.20160929083114.1: *4* destroy
def destroy(self, *args):
# Causes problems.
# self.frame.deleteLater()
self.frame = None
#@-others
#@-others
#@@language python
#@-leo
| 35.139706 | 97 | 0.663109 | [
"MIT"
] | ATikhonov2/leo-editor | leo/plugins/pyplot_backend.py | 4,779 | Python |
import sys
import os
import errno
import time
import json
import glob
from base64 import b64decode
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Handler(FileSystemEventHandler):
def __init__(self, provider):
self.provider = provider
def on_created(self, event):
self.handle_event(event)
def on_modified(self, event):
self.handle_event(event)
def handle_event(self, event):
# Check if it's a JSON file
if not event.is_directory and event.src_path.endswith('.json'):
print('Certificate storage changed (' + os.path.basename(event.src_path) + ')')
self.handle_file(event.src_path)
    def handle_file(self, file):
# Read JSON file
data = json.loads(open(file).read())
print('Using provider: ' + self.provider)
# Determine ACME version
try:
acme_version = 2 if 'acme-v02' in data[self.provider]['Account']['Registration']['uri'] else 1
except TypeError:
if 'DomainsCertificate' in data:
acme_version = 1
else:
acme_version = 2
# Find certificates
if acme_version == 1:
certs = data['DomainsCertificate']['Certs']
elif acme_version == 2:
certs = data[self.provider]['Certificates']
print('Certificate storage contains ' + str(len(certs)) + ' certificates')
# Loop over all certificates
for c in certs:
if acme_version == 1:
name = c['Certificate']['Domain']
privatekey = c['Certificate']['PrivateKey']
fullchain = c['Certificate']['Certificate']
sans = c['Domains']['SANs']
elif acme_version == 2:
name = c['domain']['main']
privatekey = c['key']
fullchain = c['certificate']
sans = c['domain']['sans'] if 'sans' in c['domain'].keys() else None
# Decode private key, certificate and chain
privatekey = b64decode(privatekey).decode('utf-8')
fullchain = b64decode(fullchain).decode('utf-8')
start = fullchain.find('-----BEGIN CERTIFICATE-----', 1)
cert = fullchain[0:start]
chain = fullchain[start:]
# Create domain directory if it doesn't exist
directory = 'certs/' + name + '/'
try:
os.makedirs(directory)
except OSError as error:
if error.errno != errno.EEXIST:
raise
# Write private key, certificate and chain to file
with open(directory + 'privkey.pem', 'w') as f:
f.write(privatekey)
with open(directory + 'cert.pem', 'w') as f:
f.write(cert)
with open(directory + 'chain.pem', 'w') as f:
f.write(chain)
with open(directory + 'fullchain.pem', 'w') as f:
f.write(fullchain)
# Write private key, certificate and chain to flat files
directory = 'certs_flat/'
with open(directory + name + '.key', 'w') as f:
f.write(privatekey)
with open(directory + name + '.crt', 'w') as f:
f.write(fullchain)
with open(directory + name + '.chain.pem', 'w') as f:
f.write(chain)
if sans:
for name in sans:
with open(directory + name + '.key', 'w') as f:
f.write(privatekey)
with open(directory + name + '.crt', 'w') as f:
f.write(fullchain)
with open(directory + name + '.chain.pem', 'w') as f:
f.write(chain)
print('Extracted certificate for: ' + name + (', ' + ', '.join(sans) if sans else ''))
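# For reference, a minimal sketch of the ACME v2 storage layout that
# handle_file() above expects (inferred from the code; the domain names are
# made-up placeholders):
#
#   { "<provider>": {
#       "Account": { "Registration": { "uri": ".../acme-v02/..." } },
#       "Certificates": [
#         { "domain": { "main": "example.com", "sans": ["www.example.com"] },
#           "key": "<base64 private key>",
#           "certificate": "<base64 full chain>" } ] } }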
if __name__ == "__main__":
# Determine path to watch
path = sys.argv[1] if len(sys.argv) > 1 else './data'
provider = sys.argv[2] if len(sys.argv) > 2 else 'default'
# Create output directories if it doesn't exist
try:
os.makedirs('certs')
except OSError as error:
if error.errno != errno.EEXIST:
raise
try:
os.makedirs('certs_flat')
except OSError as error:
if error.errno != errno.EEXIST:
raise
# Create event handler and observer
event_handler = Handler(provider)
observer = Observer()
# Extract certificates from current file(s) before watching
files = glob.glob(os.path.join(path, '*.json'))
try:
for file in files:
print('Certificate storage found (' + os.path.basename(file) + ')')
            event_handler.handle_file(file)
except Exception as e:
print(e)
# Register the directory to watch
    observer.schedule(event_handler, path)
# Main loop to watch the directory
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| 33.934211 | 106 | 0.551764 | [
"MIT"
] | GuyKh/traefik-certificate-extractor | extractor.py | 5,158 | Python |
#! /usr/bin/env python3
"""camera.py - Adding a camera for bigger levels."""
import collections
import time
import arcade
from arcade import key
# Constraints
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Platformer"
# Scale sprites from original size. 1 is original.
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = SPRITE_PIXEL_SIZE * TILE_SCALING
PLAYER_MOVEMENT_SPEED = 5
GRAVITY = 1
PLAYER_JUMP_SPEED = 20
# Player starting position
PLAYER_START_X = 64
PLAYER_START_Y = 225
# Layer names from our TileMap
LAYER_NAME_PLATFORMS = "Platforms"
LAYER_NAME_COINS = "Coins"
LAYER_NAME_FOREGROUND = "Foreground"
LAYER_NAME_BACKGROUND = "Background"
LAYER_NAME_DONT_TOUCH = "Don't Touch"
class FPSCounter:
"""A class to detect frames per second."""
def __init__(self):
self.time = time.perf_counter()
self.frame_times = collections.deque(maxlen=60)
def tick(self):
"""Determine tick amount."""
t_1 = time.perf_counter()
dt = t_1 - self.time
self.time = t_1
self.frame_times.append(dt)
def get_fps(self) -> float:
"""Return FPS as a float."""
total_time = sum(self.frame_times)
if total_time == 0:
return 0
return len(self.frame_times) / sum(self.frame_times)
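# Minimal usage sketch: create one counter, call tick() once per rendered frame
# and read get_fps() when a value is needed; this is exactly how MyGame uses
# self.fps in on_draw() and the current_fps property below:
#
#     fps = FPSCounter()
#     fps.tick()               # once per frame
#     current = fps.get_fps()  # smoothed frames-per-second estimate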
class Player(arcade.Sprite):
"""A class to encapsulate the player sprite."""
def update(self):
""" Move the player"""
# Check for out of bounds
self.left = max(self.left, 0)
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self):
"""Call the parent class and set up the window."""
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Initialize Scene and player
self.scene, self.player_sprite = None, None
# Initialize physics engine
self.physics_engine = None
# Track current state of key press
self.left_pressed = False
self.right_pressed = False
# Set cameras. Separate needed due to scrolling issues.
self.camera = None
self.gui_camera = None
# Game information
self.score = 0
self.lives_left = 0
self.timer = 0
self.fps = FPSCounter()
# Keys are set as a tuple for easier access
self.up = (key.UP, key.W)
self.down = (key.DOWN, key.S)
self.left = (key.LEFT, key.A)
self.right = (key.RIGHT, key.D)
# Our TileMap Object
self.tile_map = None
# Right edge of the map
self.end_of_map = 0
self.level = 1
# Load sounds
self.collect_coin_sound = arcade.load_sound(":resources:sounds/coin1.wav")
self.jump_sound = arcade.load_sound(":resources:sounds/jump1.wav")
self.game_over_sound = arcade.load_sound(":resources:sounds/gameover1.wav")
arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)
def setup(self):
"""Set-up the game here. Call this function to restart the game."""
# Setup the Cameras
self.camera = arcade.Camera(self.width, self.height)
self.gui_camera = arcade.Camera(self.width, self.height)
# Name of map file to load
map_name = f":resources:tiled_maps/map2_level_{self.level}.json"
# Layer specific options are defined on Layer names in a dictionary
# Doing this will make the SpriteList for the platforms layer
# use spatial hashing for detection.
layer_options = {
LAYER_NAME_PLATFORMS: {
"use_spatial_hash": True,
},
LAYER_NAME_COINS: {
"use_spatial_hash": True,
},
LAYER_NAME_DONT_TOUCH: {
"use_spatial_hash": True,
},
}
# Read in tiled map
self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)
# Initialize Scene
# Automatically adds all layers as SpriteLists in proper order.
self.scene = arcade.Scene.from_tilemap(self.tile_map)
# Player setup
# Adding sprite list after means that the foreground will be rendered
# after the Player, meaning it will appear to be in front or over the top.
# Doing this before add_sprite means the draw order is possible, otherwise
# it is not possible to do.
self.scene.add_sprite_list_after("Player", LAYER_NAME_FOREGROUND)
image_source = ":resources:images" \
"/animated_characters/female_adventurer" \
"/femaleAdventurer_idle.png"
self.player_sprite = Player(image_source, CHARACTER_SCALING)
self.player_sprite.center_x = PLAYER_START_X
self.player_sprite.center_y = PLAYER_START_Y
self.scene.add_sprite("Player", self.player_sprite)
# Create the physics engine
self.physics_engine = arcade.PhysicsEnginePlatformer(
self.player_sprite, gravity_constant=GRAVITY, walls=self.scene["Platforms"]
)
# Set up game information for GUI
self.score = 0
self.lives_left = 5
# --- Other stuff
# Set the background color
if self.tile_map.background_color:
arcade.set_background_color(self.tile_map.background_color)
# Calculate the right edge of the my_map in pixels
self.end_of_map = self.tile_map.width * GRID_PIXEL_SIZE
@property
def current_fps(self) -> float:
"""Determine current fps."""
return self.fps.get_fps()
@property
def coins_left(self) -> int:
"""Determine coins remaining."""
return len(self.scene["Coins"])
@staticmethod
def gui_label(text: str, var: any, x: int, y: int):
"""
Simplify arcade.draw_text.
Keyword arguments:
text -- This is the label.
var -- This is the variable value.
x -- This is the percent point of the screen's x x that it will start at.
y -- This is the percent point of the screen's y it will start at.
"""
x, y = x / 100, y / 100
arcade.draw_text(
text=f"{text}: {var}",
start_x=SCREEN_WIDTH * x, start_y=SCREEN_HEIGHT * y,
color=arcade.csscolor.WHITE,
font_size=18,
)
def display_gui_info(self):
"""Display GUI information."""
arcade.draw_rectangle_filled(center_x=SCREEN_WIDTH / 14,
center_y=SCREEN_HEIGHT - SCREEN_HEIGHT / 10,
width=SCREEN_WIDTH / 7,
height=SCREEN_HEIGHT / 4,
color=arcade.color.IRRESISTIBLE,
)
self.gui_label("Score", self.score, 0, 95)
self.gui_label("Coins Left", self.coins_left, 0, 90)
self.gui_label("Time", round(self.timer), 0, 85)
self.gui_label("Lives", self.lives_left, 0, 80)
self.gui_label("FPS", round(self.current_fps), 90, 95)
def on_draw(self):
"""Render the screen."""
# Clears screen to the background color
arcade.start_render()
# Activate our Camera
self.camera.use()
# Draw scene
self.scene.draw()
# Activate GUI camera before elements.
self.gui_camera.use()
# Draw score while scrolling it along the screen.
self.display_gui_info()
self.fps.tick()
def on_key_press(self, button: int, modifiers: int):
"""Called whenever a key is pressed."""
if button in self.up and self.physics_engine.can_jump():
self.player_sprite.change_y = PLAYER_JUMP_SPEED
arcade.play_sound(self.jump_sound)
elif button in self.left:
self.left_pressed = True
elif button in self.right:
self.right_pressed = True
def on_key_release(self, button: int, modifiers: int):
"""Called when the user releases a key."""
if button in self.left:
self.left_pressed = False
elif button in self.right:
self.right_pressed = False
def update_player_velocity(self):
"""Update velocity based on key state."""
if self.left_pressed and not self.right_pressed:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif self.right_pressed and not self.left_pressed:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
else:
self.player_sprite.change_x = 0
def center_camera_to_player(self):
"""Ensure the camera is centered on the player."""
screen_center_x = self.player_sprite.center_x - (
self.camera.viewport_width / 2)
screen_center_y = self.player_sprite.center_y - (
self.camera.viewport_height / 2)
# Don't let camera travel past 0
if screen_center_x < 0:
screen_center_x = 0
if screen_center_y < 0:
screen_center_y = 0
player_centered = screen_center_x, screen_center_y
self.camera.move_to(player_centered)
def player_coin_collision(self):
"""
Detects player collision with coins, then removes the coin sprite.
This will play a sound and add 1 to the score.
"""
# Detect coin collision
coin_hit_list = arcade.check_for_collision_with_list(
self.player_sprite, self.scene["Coins"]
)
# Loop through each coin we hit and remove it
for coin in coin_hit_list:
# Remove the coin and add to score
coin.remove_from_sprite_lists()
arcade.play_sound(self.collect_coin_sound)
self.score += 1
def reset_player(self):
"""Reset's player to start position."""
self.player_sprite.center_x = PLAYER_START_X
self.player_sprite.center_y = PLAYER_START_Y
def stop_player(self):
"""Stop player movement."""
self.player_sprite.change_x = 0
self.player_sprite.change_y = 0
def game_over(self):
"""Sets game over and resets position."""
self.stop_player()
self.reset_player()
self.lives_left -= 1
arcade.play_sound(self.game_over_sound)
def fell_off_map(self):
"""Detect if the player fell off the map and then reset position if so."""
if self.player_sprite.center_y < -100:
self.game_over()
def touched_dont_touch(self):
"""Detect collision on Don't Touch layer. Reset player if collision."""
if arcade.check_for_collision_with_list(
self.player_sprite, self.scene[LAYER_NAME_DONT_TOUCH]
):
self.game_over()
def at_end_of_level(self):
"""Checks if player at end of level, and if so, load the next level."""
if self.player_sprite.center_x >= self.end_of_map:
self.level += 1
# Load the next level
self.setup()
def on_update(self, delta_time: float):
"""Movement and game logic."""
# Move the player with the physics engine
self.timer += delta_time
self.update_player_velocity()
self.player_sprite.update()
self.physics_engine.update()
# Detect collisions and level state
self.player_coin_collision()
self.fell_off_map()
self.touched_dont_touch()
self.at_end_of_level()
# Position the camera
self.center_camera_to_player()
def main():
"""Main program code."""
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| 32.008174 | 87 | 0.616583 | [
"MIT"
] | casadina/py_arcade | multiple_levels.py | 11,747 | Python |
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from cStringIO import StringIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._internal import _decode_unicode, _empty_stream
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import LimitedStream, make_line_iter
from werkzeug.exceptions import RequestEntityTooLarge
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings
_empty_string_iter = repeat('')
#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
#: supported http encodings that are also available in python we support
#: for multipart messages.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, filename, content_type,
content_length=None):
"""The stream factory that is used per default."""
if total_content_length > 1024 * 500:
return TemporaryFile('wb+')
return StringIO()
def parse_form_data(environ, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
"""Parse the form data in the environ and return it as tuple in the form
``(stream, form, files)``. You should only call this method if the
transport method is `POST`, `PUT`, or `PATCH`.
If the mimetype of the data transmitted is `multipart/form-data` the
files multidict will be filled with `FileStorage` objects. If the
mimetype is unknown the input stream is wrapped and returned as first
argument, else the stream is empty.
This is a shortcut for the common usage of :class:`FormDataParser`.
Have a look at :ref:`dealing-with-request-data` for more details.
.. versionadded:: 0.5
The `max_form_memory_size`, `max_content_length` and
`cls` parameters were added.
.. versionadded:: 0.5.1
The optional `silent` flag was added.
:param environ: the WSGI environment to be used for parsing.
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
:return: A tuple in the form ``(stream, form, files)``.
"""
return FormDataParser(stream_factory, charset, errors,
max_form_memory_size, max_content_length,
cls, silent).parse_from_environ(environ)
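# Illustrative usage sketch (not part of the original module): in a WSGI
# application one might write, assuming ``environ`` is the WSGI environ dict:
#
#     stream, form, files = parse_form_data(environ)
#     username = form.get('username')       # url encoded / multipart form field
#     upload = files.get('attachment')      # a FileStorage, if one was uploaded
#     if upload is not None:
#         upload.save('/tmp/' + upload.filename)
#
# The field names and the target path are made-up placeholders; only the call
# pattern and the returned ``(stream, form, files)`` tuple come from the
# function above.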
def exhaust_stream(f):
"""Helper decorator for methods that exhausts the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
stream.exhaust()
return update_wrapper(wrapper, f)
class FormDataParser(object):
"""This class implements parsing of form data for Werkzeug. By itself
it can parse multipart and url encoded form data. It can be subclassed
and extended but for most mimetypes it is a better idea to use the
untouched stream and expose it as separate attributes on a request
object.
.. versionadded:: 0.8
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
"""
def __init__(self, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
if stream_factory is None:
stream_factory = default_stream_factory
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.max_content_length = max_content_length
if cls is None:
cls = MultiDict
self.cls = cls
self.silent = silent
def get_parse_func(self, mimetype, options):
return self.parse_functions.get(mimetype)
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get('CONTENT_TYPE', '')
mimetype, options = parse_options_header(content_type)
try:
content_length = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
content_length = 0
stream = environ['wsgi.input']
return self.parse(stream, mimetype, content_length, options)
def parse(self, stream, mimetype, content_length, options=None):
"""Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
"""
if self.max_content_length is not None and \
content_length > self.max_content_length:
raise RequestEntityTooLarge()
if options is None:
options = {}
input_stream = LimitedStream(stream, content_length)
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, input_stream, mimetype,
content_length, options)
except ValueError:
if not self.silent:
raise
return input_stream, self.cls(), self.cls()
@exhaust_stream
def _parse_multipart(self, stream, mimetype, content_length, options):
parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls)
form, files = parser.parse(stream, options.get('boundary'),
content_length)
return _empty_stream, form, files
@exhaust_stream
def _parse_urlencoded(self, stream, mimetype, content_length, options):
if self.max_form_memory_size is not None and \
content_length > self.max_form_memory_size:
raise RequestEntityTooLarge()
form = url_decode_stream(stream, self.charset,
errors=self.errors, cls=self.cls)
return _empty_stream, form, self.cls()
#: mapping of mimetypes to parsing functions
parse_functions = {
'multipart/form-data': _parse_multipart,
'application/x-www-form-urlencoded': _parse_urlencoded,
'application/x-url-encoded': _parse_urlencoded
}
def is_valid_multipart_boundary(boundary):
"""Checks if the string given is a valid multipart boundary."""
return _multipart_boundary_re.match(boundary) is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] == '\r\n':
return line[:-2], True
elif line[-1:] in '\r\n':
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
"""Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
"""
result = []
for line in iterable:
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError('unexpected end of line in multipart header')
if not line:
break
elif line[0] in ' \t' and result:
key, value = result[-1]
result[-1] = (key, value + '\n ' + line[1:])
else:
parts = line.split(':', 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
# we link the list to the headers, no need to create a copy, the
# list was not shared anyways.
return Headers.linked(result)
_begin_form = intern('begin_form')
_begin_file = intern('begin_file')
_cont = intern('cont')
_end = intern('end')
class MultiPartParser(object):
def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
max_form_memory_size=None, cls=None, buffer_size=10 * 1024):
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
if stream_factory is None:
stream_factory = default_stream_factory
if cls is None:
cls = MultiDict
self.cls = cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return ''
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get('content-transfer-encoding')
if transfer_encoding is not None and \
transfer_encoding in _supported_multipart_encodings:
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get('content-type')
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get('charset', self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
filename = _decode_unicode(filename, self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get('content-type')
try:
content_length = int(headers['content-length'])
except (KeyError, ValueError):
content_length = 0
container = self.stream_factory(total_content_length, content_type,
filename, content_length)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail('Missing boundary')
if not is_valid_multipart_boundary(boundary):
self.fail('Invalid boundary: %s' % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail('Boundary longer than buffer size')
def parse_lines(self, file, boundary, content_length):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = '--' + boundary
last_part = next_part + '--'
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = ''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == '--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
try:
line = line.decode(transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = ''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == '\r\n':
buf = '\r\n'
cutoff = -2
else:
buf = line[-1]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
            # character we have to flush it, otherwise we will chop off
# certain values.
if buf not in ('', '\r', '\n', '\r\n'):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate `('file', (name, val))` and `('form', (name
,val))` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, _decode_unicode(''.join(container),
part_charset, self.errors)))
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2)
form = (p[1] for p in formstream if p[0] == 'form')
files = (p[1] for p in filestream if p[0] == 'file')
return self.cls(form), self.cls(files)
| 41.224 | 81 | 0.598389 | [
"BSD-3-Clause"
] | Chitrank-Dixit/werkzeug | werkzeug/formparser.py | 20,612 | Python |
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
import sys
import os
import pandas as pd
from scipy import stats
HEADERS = ['device', 'layout', 'enc_type', 'n_states', 'sim_type', 'shots', 'optimizer', 'energy', 'meas_mit']
df = pd.DataFrame(columns = HEADERS)
diagonalized_values = [
(0, 0),
(1, -0.43658111),
(2, -1.749160),
(3, -2.045671),
(4, -2.1439810),
(5, -2.183592),
(6, -2.201568),
(7, -2.210416),
(8, -2.215038),
(16, -2.221059)
]
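# Result files are matched purely by substring in the loop below; an input file
# is expected to be named something like (made-up example, only the substrings
# matter)
#   gray_code_qasm_SPSA_mit_meas_layout-4-2-3_yorktown_8_states_energies.npy
# and to contain a 1-D numpy array of optimized energies, one entry per run.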
for filename in os.listdir("."):
if "bkp" in filename:
continue
if "energies.npy" not in filename:
continue
if "8_states" not in filename:
continue
if 'yorktown' in filename:
device_name = 'yorktown'
elif 'vigo' in filename:
device_name = 'vigo'
elif 'no_device' in filename:
device_name = 'no device'
else:
continue
print(filename)
enc_type = 'Gray code' if 'gray_code' in filename else 'Jordan-Wigner'
optimizer = 'SPSA' if 'SPSA' in filename else 'Nelder-Mead'
    sim_type = 'QASM' if 'qasm' in filename else 'statevector'
meas_mit = 'True' if 'mit_meas' in filename else 'False'
if device_name == 'no device':
meas_mit = 'None'
if device_name == 'no device':
layout = 'None'
circ = 'None'
else:
if 'layout-4-2-3' in filename:
layout = '{4,2,3}'
circ = 'True'
elif 'layout-4-2-1' in filename:
layout = '{4,2,1}'
circ = 'False'
elif 'layout_None' in filename:
layout = 'None'
circ = 'None'
elif 'layout-0-1-2' in filename:
layout = '{0,1,2}'
circ = 'False'
else:
continue
#raise ValueError
#layout = 'None'
#circ = 'None'
n_shots = 10000
n_states = 8
base_dict = {'device' : device_name,
'layout' : layout,
'enc_type' : enc_type,
'n_states' : n_states,
'sim_type' : sim_type,
'shots' : n_shots,
'optimizer' : optimizer,
'meas_mit' : meas_mit,
'circ' : circ
}
print(base_dict)
data = np.load(f"./{filename}")
for energy in data:
next_dict = base_dict
next_dict['energy'] = energy
df = df.append(next_dict, ignore_index=True)
print(df.groupby(['device','layout','enc_type','sim_type','n_states','shots','optimizer','meas_mit']).describe())
#colours = {"True" : "tab:blue", "False" : "tab:orange", "None" : "tab:gray"}
colours = {'vigo' : 'tab:blue', 'yorktown' : 'tab:orange', 'no device' : 'tab:gray'}
linestyles = {('True','vigo') : (0,(1,1)), ('False','vigo') : (0,(5,1)), ('None','no device') : '-.', ('True','yorktown') : (0,(1,5)), ('False','yorktown') : (0,(5,5))}
fig, ax = plt.subplots(figsize=(8,5))
for key, grp in df.groupby(['circ','meas_mit','enc_type','layout','device']):
if key[2] == 'Jordan-Wigner':
continue
if key[1] == 'False':
continue
if key[0] == 'True':
label = f'Loop: {key[3]}'
elif key[0] == 'False':
label = f'Line: {key[3]}'
else:
label = 'No noise'
if key[4] == 'vigo':
label = label + ' (V)'
elif key[4] == 'yorktown':
label = label + ' (Y)'
sns.kdeplot(grp['energy'],bw='scott',label=f"{label}",color=colours[key[4]],linestyle=linestyles[(key[0],key[4])],ax=ax)
ax.axvline(x=diagonalized_values[8][1], color='black', label='True value (N = 8)', alpha=0.8)
handles, labels = ax.get_legend_handles_labels()
order = [0,1,3,2,4]
handles, labels = [handles[i] for i in order], [labels[i] for i in order]
ax.legend(handles,labels,fontsize=14)
ax.set_xlabel("Energy", fontsize=16)
#ax.set_xlim(-3,10)
#plt.ylim(0,20)
#plt.xticks(fontsize=16)
#plt.yticks(fontsize=16)
ax.tick_params(labelsize=16)
#title_string = f"Yorktown, meas_mit={key[1]}"
#plt.title(title_string, fontsize=20)
fig.tight_layout()
plt.savefig(f"./8_states_yorktown.pdf")
plt.show()
| 27.111842 | 168 | 0.568794 | [
"MIT"
] | aemccoy/GrayCode-QubitEncoding | paper-data/noise/8state_layout_comparison.py | 4,121 | Python |
"""
The file preprocesses the files/train.txt and files/test.txt files.
It requires the dependency-based embeddings by Levy et al. Download them from his website and change
the embeddingsPath variable in the script to point to the unzipped deps.words file.
"""
from __future__ import print_function
import numpy as np
import gzip
import os
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else: #Python 2.7 imports
import cPickle as pkl
outputFilePath = 'pkl/sem-relations.pkl.gz'
#We download English word embeddings from here https://www.cs.york.ac.uk/nlp/extvec/
embeddingsPath = 'embeddings/wiki_extvec.gz'
folder = 'files/'
files = [folder+'train.txt', folder+'test.txt']
#Mapping of the labels to integers
labelsMapping = {'Other':0,
'Message-Topic(e1,e2)':1, 'Message-Topic(e2,e1)':2,
'Product-Producer(e1,e2)':3, 'Product-Producer(e2,e1)':4,
'Instrument-Agency(e1,e2)':5, 'Instrument-Agency(e2,e1)':6,
'Entity-Destination(e1,e2)':7, 'Entity-Destination(e2,e1)':8,
'Cause-Effect(e1,e2)':9, 'Cause-Effect(e2,e1)':10,
'Component-Whole(e1,e2)':11, 'Component-Whole(e2,e1)':12,
'Entity-Origin(e1,e2)':13, 'Entity-Origin(e2,e1)':14,
'Member-Collection(e1,e2)':15, 'Member-Collection(e2,e1)':16,
'Content-Container(e1,e2)':17, 'Content-Container(e2,e1)':18}
words = {}
maxSentenceLen = [0,0]
distanceMapping = {'PADDING': 0, 'LowerMin': 1, 'GreaterMax': 2}
minDistance = -30
maxDistance = 30
for dis in range(minDistance,maxDistance+1):
distanceMapping[dis] = len(distanceMapping)
def createMatrices(file, word2Idx, maxSentenceLen=100):
"""Creates matrices for the events and sentence for the given file"""
labels = []
positionMatrix1 = []
positionMatrix2 = []
tokenMatrix = []
for line in open(file):
splits = line.strip().split('\t')
label = splits[0]
pos1 = splits[1]
pos2 = splits[2]
sentence = splits[3]
tokens = sentence.split(" ")
tokenIds = np.zeros(maxSentenceLen)
positionValues1 = np.zeros(maxSentenceLen)
positionValues2 = np.zeros(maxSentenceLen)
for idx in range(0, min(maxSentenceLen, len(tokens))):
tokenIds[idx] = getWordIdx(tokens[idx], word2Idx)
distance1 = idx - int(pos1)
distance2 = idx - int(pos2)
if distance1 in distanceMapping:
positionValues1[idx] = distanceMapping[distance1]
elif distance1 <= minDistance:
positionValues1[idx] = distanceMapping['LowerMin']
else:
positionValues1[idx] = distanceMapping['GreaterMax']
if distance2 in distanceMapping:
positionValues2[idx] = distanceMapping[distance2]
elif distance2 <= minDistance:
positionValues2[idx] = distanceMapping['LowerMin']
else:
positionValues2[idx] = distanceMapping['GreaterMax']
tokenMatrix.append(tokenIds)
positionMatrix1.append(positionValues1)
positionMatrix2.append(positionValues2)
labels.append(labelsMapping[label])
    return np.array(labels, dtype='int32'), np.array(tokenMatrix, dtype='int32'), np.array(positionMatrix1, dtype='int32'), np.array(positionMatrix2, dtype='int32')
def getWordIdx(token, word2Idx):
"""Returns from the word2Idex table the word index for a given token"""
if token in word2Idx:
return word2Idx[token]
elif token.lower() in word2Idx:
return word2Idx[token.lower()]
return word2Idx["UNKNOWN_TOKEN"]
for fileIdx in range(len(files)):
file = files[fileIdx]
for line in open(file):
splits = line.strip().split('\t')
label = splits[0]
sentence = splits[3]
tokens = sentence.split(" ")
maxSentenceLen[fileIdx] = max(maxSentenceLen[fileIdx], len(tokens))
for token in tokens:
words[token.lower()] = True
print("Max Sentence Lengths: ", maxSentenceLen)
# :: Read in word embeddings ::
word2Idx = {}
wordEmbeddings = []
# :: Downloads the embeddings from the York webserver ::
if not os.path.isfile(embeddingsPath):
basename = os.path.basename(embeddingsPath)
if basename == 'wiki_extvec.gz':
print("Start downloading word embeddings for English using wget ...")
#os.system("wget https://www.cs.york.ac.uk/nlp/extvec/"+basename+" -P embeddings/")
os.system("wget https://public.ukp.informatik.tu-darmstadt.de/reimers/2017_english_embeddings/"+basename+" -P embeddings/")
else:
print(embeddingsPath, "does not exist. Please provide pre-trained embeddings")
    sys.exit()
# :: Load the pre-trained embeddings file ::
fEmbeddings = gzip.open(embeddingsPath, "r") if embeddingsPath.endswith('.gz') else open(embeddingsPath, encoding="utf8")
print("Load pre-trained embeddings file")
for line in fEmbeddings:
split = line.decode('utf-8').strip().split(" ")
word = split[0]
if len(word2Idx) == 0: #Add padding+unknown
word2Idx["PADDING_TOKEN"] = len(word2Idx)
        vector = np.zeros(len(split)-1) #Zero vector for 'PADDING' word
wordEmbeddings.append(vector)
word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
vector = np.random.uniform(-0.25, 0.25, len(split)-1)
wordEmbeddings.append(vector)
if word.lower() in words:
vector = np.array([float(num) for num in split[1:]])
wordEmbeddings.append(vector)
word2Idx[word] = len(word2Idx)
wordEmbeddings = np.array(wordEmbeddings)
print("Embeddings shape: ", wordEmbeddings.shape)
print("Len words: ", len(words))
# :: Create token matrix ::
train_set = createMatrices(files[0], word2Idx, max(maxSentenceLen))
test_set = createMatrices(files[1], word2Idx, max(maxSentenceLen))
data = {'wordEmbeddings': wordEmbeddings, 'word2Idx': word2Idx,
'train_set': train_set, 'test_set': test_set}
f = gzip.open(outputFilePath, 'wb')
pkl.dump(data, f)
f.close()
print("Data stored in pkl folder")
| 32.179104 | 165 | 0.621212 | [
"Apache-2.0"
] | BhuvaneshwaranK/deeplearning4nlp-tutorial | 2017-07_Seminar/Session 3 - Relation CNN/code/preprocess.py | 6,468 | Python |
import os
RESOURCE_DIR = os.path.join(os.path.dirname(__file__), 'resources')
FONT_FILE_PATH = os.path.join(RESOURCE_DIR, 'DejaVuSans.ttf')
| 23.666667 | 67 | 0.767606 | [
"MIT"
] | alok1974/minescrubber | src/minescrubber/conf.py | 142 | Python |
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Classes related to graph clustering.
@undocumented: _handle_mark_groups_arg_for_clustering, _prepare_community_comparison"""
__license__ = u"""
Copyright (C) 2006-2012 Tamás Nepusz <[email protected]>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
from copy import deepcopy
from itertools import izip
from math import pi
from cStringIO import StringIO
from igraph import community_to_membership
from igraph.compat import property
from igraph.configuration import Configuration
from igraph.datatypes import UniqueIdGenerator
from igraph.drawing.colors import ClusterColoringPalette
from igraph.statistics import Histogram
from igraph.summary import _get_wrapper_for_width
from igraph.utils import str_to_orientation
class Clustering(object):
"""Class representing a clustering of an arbitrary ordered set.
This is now used as a base for L{VertexClustering}, but it might be
useful for other purposes as well.
Members of an individual cluster can be accessed by the C{[]} operator:
>>> cl = Clustering([0,0,0,0,1,1,1,2,2,2,2])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property:
>>> cl.membership
[0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the clustering object as if it were a regular list
of clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
4 5 6
7 8 9 10
If you need all the clusters at once as lists, you can simply convert
the clustering object to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [4, 5, 6], [7, 8, 9, 10]]
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, membership, params = None):
"""Constructor.
@param membership: the membership list -- that is, the cluster
index in which each element of the set belongs to.
@param params: additional parameters to be stored in this
object's dictionary."""
self._membership = list(membership)
if len(self._membership)>0:
self._len = max(m for m in self._membership if m is not None)+1
else:
self._len = 0
if params:
self.__dict__.update(params)
def __getitem__(self, idx):
"""Returns the members of the specified cluster.
@param idx: the index of the cluster
@return: the members of the specified cluster as a list
@raise IndexError: if the index is out of bounds"""
if idx < 0 or idx >= self._len:
raise IndexError("cluster index out of range")
return [i for i, e in enumerate(self._membership) if e == idx]
def __iter__(self):
"""Iterates over the clusters in this clustering.
This method will return a generator that generates the clusters
one by one."""
clusters = [[] for _ in xrange(self._len)]
for idx, cluster in enumerate(self._membership):
clusters[cluster].append(idx)
return iter(clusters)
def __len__(self):
"""Returns the number of clusters.
@return: the number of clusters
"""
return self._len
def __str__(self):
return self.summary(verbosity=1, width=78)
def as_cover(self):
"""Returns a L{Cover} that contains the same clusters as this clustering."""
return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds):
"""Compares this clustering to another one using some similarity or
distance metric.
This is a convenience method that simply calls L{compare_communities}
with the two clusterings as arguments. Any extra positional or keyword
argument is also forwarded to L{compare_communities}."""
return compare_communities(self, other, *args, **kwds)
@property
def membership(self):
"""Returns the membership vector."""
return self._membership[:]
@property
def n(self):
"""Returns the number of elements covered by this clustering."""
return len(self._membership)
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
counts = [0] * len(self)
for x in self._membership:
counts[x] += 1
if args:
return [counts[idx] for idx in args]
return counts
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the clustering.
The summary includes the number of items and clusters, and also the
list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the clustering as a string.
"""
out = StringIO()
print >>out, "Clustering with %d elements and %d clusters" % \
(len(self._membership), len(self))
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexClustering(Clustering):
"""The clustering of the vertex set of a graph.
This class extends L{Clustering} by linking it to a specific L{Graph} object
and by optionally storing the modularity score of the clustering.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexClustering} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
# Allow None to be passed to __plot__ as the "palette" keyword argument
_default_palette = None
def __init__(self, graph, membership = None, modularity = None, \
params = None, modularity_params = None):
"""Creates a clustering object for a given graph.
@param graph: the graph that will be associated to the clustering
@param membership: the membership list. The length of the list must
be equal to the number of vertices in the graph. If C{None}, every
vertex is assumed to belong to the same cluster.
@param modularity: the modularity score of the clustering. If C{None},
it will be calculated when needed.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
if membership is None:
Clustering.__init__(self, [0]*graph.vcount(), params)
else:
if len(membership) != graph.vcount():
raise ValueError("membership list has invalid length")
Clustering.__init__(self, membership, params)
self._graph = graph
self._modularity = modularity
self._modularity_dirty = modularity is None
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
# pylint: disable-msg=C0103
@classmethod
def FromAttribute(cls, graph, attribute, intervals=None, params=None):
"""Creates a vertex clustering based on the value of a vertex attribute.
Vertices having the same attribute will correspond to the same cluster.
@param graph: the graph on which we are working
@param attribute: name of the attribute on which the clustering
is based.
@param intervals: for numeric attributes, you can either pass a single
number or a list of numbers here. A single number means that the
vertices will be put in bins of that width and vertices ending up
in the same bin will be in the same cluster. A list of numbers
specify the bin positions explicitly; e.g., C{[10, 20, 30]} means
that there will be four categories: vertices with the attribute
value less than 10, between 10 and 20, between 20 and 30 and over 30.
Intervals are closed from the left and open from the right.
@param params: additional parameters to be stored in this object.
@return: a new VertexClustering object
"""
from bisect import bisect
def safeintdiv(x, y):
"""Safe integer division that handles None gracefully"""
if x is None:
return None
return int(x / y)
def safebisect(intervals, x):
"""Safe list bisection that handles None gracefully"""
if x is None:
return None
return bisect(intervals, x)
try:
_ = iter(intervals)
iterable = True
except TypeError:
iterable = False
if intervals is None:
vec = graph.vs[attribute]
elif iterable:
intervals = list(intervals)
vec = [safebisect(intervals, x) for x in graph.vs[attribute]]
else:
intervals = float(intervals)
vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]]
idgen = UniqueIdGenerator()
idgen[None] = None
vec = [idgen[i] for i in vec]
return cls(graph, vec, None, params)
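    # Sketch of the interval binning performed above (values are illustrative):
    # with intervals=[10, 20, 30], bisect() maps the attribute values 5, 12, 25
    # and 40 to bins 0, 1, 2 and 3, so they end up in four different clusters.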
def as_cover(self):
"""Returns a L{VertexCover} that contains the same clusters as this
clustering."""
return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None):
"""Returns a graph where each cluster is contracted into a single
vertex.
In the resulting graph, vertex M{i} represents cluster M{i} in this
clustering. Vertex M{i} and M{j} will be connected if there was
at least one connected vertex pair M{(a, b)} in the original graph such
that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster
M{j}.
@param combine_vertices: specifies how to derive the attributes of
the vertices in the new graph from the attributes of the old ones.
See L{Graph.contract_vertices()} for more details.
@param combine_edges: specifies how to derive the attributes of the
edges in the new graph from the attributes of the old ones. See
L{Graph.simplify()} for more details. If you specify C{False}
here, edges will not be combined, and the number of edges between
the vertices representing the original clusters will be equal to
the number of edges between the members of those clusters in the
original graph.
@return: the new graph.
"""
result = self.graph.copy()
result.contract_vertices(self.membership, combine_vertices)
if combine_edges != False:
result.simplify(combine_edges=combine_edges)
return result
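    # Typical usage for a VertexClustering instance `clustering` (the combiner
    # names are illustrative; see Graph.contract_vertices() and Graph.simplify()
    # for the accepted values):
    #     cg = clustering.cluster_graph(combine_vertices="first", combine_edges="sum")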
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = self.membership
return [membership[v1] != membership[v2] \
for v1, v2 in self.graph.get_edgelist()]
@property
def modularity(self):
"""Returns the modularity score"""
if self._modularity_dirty:
return self._recalculate_modularity_safe()
return self._modularity
q = modularity
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def recalculate_modularity(self):
"""Recalculates the stored modularity value.
This method must be called before querying the modularity score of the
clustering through the class member C{modularity} or C{q} if the
graph has been modified (edges have been added or removed) since the
creation of the L{VertexClustering} object.
@return: the new modularity score
"""
self._modularity = self._graph.modularity(self._membership,
**self._modularity_params)
self._modularity_dirty = False
return self._modularity
def _recalculate_modularity_safe(self):
"""Recalculates the stored modularity value and swallows all exceptions
raised by the modularity function (if any).
        @return: the new modularity score, or C{None} if the modularity
        could not be calculated.
"""
try:
return self.recalculate_modularity()
except:
return None
finally:
self._modularity_dirty = False
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def giant(self):
"""Returns the giant community of the clustered graph.
        The giant community is a community for which no larger community exists.
        @note: there can be multiple giant communities; this method will return
        a copy of an arbitrary one of them.
@return: a copy of the giant community.
@precondition: the vertex set of the graph hasn't been modified since
the moment the clustering was constructed.
"""
ss = self.sizes()
max_size = max(ss)
return self.subgraph(ss.index(max_size))
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the clustering to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
coloring the graph vertices according to the current clustering (unless
overridden by the C{vertex_color} argument explicitly).
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight some of the vertex groups by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the groups will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if palette is None:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = (
(group, color) for color, group in enumerate(self)
)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.membership
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
###############################################################################
class Dendrogram(object):
"""The hierarchical clustering (dendrogram) of some dataset.
A hierarchical clustering means that we know not only the way the
elements are separated into groups, but also the exact history of
how individual elements were joined into larger subgroups.
This class internally represents the hierarchy by a matrix with n rows
and 2 columns -- or more precisely, a list of lists of size 2. This is
exactly the same as the original format used by C{igraph}'s C core.
The M{i}th row of the matrix contains the indices of the two clusters
being joined in time step M{i}. The joint group will be represented by
the ID M{n+i}, with M{i} starting from one. The ID of the joint group
will be referenced in the upcoming steps instead of any of its individual
members. So, IDs less than or equal to M{n} (where M{n} is the number of
rows in the matrix) mean the original members of the dataset (with ID
from 0 to M{n}), while IDs up from M{n+1} mean joint groups. As an
example, take a look at the dendrogram and the internal representation of
a given clustering of five nodes::
0 -+
|
1 -+-+
|
2 ---+-+ <====> [[0, 1], [3, 4], [2, 5], [6, 7]]
|
3 -+ |
| |
4 -+---+---
@undocumented: _item_box_size, _plot_item, _traverse_inorder
"""
def __init__(self, merges):
"""Creates a hierarchical clustering.
@param merges: the merge history either in matrix or tuple format"""
self._merges = [tuple(pair) for pair in merges]
self._nmerges = len(self._merges)
if self._nmerges:
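            # The final merge normally references the group created one step
            # earlier, whose ID is nitems + nmerges - 2; solving that for nitems
            # gives the expression below.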
self._nitems = max(self._merges[-1])-self._nmerges+2
else:
self._nitems = 0
self._names = None
@staticmethod
def _convert_matrix_to_tuple_repr(merges, n=None):
"""Converts the matrix representation of a clustering to a tuple
representation.
@param merges: the matrix representation of the clustering
@return: the tuple representation of the clustering
"""
if n is None:
n = len(merges)+1
tuple_repr = range(n)
idxs = range(n)
for rowidx, row in enumerate(merges):
i, j = row
try:
idxi, idxj = idxs[i], idxs[j]
tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj])
tuple_repr[idxj] = None
except IndexError:
raise ValueError("malformed matrix, subgroup referenced "+
"before being created in step %d" % rowidx)
idxs.append(j)
return [x for x in tuple_repr if x is not None]
def _traverse_inorder(self):
"""Conducts an inorder traversal of the merge tree.
The inorder traversal returns the nodes on the last level in the order
they should be drawn so that no edges cross each other.
@return: the result of the inorder traversal in a list."""
result = []
seen_nodes = set()
for node_index in reversed(xrange(self._nitems+self._nmerges)):
if node_index in seen_nodes:
continue
stack = [node_index]
while stack:
last = stack.pop()
seen_nodes.add(last)
if last < self._nitems:
# 'last' is a regular node so the traversal ends here, we
# can append it to the results
result.append(last)
else:
# 'last' is a merge node, so let us proceed with the entry
# where this merge node was created
stack.extend(self._merges[last-self._nitems])
return result
def __str__(self):
return self.summary(verbosity=1)
def format(self, format="newick"):
"""Formats the dendrogram in a foreign format.
Currently only the Newick format is supported.
Example:
>>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])
>>> d.format()
'((2,3)4,(0,1)5)6;'
>>> d.names = list("ABCDEFG")
>>> d.format()
'((C,D)E,(A,B)F)G;'
"""
if format == "newick":
n = self._nitems + self._nmerges
if self._names is None:
nodes = range(n)
else:
nodes = list(self._names)
if len(nodes) < n:
nodes.extend("" for _ in xrange(n - len(nodes)))
for k, (i, j) in enumerate(self._merges, self._nitems):
nodes[k] = "(%s,%s)%s" % (nodes[i], nodes[j], nodes[k])
nodes[i] = nodes[j] = None
return nodes[-1] + ";"
raise ValueError("unsupported format: %r" % format)
def summary(self, verbosity=0, max_leaf_count=40):
"""Returns the summary of the dendrogram.
The summary includes the number of leafs and branches, and also an
ASCII art representation of the dendrogram unless it is too large.
@param verbosity: determines whether the ASCII representation of the
dendrogram should be printed. Zero verbosity prints only the number
of leafs and branches.
@param max_leaf_count: the maximal number of leafs to print in the
ASCII representation. If the dendrogram has more leafs than this
limit, the ASCII representation will not be printed even if the
verbosity is larger than or equal to 1.
@return: the summary of the dendrogram as a string.
"""
out = StringIO()
print >>out, "Dendrogram, %d elements, %d merges" % \
(self._nitems, self._nmerges)
if self._nitems == 0 or verbosity < 1 or self._nitems > max_leaf_count:
return out.getvalue().strip()
print >>out
positions = [None] * self._nitems
inorder = self._traverse_inorder()
distance = 2
level_distance = 2
nextp = 0
for idx, element in enumerate(inorder):
positions[element] = nextp
inorder[idx] = str(element)
nextp += max(distance, len(inorder[idx])+1)
width = max(positions)+1
# Print the nodes on the lowest level
print >>out, (" " * (distance-1)).join(inorder)
midx = 0
max_community_idx = self._nitems
while midx < self._nmerges:
char_array = [" "] * width
for position in positions:
if position >= 0:
char_array[position] = "|"
char_str = "".join(char_array)
for _ in xrange(level_distance-1):
print >>out, char_str # Print the lines
cidx_incr = 0
while midx < self._nmerges:
id1, id2 = self._merges[midx]
if id1 >= max_community_idx or id2 >= max_community_idx:
break
midx += 1
pos1, pos2 = positions[id1], positions[id2]
positions[id1], positions[id2] = -1, -1
if pos1 > pos2:
pos1, pos2 = pos2, pos1
positions.append((pos1+pos2) // 2)
dashes = "-" * (pos2 - pos1 - 1)
char_array[pos1:(pos2+1)] = "`%s'" % dashes
cidx_incr += 1
max_community_idx += cidx_incr
print >>out, "".join(char_array)
return out.getvalue().strip()
def _item_box_size(self, context, horiz, idx):
"""Calculates the amount of space needed for drawing an
individual vertex at the bottom of the dendrogram."""
if self._names is None or self._names[idx] is None:
x_bearing, _, _, height, x_advance, _ = context.text_extents("")
else:
x_bearing, _, _, height, x_advance, _ = context.text_extents(str(self._names[idx]))
if horiz:
return x_advance - x_bearing, height
return height, x_advance - x_bearing
# pylint: disable-msg=R0913
def _plot_item(self, context, horiz, idx, x, y):
"""Plots a dendrogram item to the given Cairo context
@param context: the Cairo context we are plotting on
@param horiz: whether the dendrogram is horizontally oriented
@param idx: the index of the item
@param x: the X position of the item
@param y: the Y position of the item
"""
if self._names is None or self._names[idx] is None:
return
height = self._item_box_size(context, True, idx)[1]
if horiz:
context.move_to(x, y+height)
context.show_text(str(self._names[idx]))
else:
context.save()
context.translate(x, y)
context.rotate(-pi/2.)
context.move_to(0, height)
context.show_text(str(self._names[idx]))
context.restore()
# pylint: disable-msg=C0103,W0613
# W0613 = unused argument 'palette'
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the dendrogram on the given Cairo context
Supported keyword arguments are:
- C{orientation}: the orientation of the dendrogram. Must be one of
the following values: C{left-right}, C{bottom-top}, C{right-left}
or C{top-bottom}. Individual elements are always placed at the
former edge and merges are performed towards the latter edge.
Possible aliases: C{horizontal} = C{left-right},
C{vertical} = C{bottom-top}, C{lr} = C{left-right},
C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.
The default is C{left-right}.
"""
from igraph.layout import Layout
if self._names is None:
self._names = [str(x) for x in xrange(self._nitems)]
orientation = str_to_orientation(kwds.get("orientation", "lr"),
reversed_vertical=True)
horiz = orientation in ("lr", "rl")
# Get the font height
font_height = context.font_extents()[2]
# Calculate space needed for individual items at the
# bottom of the dendrogram
item_boxes = [self._item_box_size(context, horiz, idx) \
for idx in xrange(self._nitems)]
# Small correction for cases when the right edge of the labels is
# aligned with the tips of the dendrogram branches
ygap = 2 if orientation == "bt" else 0
xgap = 2 if orientation == "lr" else 0
item_boxes = [(x+xgap, y+ygap) for x, y in item_boxes]
# Calculate coordinates
layout = Layout([(0, 0)] * self._nitems, dim=2)
inorder = self._traverse_inorder()
if not horiz:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (x, 0)
x += max(font_height, item_boxes[element][0])
for id1, id2 in self._merges:
y += 1
layout.append(((layout[id1][0]+layout[id2][0])/2., y))
# Mirror or rotate the layout if necessary
if orientation == "bt":
layout.mirror(1)
else:
x, y = 0, 0
for idx, element in enumerate(inorder):
layout[element] = (0, y)
y += max(font_height, item_boxes[element][1])
for id1, id2 in self._merges:
x += 1
layout.append((x, (layout[id1][1]+layout[id2][1])/2.))
# Mirror or rotate the layout if necessary
if orientation == "rl":
layout.mirror(0)
# Rescale layout to the bounding box
maxw = max(e[0] for e in item_boxes)
maxh = max(e[1] for e in item_boxes)
# w, h: width and height of the area containing the dendrogram
# tree without the items.
# delta_x, delta_y: displacement of the dendrogram tree
width, height = float(bbox.width), float(bbox.height)
delta_x, delta_y = 0, 0
if horiz:
width -= maxw
if orientation == "lr":
delta_x = maxw
else:
height -= maxh
if orientation == "tb":
delta_y = maxh
if horiz:
delta_y += font_height / 2.
else:
delta_x += font_height / 2.
layout.fit_into((delta_x, delta_y, width - delta_x, height - delta_y),
keep_aspect_ratio=False)
context.save()
context.translate(bbox.left, bbox.top)
context.set_source_rgb(0., 0., 0.)
context.set_line_width(1)
# Draw items
if horiz:
sgn = 0 if orientation == "rl" else -1
for idx in xrange(self._nitems):
x = layout[idx][0] + sgn * item_boxes[idx][0]
y = layout[idx][1] - item_boxes[idx][1]/2.
self._plot_item(context, horiz, idx, x, y)
else:
sgn = 1 if orientation == "bt" else 0
for idx in xrange(self._nitems):
x = layout[idx][0] - item_boxes[idx][0]/2.
y = layout[idx][1] + sgn * item_boxes[idx][1]
self._plot_item(context, horiz, idx, x, y)
# Draw dendrogram lines
if not horiz:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x0, y2)
context.line_to(x1, y2)
context.line_to(x1, y1)
context.stroke()
else:
for idx, (id1, id2) in enumerate(self._merges):
x0, y0 = layout[id1]
x1, y1 = layout[id2]
x2, y2 = layout[idx + self._nitems]
context.move_to(x0, y0)
context.line_to(x2, y0)
context.line_to(x2, y1)
context.line_to(x1, y1)
context.stroke()
context.restore()
@property
def merges(self):
"""Returns the performed merges in matrix format"""
return deepcopy(self._merges)
@property
def names(self):
"""Returns the names of the nodes in the dendrogram"""
return self._names
@names.setter
def names(self, items):
"""Sets the names of the nodes in the dendrogram"""
if items is None:
self._names = None
return
items = list(items)
if len(items) < self._nitems:
raise ValueError("must specify at least %d names" % self._nitems)
n = self._nitems + self._nmerges
self._names = items[:n]
if len(self._names) < n:
self._names.extend("" for _ in xrange(n-len(self._names)))
class VertexDendrogram(Dendrogram):
"""The dendrogram resulting from the hierarchical clustering of the
vertex set of a graph."""
def __init__(self, graph, merges, optimal_count = None, params = None,
modularity_params = None):
"""Creates a dendrogram object for a given graph.
@param graph: the graph that will be associated to the clustering
@param merges: the merges performed given in matrix form.
@param optimal_count: the optimal number of clusters where the
dendrogram should be cut. This is a hint usually provided by the
clustering algorithm that produces the dendrogram. C{None} means
that such a hint is not available; the optimal count will then be
selected based on the modularity in such a case.
@param params: additional parameters to be stored in this object.
@param modularity_params: arguments that should be passed to
L{Graph.modularity} when the modularity is (re)calculated. If the
original graph was weighted, you should pass a dictionary
containing a C{weight} key with the appropriate value here.
"""
Dendrogram.__init__(self, merges)
self._graph = graph
self._optimal_count = optimal_count
if modularity_params is None:
self._modularity_params = {}
else:
self._modularity_params = dict(modularity_params)
def as_clustering(self, n=None):
"""Cuts the dendrogram at the given level and returns a corresponding
L{VertexClustering} object.
@param n: the desired number of clusters. Merges are replayed from the
beginning until the membership vector has exactly M{n} distinct elements
or until there are no more recorded merges, whichever happens first.
If C{None}, the optimal count hint given by the clustering algorithm
        will be used. If the optimal count was not given either, it will be
calculated by selecting the level where the modularity is maximal.
@return: a new L{VertexClustering} object.
"""
if n is None:
n = self.optimal_count
num_elts = self._graph.vcount()
idgen = UniqueIdGenerator()
membership = community_to_membership(self._merges, num_elts, \
num_elts - n)
membership = [idgen[m] for m in membership]
return VertexClustering(self._graph, membership,
modularity_params=self._modularity_params)
@property
def optimal_count(self):
"""Returns the optimal number of clusters for this dendrogram.
If an optimal count hint was given at construction time, this
property simply returns the hint. If such a count was not given,
this method calculates the optimal number of clusters by maximizing
the modularity along all the possible cuts in the dendrogram.
"""
if self._optimal_count is not None:
return self._optimal_count
n = self._graph.vcount()
max_q, optimal_count = 0, 1
for step in xrange(min(n-1, len(self._merges))):
membs = community_to_membership(self._merges, n, step)
q = self._graph.modularity(membs, **self._modularity_params)
if q > max_q:
optimal_count = n-step
max_q = q
self._optimal_count = optimal_count
return optimal_count
@optimal_count.setter
def optimal_count(self, value):
self._optimal_count = max(int(value), 1)
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Draws the vertex dendrogram on the given Cairo context
See L{Dendrogram.__plot__} for the list of supported keyword
arguments."""
from igraph.drawing.metamagic import AttributeCollectorBase
class VisualVertexBuilder(AttributeCollectorBase):
_kwds_prefix = "vertex_"
label = None
builder = VisualVertexBuilder(self._graph.vs, kwds)
self._names = [vertex.label for vertex in builder]
self._names = [name if name is not None else str(idx)
for idx, name in enumerate(self._names)]
result = Dendrogram.__plot__(self, context, bbox, palette, \
*args, **kwds)
del self._names
return result
###############################################################################
class Cover(object):
"""Class representing a cover of an arbitrary ordered set.
Covers are similar to clusterings, but each element of the set may
belong to more than one cluster in a cover, and elements not belonging
to any cluster are also allowed.
L{Cover} instances provide a similar API as L{Clustering} instances;
for instance, iterating over a L{Cover} will iterate over the clusters
just like with a regular L{Clustering} instance. However, they are not
derived from each other or from a common superclass, and there might
be functions that exist only in one of them or the other.
Clusters of an individual cover can be accessed by the C{[]} operator:
>>> cl = Cover([[0,1,2,3], [2,3,4], [0,1,6]])
>>> cl[0]
[0, 1, 2, 3]
The membership vector can be accessed by the C{membership} property.
Note that contrary to L{Clustering} instances, the membership vector
will contain lists that contain the cluster indices each item belongs
to:
>>> cl.membership
[[0, 2], [0, 2], [0, 1], [0, 1], [1], [], [2]]
The number of clusters can be retrieved by the C{len} function:
>>> len(cl)
3
You can iterate over the cover as if it were a regular list of
clusters:
>>> for cluster in cl:
... print " ".join(str(idx) for idx in cluster)
...
0 1 2 3
2 3 4
0 1 6
If you need all the clusters at once as lists, you can simply convert
the cover to a list:
>>> cluster_list = list(cl)
>>> print cluster_list
[[0, 1, 2, 3], [2, 3, 4], [0, 1, 6]]
L{Clustering} objects can readily be converted to L{Cover} objects
using the constructor:
>>> clustering = Clustering([0, 0, 0, 0, 1, 1, 1, 2, 2, 2])
>>> cover = Cover(clustering)
>>> list(clustering) == list(cover)
True
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, clusters, n=0):
"""Constructs a cover with the given clusters.
@param clusters: the clusters in this cover, as a list or iterable.
Each cluster is specified by a list or tuple that contains the
IDs of the items in this cluster. IDs start from zero.
@param n: the total number of elements in the set that is covered
by this cover. If it is less than the number of unique elements
found in all the clusters, we will simply use the number of unique
elements, so it is safe to leave this at zero. You only have to
specify this parameter if there are some elements that are covered
by none of the clusters.
"""
self._clusters = [list(cluster) for cluster in clusters]
try:
self._n = max(max(cluster)+1 for cluster in self._clusters if cluster)
except ValueError:
self._n = 0
self._n = max(n, self._n)
def __getitem__(self, index):
"""Returns the cluster with the given index."""
return self._clusters[index]
def __iter__(self):
"""Iterates over the clusters in this cover."""
return iter(self._clusters)
def __len__(self):
"""Returns the number of clusters in this cover."""
return len(self._clusters)
def __str__(self):
"""Returns a string representation of the cover."""
return self.summary(verbosity=1, width=78)
@property
def membership(self):
"""Returns the membership vector of this cover.
The membership vector of a cover covering I{n} elements is a list of
length I{n}, where element I{i} contains the cluster indices of the
I{i}th item.
"""
result = [[] for _ in xrange(self._n)]
for idx, cluster in enumerate(self):
for item in cluster:
result[item].append(idx)
return result
@property
def n(self):
"""Returns the number of elements in the set covered by this cover."""
return self._n
def size(self, idx):
"""Returns the size of a given cluster.
@param idx: the cluster in which we are interested.
"""
return len(self[idx])
def sizes(self, *args):
"""Returns the size of given clusters.
The indices are given as positional arguments. If there are no
positional arguments, the function will return the sizes of all clusters.
"""
if args:
return [len(self._clusters[idx]) for idx in args]
return [len(cluster) for cluster in self]
def size_histogram(self, bin_width = 1):
"""Returns the histogram of cluster sizes.
@param bin_width: the bin width of the histogram
@return: a L{Histogram} object
"""
return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None):
"""Returns the summary of the cover.
        The summary includes the number of clusters, and also the
        list of members for each of the clusters if the verbosity is nonzero.
@param verbosity: determines whether the cluster members should be
printed. Zero verbosity prints the number of items and clusters only.
@return: the summary of the cover as a string.
"""
out = StringIO()
print >>out, "Cover with %d clusters" % len(self)
if verbosity < 1:
return out.getvalue().strip()
ndigits = len(str(len(self)))
wrapper = _get_wrapper_for_width(width,
subsequent_indent = " " * (ndigits+3))
for idx, cluster in enumerate(self._formatted_cluster_iterator()):
wrapper.initial_indent = "[%*d] " % (ndigits, idx)
print >>out, "\n".join(wrapper.wrap(cluster))
return out.getvalue().strip()
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class VertexCover(Cover):
"""The cover of the vertex set of a graph.
This class extends L{Cover} by linking it to a specific L{Graph} object.
It also provides some handy methods like getting the subgraph corresponding
to a cluster and such.
@note: since this class is linked to a L{Graph}, destroying the graph by the
C{del} operator does not free the memory occupied by the graph if there
exists a L{VertexCover} that references the L{Graph}.
@undocumented: _formatted_cluster_iterator
"""
def __init__(self, graph, clusters = None):
"""Creates a cover object for a given graph.
@param graph: the graph that will be associated to the cover
@param clusters: the list of clusters. If C{None}, it is assumed
that there is only a single cluster that covers the whole graph.
"""
if clusters is None:
clusters = [range(graph.vcount())]
Cover.__init__(self, clusters, n = graph.vcount())
if self._n > graph.vcount():
raise ValueError("cluster list contains vertex ID larger than the "
"number of vertices in the graph")
self._graph = graph
def crossing(self):
"""Returns a boolean vector where element M{i} is C{True} iff edge
M{i} lies between clusters, C{False} otherwise."""
membership = [frozenset(cluster) for cluster in self.membership]
return [membership[v1].isdisjoint(membership[v2]) \
for v1, v2 in self.graph.get_edgelist()]
@property
def graph(self):
"""Returns the graph belonging to this object"""
return self._graph
def subgraph(self, idx):
"""Get the subgraph belonging to a given cluster.
@param idx: the cluster index
@return: a copy of the subgraph
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return self._graph.subgraph(self[idx])
def subgraphs(self):
"""Gets all the subgraphs belonging to each of the clusters.
@return: a list containing copies of the subgraphs
@precondition: the vertex set of the graph hasn't been modified since
the moment the cover was constructed.
"""
return [self._graph.subgraph(cl) for cl in self]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cover to the given Cairo context in the given
bounding box.
This is done by calling L{Graph.__plot__()} with the same arguments, but
drawing nice colored blobs around the vertex groups.
This method understands all the positional and keyword arguments that
are understood by L{Graph.__plot__()}, only the differences will be
highlighted here:
- C{mark_groups}: whether to highlight the vertex clusters by
colored polygons. Besides the values accepted by L{Graph.__plot__}
(i.e., a dict mapping colors to vertex indices, a list containing
lists of vertex indices, or C{False}), the following are also
accepted:
- C{True}: all the clusters will be highlighted, the colors matching
the corresponding color indices from the current palette
            (see the C{palette} keyword argument of L{Graph.__plot__}).
- A dict mapping cluster indices or tuples of vertex indices to
color names. The given clusters or vertex groups will be
highlighted by the given colors.
- A list of cluster indices. This is equivalent to passing a
dict mapping numeric color indices from the current palette
to cluster indices; therefore, the cluster referred to by element
I{i} of the list will be highlighted by color I{i} from the
palette.
The value of the C{plotting.mark_groups} configuration key is also
taken into account here; if that configuration key is C{True} and
C{mark_groups} is not given explicitly, it will automatically be set
to C{True}.
In place of lists of vertex indices, you may also use L{VertexSeq}
instances.
In place of color names, you may also use color indices into the
current palette. C{None} as a color name will mean that the
corresponding group is ignored.
- C{palette}: the palette used to resolve numeric color indices to RGBA
values. By default, this is an instance of L{ClusterColoringPalette}.
@see: L{Graph.__plot__()} for more supported keyword arguments.
"""
if "edge_color" not in kwds and "color" not in self.graph.edge_attributes():
# Set up a default edge coloring based on internal vs external edges
colors = ["grey20", "grey80"]
kwds["edge_color"] = [colors[is_crossing]
for is_crossing in self.crossing()]
if "palette" in kwds:
palette = kwds["palette"]
else:
palette = ClusterColoringPalette(len(self))
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
kwds["mark_groups"] = enumerate(self)
else:
kwds["mark_groups"] = _handle_mark_groups_arg_for_clustering(
kwds["mark_groups"], self)
return self._graph.__plot__(context, bbox, palette, *args, **kwds)
def _formatted_cluster_iterator(self):
"""Iterates over the clusters and formats them into a string to be
presented in the summary."""
if self._graph.is_named():
names = self._graph.vs["name"]
for cluster in self:
yield ", ".join(str(names[member]) for member in cluster)
else:
for cluster in self:
yield ", ".join(str(member) for member in cluster)
class CohesiveBlocks(VertexCover):
"""The cohesive block structure of a graph.
Instances of this type are created by L{Graph.cohesive_blocks()}. See
the documentation of L{Graph.cohesive_blocks()} for an explanation of
what cohesive blocks are.
This class provides a few more methods that make handling of cohesive
block structures easier.
"""
def __init__(self, graph, blocks = None, cohesion = None, parent = None):
"""Constructs a new cohesive block structure for the given graph.
If any of I{blocks}, I{cohesion} or I{parent} is C{None}, all the
arguments will be ignored and L{Graph.cohesive_blocks()} will be
called to calculate the cohesive blocks. Otherwise, these three
variables should describe the *result* of a cohesive block structure
calculation. Chances are that you never have to construct L{CohesiveBlocks}
instances directly, just use L{Graph.cohesive_blocks()}.
@param graph: the graph itself
@param blocks: a list containing the blocks; each block is described
as a list containing vertex IDs.
@param cohesion: the cohesion of each block. The length of this list
must be equal to the length of I{blocks}.
@param parent: the parent block of each block. Negative values or
C{None} mean that there is no parent block for that block. There
should be only one parent block, which covers the entire graph.
@see: Graph.cohesive_blocks()
"""
if blocks is None or cohesion is None or parent is None:
blocks, cohesion, parent = graph.cohesive_blocks()
VertexCover.__init__(self, graph, blocks)
self._cohesion = cohesion
self._parent = parent
for idx, p in enumerate(self._parent):
if p < 0:
self._parent[idx] = None
def cohesion(self, idx):
"""Returns the cohesion of the group with the given index."""
return self._cohesion[idx]
def cohesions(self):
"""Returns the list of cohesion values for each group."""
return self._cohesion[:]
def hierarchy(self):
"""Returns a new graph that describes the hierarchical relationships
between the groups.
The new graph will be a directed tree; an edge will point from
vertex M{i} to vertex M{j} if group M{i} is a superset of group M{j}.
In other words, the edges point downwards.
"""
from igraph import Graph
edges = [pair for pair in izip(self._parent, xrange(len(self)))
if pair[0] is not None]
return Graph(edges, directed=True)
def max_cohesion(self, idx):
"""Finds the maximum cohesion score among all the groups that contain
the given vertex."""
result = 0
for cohesion, cluster in izip(self._cohesion, self._clusters):
if idx in cluster:
result = max(result, cohesion)
return result
def max_cohesions(self):
"""For each vertex in the graph, returns the maximum cohesion score
among all the groups that contain the vertex."""
result = [0] * self._graph.vcount()
for cohesion, cluster in izip(self._cohesion, self._clusters):
for idx in cluster:
result[idx] = max(result[idx], cohesion)
return result
def parent(self, idx):
"""Returns the parent group index of the group with the given index
or C{None} if the given group is the root."""
return self._parent[idx]
def parents(self):
"""Returns the list of parent group indices for each group or C{None}
if the given group is the root."""
return self._parent[:]
def __plot__(self, context, bbox, palette, *args, **kwds):
"""Plots the cohesive block structure to the given Cairo context in
the given bounding box.
Since a L{CohesiveBlocks} instance is also a L{VertexCover}, keyword
arguments accepted by L{VertexCover.__plot__()} are also accepted here.
The only difference is that the vertices are colored according to their
maximal cohesions by default, and groups are marked by colored blobs
except the last group which encapsulates the whole graph.
See the documentation of L{VertexCover.__plot__()} for more details.
"""
prepare_groups = False
if "mark_groups" not in kwds:
if Configuration.instance()["plotting.mark_groups"]:
prepare_groups = True
elif kwds["mark_groups"] == True:
prepare_groups = True
if prepare_groups:
colors = [pair for pair in enumerate(self.cohesions())
if pair[1] > 1]
kwds["mark_groups"] = colors
if "vertex_color" not in kwds:
kwds["vertex_color"] = self.max_cohesions()
return VertexCover.__plot__(self, context, bbox, palette, *args, **kwds)
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering):
"""Handles the mark_groups=... keyword argument in plotting methods of
clusterings.
This is an internal method, you shouldn't need to mess around with it.
Its purpose is to handle the extended semantics of the mark_groups=...
keyword argument in the C{__plot__} method of L{VertexClustering} and
L{VertexCover} instances, namely the feature that numeric IDs are resolved
to clusters automatically.
"""
# Handle the case of mark_groups = True, mark_groups containing a list or
    # tuple of cluster IDs, and mark_groups yielding (cluster ID, color)
# pairs
if mark_groups is True:
group_iter = ((group, color) for color, group in enumerate(clustering))
elif isinstance(mark_groups, dict):
group_iter = mark_groups.iteritems()
elif hasattr(mark_groups, "__getitem__") and hasattr(mark_groups, "__len__"):
# Lists, tuples
try:
first = mark_groups[0]
except:
# Hmm. Maybe not a list or tuple?
first = None
if first is not None:
# Okay. Is the first element of the list a single number?
if isinstance(first, (int, long)):
# Yes. Seems like we have a list of cluster indices.
# Assign color indices automatically.
group_iter = ((group, color)
for color, group in enumerate(mark_groups))
else:
# No. Seems like we have good ol' group-color pairs.
group_iter = mark_groups
else:
group_iter = mark_groups
elif hasattr(mark_groups, "__iter__"):
# Iterators etc
group_iter = mark_groups
else:
group_iter = {}.iteritems()
def cluster_index_resolver():
for group, color in group_iter:
if isinstance(group, (int, long)):
group = clustering[group]
yield group, color
return cluster_index_resolver()
##############################################################
def _prepare_community_comparison(comm1, comm2, remove_none=False):
"""Auxiliary method that takes two community structures either as
membership lists or instances of L{Clustering}, and returns a
tuple whose two elements are membership lists.
This is used by L{compare_communities} and L{split_join_distance}.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}
or C{comm2} will result in an exception. If C{remove_none} is C{True},
C{None} values are filtered away and only the remaining lists are
compared.
"""
def _ensure_list(obj):
if isinstance(obj, Clustering):
return obj.membership
return list(obj)
vec1, vec2 = _ensure_list(comm1), _ensure_list(comm2)
if len(vec1) != len(vec2):
raise ValueError("the two membership vectors must be equal in length")
if remove_none and (None in vec1 or None in vec2):
idxs_to_remove = [i for i in xrange(len(vec1)) \
if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
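        # Swap the entries to be dropped to the tail of both vectors and then
        # truncate, instead of deleting the elements one by one.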
n = len(vec1)
for i in idxs_to_remove:
n -= 1
vec1[i], vec1[n] = vec1[n], vec1[i]
vec2[i], vec2[n] = vec2[n], vec2[i]
del vec1[n:]
del vec2[n:]
return vec1, vec2
def compare_communities(comm1, comm2, method="vi", remove_none=False):
"""Compares two community structures using various distance measures.
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param method: the measure to use. C{"vi"} or C{"meila"} means the
variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}
means the normalized mutual information as defined by Danon et al (2005),
C{"split-join"} means the split-join distance of van Dongen (2000),
C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}
means the adjusted Rand index of Hubert and Arabie (1985).
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the calculated measure.
@newfield ref: Reference
@ref: Meila M: Comparing clusterings by the variation of information.
In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel
Machines: 16th Annual Conference on Computational Learning Theory
      and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA.
Lecture Notes in Computer Science, vol. 2777, Springer, 2003.
ISBN: 978-3-540-40720-1.
@ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community
structure identification. J Stat Mech P09008, 2005.
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@ref: Rand WM: Objective criteria for the evaluation of clustering
methods. J Am Stat Assoc 66(336):846-850, 1971.
@ref: Hubert L and Arabie P: Comparing partitions. Journal of
Classification 2:193-218, 1985.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._compare_communities(vec1, vec2, method)
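# Illustrative call (the membership vectors are made up): two partitions that are
# identical up to relabelling, e.g. compare_communities([0, 0, 1, 1], [1, 1, 0, 0],
# method="vi"), yield a distance of 0 because the variation of information does not
# depend on the cluster labels.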
def split_join_distance(comm1, comm2, remove_none=False):
"""Calculates the split-join distance between two community structures.
The split-join distance is a distance measure defined on the space of
partitions of a given set. It is the sum of the projection distance of
one partition from the other and vice versa, where the projection
    number of A from B is calculated as follows:
1. For each set in A, find the set in B with which it has the
maximal overlap, and take note of the size of the overlap.
2. Take the sum of the maximal overlap sizes for each set in A.
3. Subtract the sum from M{n}, the number of elements in the
partition.
    Note that the projection distance is asymmetric; that is why it has to be
calculated in both directions and then added together. This function
returns the projection distance of C{comm1} from C{comm2} and the
projection distance of C{comm2} from C{comm1}, and returns them in a pair.
The actual split-join distance is the sum of the two distances. The reason
why it is presented this way is that one of the elements being zero then
implies that one of the partitions is a subpartition of the other (and if
it is close to zero, then one of the partitions is close to being a
subpartition of the other).
@param comm1: the first community structure as a membership list or
as a L{Clustering} object.
@param comm2: the second community structure as a membership list or
as a L{Clustering} object.
@param remove_none: whether to remove C{None} entries from the membership
lists. This is handy if your L{Clustering} object was constructed using
L{VertexClustering.FromAttribute} using an attribute which was not defined
for all the vertices. If C{remove_none} is C{False}, a C{None} entry in
either C{comm1} or C{comm2} will result in an exception. If C{remove_none}
is C{True}, C{None} values are filtered away and only the remaining lists
are compared.
@return: the projection distance of C{comm1} from C{comm2} and vice versa
in a tuple. The split-join distance is the sum of the two.
@newfield ref: Reference
@ref: van Dongen D: Performance criteria for graph clustering and Markov
cluster experiments. Technical Report INS-R0012, National Research
Institute for Mathematics and Computer Science in the Netherlands,
Amsterdam, May 2000.
@see: L{compare_communities()} with C{method = "split-join"} if you are
not interested in the individual projection distances but only the
sum of them.
"""
import igraph._igraph
vec1, vec2 = _prepare_community_comparison(comm1, comm2, remove_none)
return igraph._igraph._split_join_distance(vec1, vec2)
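# --- illustrative note (not part of the original module) ---------------------
# A hand-worked example of the projection bookkeeping described in the
# docstring above, using the same made-up membership lists as the sketch after
# compare_communities(). With first = [0, 0, 0, 1, 1, 1] and
# second = [0, 0, 1, 1, 2, 2]:
#   * projecting `first` onto `second`: the best overlaps are 2 and 2, so 6 - 4 = 2
#   * projecting `second` onto `first`: the best overlaps are 2, 1 and 2, so 6 - 5 = 1
# Worked by hand from the definition above, the calls should therefore give:
#
#     split_join_distance(first, second)                       # -> (2, 1)
#     compare_communities(first, second, method="split-join")  # -> 3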
| 39.722623 | 95 | 0.622215 | ["MIT"] | tuandnvn/ecat_learning | igraph/clustering.py | 66,025 | Python |
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for hts_verbose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl.testing import absltest
from third_party.nucleus.io.python import hts_verbose
class HtsVerbose(absltest.TestCase):
def test_set(self):
hts_verbose.set(hts_verbose.htsLogLevel.HTS_LOG_TRACE)
level = hts_verbose.get()
self.assertEqual(level, hts_verbose.htsLogLevel.HTS_LOG_TRACE)
hts_verbose.set(hts_verbose.htsLogLevel.HTS_LOG_INFO)
level = hts_verbose.get()
self.assertEqual(level, hts_verbose.htsLogLevel.HTS_LOG_INFO)
hts_verbose.set(hts_verbose.htsLogLevel['HTS_LOG_DEBUG'])
level = hts_verbose.get()
self.assertEqual(level, hts_verbose.htsLogLevel.HTS_LOG_DEBUG)
if __name__ == '__main__':
absltest.main()
| 38.746032 | 78 | 0.781237 | ["BSD-3-Clause"] | Abzollo/deepvariant | third_party/nucleus/io/python/hts_verbose_test.py | 2,441 | Python |
"""Tests for the sensor drivers."""
import pytest
import mock
from pytest_lazyfixture import lazy_fixture # type: ignore[import]
from unittest.mock import patch
from mock.mock import AsyncMock
from tests.conftest import MockCanMessageNotifier
from opentrons_hardware.sensors import fdc1004, hdc2080, mmr920C04, sensor_abc
from opentrons_hardware.firmware_bindings import ArbitrationId, ArbitrationIdParts
from opentrons_hardware.firmware_bindings.constants import SensorType, SensorId, NodeId
from opentrons_hardware.drivers.can_bus.can_messenger import (
CanMessenger,
)
from opentrons_hardware.firmware_bindings.utils import (
UInt8Field,
UInt16Field,
UInt32Field,
Int32Field,
)
from opentrons_hardware.firmware_bindings.messages.message_definitions import (
BaselineSensorRequest,
ReadFromSensorRequest,
SetSensorThresholdRequest,
WriteToSensorRequest,
ReadFromSensorResponse,
SensorThresholdResponse,
BindSensorOutputRequest,
PeripheralStatusRequest,
PeripheralStatusResponse,
)
from opentrons_hardware.firmware_bindings.messages.messages import MessageDefinition
from opentrons_hardware.firmware_bindings.messages.payloads import (
BaselineSensorRequestPayload,
ReadFromSensorRequestPayload,
WriteToSensorRequestPayload,
ReadFromSensorResponsePayload,
SensorThresholdResponsePayload,
BindSensorOutputRequestPayload,
PeripheralStatusResponsePayload,
)
from opentrons_hardware.firmware_bindings.messages.fields import (
SensorTypeField,
SensorIdField,
SensorOutputBindingField,
)
from opentrons_hardware.sensors.utils import SensorDataType
from opentrons_hardware.firmware_bindings.constants import SensorOutputBinding
@pytest.fixture
def pressure_sensor() -> mmr920C04.PressureSensor:
"""Fixture for pressure sensor driver."""
return mmr920C04.PressureSensor()
@pytest.fixture
def capacitive_sensor() -> fdc1004.CapacitiveSensor:
"""Fixture for capacitive sensor driver."""
return fdc1004.CapacitiveSensor()
@pytest.fixture
def temperature_sensor() -> hdc2080.EnvironmentSensor:
"""Fixture for temperature sensor driver."""
return hdc2080.EnvironmentSensor(SensorType.temperature)
@pytest.fixture
def humidity_sensor() -> hdc2080.EnvironmentSensor:
"""Fixture for humidity sensor driver."""
return hdc2080.EnvironmentSensor(SensorType.humidity)
@pytest.mark.parametrize(
argnames=["sensor", "node", "message"],
argvalues=[
[
lazy_fixture("pressure_sensor"),
NodeId.pipette_left,
BaselineSensorRequest(
payload=BaselineSensorRequestPayload(
sensor=SensorTypeField(SensorType.pressure),
sensor_id=SensorIdField(SensorId.S0),
sample_rate=UInt16Field(10),
)
),
],
[
lazy_fixture("capacitive_sensor"),
NodeId.pipette_left,
BaselineSensorRequest(
payload=BaselineSensorRequestPayload(
sensor=SensorTypeField(SensorType.capacitive),
sensor_id=SensorIdField(SensorId.S0),
sample_rate=UInt16Field(10),
)
),
],
],
)
async def test_polling(
sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition
) -> None:
"""Test that a polling function sends the expected message."""
messenger = mock.AsyncMock(spec=CanMessenger)
await sensor.get_baseline(messenger, node, 10, 10)
messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(
argnames=["sensor"],
argvalues=[
[lazy_fixture("pressure_sensor")],
[lazy_fixture("capacitive_sensor")],
],
)
async def test_receive_data_polling(
sensor: sensor_abc.AbstractAdvancedSensor,
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
) -> None:
"""Test that data is received from the polling function."""
def responder(node_id: NodeId, message: MessageDefinition) -> None:
"""Message responder."""
can_message_notifier.notify(
ReadFromSensorResponse(
payload=ReadFromSensorResponsePayload(
sensor_data=Int32Field(256),
sensor_id=SensorIdField(SensorId.S0),
sensor=SensorTypeField(sensor._sensor_type),
)
),
ArbitrationId(
parts=ArbitrationIdParts(
message_id=ReadFromSensorResponse.message_id,
node_id=NodeId.host,
function_code=0,
originating_node_id=node_id,
)
),
)
mock_messenger.send.side_effect = responder
return_data = await sensor.get_baseline(mock_messenger, NodeId.pipette_left, 10, 10)
assert return_data == SensorDataType.build([0x0, 0x1, 0x0])
@pytest.mark.parametrize(
argnames=["sensor", "node", "message"],
argvalues=[
[
lazy_fixture("pressure_sensor"),
NodeId.pipette_left,
WriteToSensorRequest(
payload=WriteToSensorRequestPayload(
sensor=SensorTypeField(SensorType.pressure),
sensor_id=SensorIdField(SensorId.S0),
data=UInt32Field(SensorDataType.build([0x2, 0x2, 0x0, 0x0]).to_int),
reg_address=UInt8Field(0x0),
)
),
],
[
lazy_fixture("capacitive_sensor"),
NodeId.pipette_left,
WriteToSensorRequest(
payload=WriteToSensorRequestPayload(
sensor=SensorTypeField(SensorType.capacitive),
sensor_id=SensorIdField(SensorId.S0),
data=UInt32Field(SensorDataType.build([0x2, 0x2, 0x0, 0x0]).to_int),
reg_address=UInt8Field(0x0),
)
),
],
[
lazy_fixture("temperature_sensor"),
NodeId.pipette_left,
WriteToSensorRequest(
payload=WriteToSensorRequestPayload(
sensor=SensorTypeField(SensorType.temperature),
sensor_id=SensorIdField(SensorId.S0),
data=UInt32Field(SensorDataType.build([0x2, 0x2, 0x0, 0x0]).to_int),
reg_address=UInt8Field(0x0),
)
),
],
[
lazy_fixture("humidity_sensor"),
NodeId.pipette_left,
WriteToSensorRequest(
payload=WriteToSensorRequestPayload(
sensor=SensorTypeField(SensorType.humidity),
sensor_id=SensorIdField(SensorId.S0),
data=UInt32Field(SensorDataType.build([0x2, 0x2, 0x0, 0x0]).to_int),
reg_address=UInt8Field(0x0),
)
),
],
],
)
async def test_write(
sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition
) -> None:
"""Check that writing sensor data is successful."""
data = SensorDataType.build([0x2, 0x2, 0x0, 0x0])
messenger = mock.AsyncMock(spec=CanMessenger)
await sensor.write(messenger, NodeId.pipette_left, data)
messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(
argnames=["sensor", "node", "message"],
argvalues=[
[
lazy_fixture("pressure_sensor"),
NodeId.pipette_left,
ReadFromSensorRequest(
payload=ReadFromSensorRequestPayload(
sensor=SensorTypeField(SensorType.pressure),
sensor_id=SensorIdField(SensorId.S0),
offset_reading=UInt8Field(False),
)
),
],
[
lazy_fixture("capacitive_sensor"),
NodeId.pipette_left,
ReadFromSensorRequest(
payload=ReadFromSensorRequestPayload(
sensor=SensorTypeField(SensorType.capacitive),
sensor_id=SensorIdField(SensorId.S0),
offset_reading=UInt8Field(False),
)
),
],
[
lazy_fixture("temperature_sensor"),
NodeId.pipette_left,
ReadFromSensorRequest(
payload=ReadFromSensorRequestPayload(
sensor=SensorTypeField(SensorType.temperature),
sensor_id=SensorIdField(SensorId.S0),
offset_reading=UInt8Field(False),
)
),
],
[
lazy_fixture("humidity_sensor"),
NodeId.pipette_left,
ReadFromSensorRequest(
payload=ReadFromSensorRequestPayload(
sensor=SensorTypeField(SensorType.humidity),
sensor_id=SensorIdField(SensorId.S0),
offset_reading=UInt8Field(False),
)
),
],
],
)
async def test_read(
sensor: sensor_abc.AbstractAdvancedSensor, node: NodeId, message: MessageDefinition
) -> None:
"""Test that a read function sends the expected message."""
messenger = mock.AsyncMock(spec=CanMessenger)
await sensor.read(messenger, node, False)
messenger.send.assert_called_once_with(node_id=node, message=message)
@pytest.mark.parametrize(
argnames=["sensor"],
argvalues=[
[lazy_fixture("pressure_sensor")],
[lazy_fixture("capacitive_sensor")],
[lazy_fixture("temperature_sensor")],
[lazy_fixture("humidity_sensor")],
],
)
async def test_receive_data_read(
sensor: sensor_abc.AbstractAdvancedSensor,
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
) -> None:
"""Test that data is received from the read function."""
def responder(node_id: NodeId, message: MessageDefinition) -> None:
"""Message responder."""
can_message_notifier.notify(
ReadFromSensorResponse(
payload=ReadFromSensorResponsePayload(
sensor_data=Int32Field(256),
sensor_id=SensorIdField(SensorId.S0),
sensor=SensorTypeField(sensor._sensor_type),
)
),
ArbitrationId(
parts=ArbitrationIdParts(
message_id=ReadFromSensorResponse.message_id,
node_id=NodeId.host,
function_code=0,
originating_node_id=node_id,
)
),
)
mock_messenger.send.side_effect = responder
return_data = await sensor.read(mock_messenger, NodeId.pipette_left, False, 10)
assert return_data == SensorDataType.build([0x0, 0x1, 0x0])
@pytest.mark.parametrize(
argnames=["sensor"],
argvalues=[[lazy_fixture("pressure_sensor")], [lazy_fixture("capacitive_sensor")]],
)
async def test_threshold(
sensor: sensor_abc.AbstractAdvancedSensor,
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
) -> None:
"""Test that data is received from the threshold function."""
def responder(node_id: NodeId, message: MessageDefinition) -> None:
"""Message responder."""
if isinstance(message, SetSensorThresholdRequest):
can_message_notifier.notify(
SensorThresholdResponse(
payload=SensorThresholdResponsePayload(
threshold=message.payload.threshold,
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
mode=message.payload.mode,
)
),
ArbitrationId(
parts=ArbitrationIdParts(
message_id=ReadFromSensorResponse.message_id,
node_id=NodeId.host,
function_code=0,
originating_node_id=node_id,
)
),
)
threshold = SensorDataType.build([0x0, 0x5])
mock_messenger.send.side_effect = responder
return_data = await sensor.send_zero_threshold(
mock_messenger, NodeId.pipette_left, threshold, 10
)
assert return_data == threshold
@pytest.mark.parametrize(
argnames=["node_id", "timeout", "sensor"],
argvalues=[
[NodeId.pipette_left, 1, lazy_fixture("pressure_sensor")],
[NodeId.pipette_left, 5, lazy_fixture("capacitive_sensor")],
],
)
async def test_bind_to_sync(
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
sensor: sensor_abc.AbstractAdvancedSensor,
node_id: NodeId,
timeout: int,
) -> None:
"""Test for bind_to_sync.
Tests that bind_to_sync does in fact
send out a BindSensorOutputRequest.
"""
async with sensor.bind_output(
mock_messenger,
node_id,
SensorOutputBinding.sync,
):
mock_messenger.send.assert_called_with(
node_id=node_id,
message=BindSensorOutputRequest(
payload=BindSensorOutputRequestPayload(
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
binding=SensorOutputBindingField(SensorOutputBinding.sync),
)
),
)
mock_messenger.send.assert_called_with(
node_id=node_id,
message=BindSensorOutputRequest(
payload=BindSensorOutputRequestPayload(
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
binding=SensorOutputBindingField(SensorOutputBinding.none),
)
),
)
@pytest.mark.parametrize(
argnames=["sensor", "node_id", "timeout"],
argvalues=[
[lazy_fixture("capacitive_sensor"), NodeId.pipette_right, 10],
[lazy_fixture("pressure_sensor"), NodeId.pipette_left, 2],
],
)
async def test_get_baseline(
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
sensor: sensor_abc.AbstractAdvancedSensor,
node_id: NodeId,
timeout: int,
) -> None:
"""Test for get_baseline.
Tests that a BaselineSensorRequest gets sent,
and reads ReadFromSensorResponse message containing the
correct information.
"""
def responder(node_id: NodeId, message: MessageDefinition) -> None:
"""Message responder."""
if isinstance(message, BaselineSensorRequest):
can_message_notifier.notify(
ReadFromSensorResponse(
payload=ReadFromSensorResponsePayload(
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
sensor_data=Int32Field(50),
)
),
ArbitrationId(
parts=ArbitrationIdParts(
message_id=ReadFromSensorResponse.message_id,
node_id=node_id,
function_code=0,
originating_node_id=node_id,
)
),
)
mock_messenger.send.side_effect = responder
baseline = await sensor.get_baseline(mock_messenger, node_id, 100, timeout)
assert baseline == SensorDataType.build(Int32Field(50))
@pytest.mark.parametrize(
argnames=["sensor", "node_id", "timeout"],
argvalues=[
[lazy_fixture("capacitive_sensor"), NodeId.pipette_left, 2],
[lazy_fixture("pressure_sensor"), NodeId.pipette_right, 3],
],
)
async def test_debug_poll(
mock_messenger: mock.AsyncMock,
sensor: sensor_abc.AbstractAdvancedSensor,
node_id: NodeId,
timeout: int,
) -> None:
"""Test for debug poll."""
async with sensor.bind_output(mock_messenger, node_id, SensorOutputBinding.report):
for i in range(2):
with patch.object(
sensor._scheduler,
"_wait_for_response",
new=AsyncMock(return_value=SensorDataType.build(50)),
):
data = await sensor.get_report(node_id, mock_messenger, timeout)
assert data == SensorDataType.build(Int32Field(50))
mock_messenger.send.assert_called_with(
node_id=node_id,
message=BindSensorOutputRequest(
payload=BindSensorOutputRequestPayload(
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
binding=SensorOutputBindingField(SensorOutputBinding.none),
)
),
)
@pytest.mark.parametrize(
argnames=["sensor", "node_id", "timeout"],
argvalues=[
[lazy_fixture("capacitive_sensor"), NodeId.pipette_left, 2],
[lazy_fixture("pressure_sensor"), NodeId.pipette_right, 3],
[lazy_fixture("temperature_sensor"), NodeId.pipette_left, 2],
[lazy_fixture("humidity_sensor"), NodeId.pipette_right, 2],
],
)
async def test_peripheral_status(
mock_messenger: mock.AsyncMock,
can_message_notifier: MockCanMessageNotifier,
sensor: sensor_abc.AbstractAdvancedSensor,
node_id: NodeId,
timeout: int,
) -> None:
"""Test for getting peripheral device status."""
def responder(node_id: NodeId, message: MessageDefinition) -> None:
"""Message responder."""
if isinstance(message, PeripheralStatusRequest):
can_message_notifier.notify(
PeripheralStatusResponse(
payload=PeripheralStatusResponsePayload(
sensor=SensorTypeField(sensor._sensor_type),
sensor_id=SensorIdField(SensorId.S0),
status=UInt8Field(0x1),
)
),
ArbitrationId(
parts=ArbitrationIdParts(
message_id=ReadFromSensorResponse.message_id,
node_id=node_id,
function_code=0,
originating_node_id=node_id,
)
),
)
mock_messenger.send.side_effect = responder
status = await sensor.get_device_status(mock_messenger, node_id, timeout)
assert status
| 35.263757 | 88 | 0.619727 | ["Apache-2.0"] | Opentrons/protocol_framework | hardware/tests/opentrons_hardware/sensors/test_sensor_drivers.py | 18,584 | Python |
import pytest
def test_load_module():
__import__("modules.core.disk")
| 12.666667 | 35 | 0.736842 | ["MIT"] | Aliuakbar/bumblebee-status | tests/modules/core/test_disk.py | 76 | Python |
"""
Common functions for tests
"""
__author__ = "Dan Gunter <[email protected]>"
__date__ = "10/29/13"
# Stdlib
import json
import logging
import os
import subprocess
import sys
import tempfile
import traceback
import unittest
# Third-party
from mongomock import MongoClient
import pymongo
# Package
from pymatgen.db.query_engine import QueryEngine
from pymatgen.db.builders.incr import CollectionTracker
_log = logging.getLogger("pymatgen.db.tests")
TEST_FILES_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"..", "..", "..", "test_files",
)
def has_mongo():
"""Determine if MongoDB is up and usable"""
if os.environ.get("MP_FAKEMONGO"):
mongo = False
else:
try:
pymongo.MongoClient()
mongo = True
except:
mongo = False
return mongo
class MockQueryEngine(QueryEngine):
"""Mock (fake) QueryEngine, unless a real connection works.
You can disable the attempt to do a real connection
by setting MP_FAKEMONGO to anything
"""
def __init__(
self,
host="127.0.0.1",
port=27017,
database="vasp",
user=None,
password=None,
collection="tasks",
aliases_config=None,
default_properties=None,
):
if has_mongo():
try:
QueryEngine.__init__(
self,
host=host,
port=port,
database=database,
user=user,
password=password,
collection=collection,
aliases_config=aliases_config,
default_properties=default_properties,
)
_log.warning("Connected to real MongoDB at {}:{}".format(host, port))
                return  # actually connected! not mocked.
except:
_log.debug(
"Connection to real MongoDB at {}:{} failed. "
"This is normal; using mock.".format(host, port)
)
self.connection = MongoClient(host, port)
self.db = self.connection[database]
self._user, self._password = user, password
self.host = host
self.port = port
self.database_name = database
        # collection name is now a @property; the setter will set "self.collection" internally
self.collection_name = collection
self.set_aliases_and_defaults(
aliases_config=aliases_config, default_properties=default_properties
)
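# --- illustrative usage (not part of the original module) --------------------
# A minimal sketch of forcing the mongomock-backed path in a test; the database,
# collection and document below are placeholders.
#
#     import os
#     os.environ["MP_FAKEMONGO"] = "1"      # skip the real-connection attempt
#     qe = MockQueryEngine(database="testdb", collection="tasks")
#     qe.db["tasks"].insert_one({"task_id": "mp-1234"})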
# -----------------------------------
# Component test classes / functions
# -----------------------------------
def get_component_logger(name, strm=sys.stdout):
log = logging.getLogger(name)
if "TEST_DEBUG" in os.environ:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
_h = logging.StreamHandler(strm)
log.addHandler(_h)
return log
class ComponentTest(unittest.TestCase):
DB = "testdb"
SRC = "source"
DST = "dest"
MGBUILD_CMD = ["mgbuild", "run"]
def setUp(self):
self.db = self.connect(True)
self.src, self.dst = self.db[self.SRC], self.db[self.DST]
self.src_conf, self.dst_conf = self.create_configs()
def mgbuild(self, args):
try:
s = subprocess.check_output(
self.MGBUILD_CMD + args, stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as err:
print("ERROR: {}".format(err.output))
raise
return s
def connect(self, clear=False):
"""Connect to Mongo DB
:return: pymongo Database
"""
c = pymongo.MongoClient()
db = c[self.DB]
if clear:
            for coll in self.SRC, self.DST:
                db[coll].delete_many({})
                tcoll = coll + "." + CollectionTracker.TRACKING_NAME
                db[tcoll].delete_many({})  # clear the tracking collection as well
return db
def get_record(self, i):
return {"number": i, "data": [1, 2, 3], "name": "mp-{:d}".format(i)}
def add_records(self, coll, n):
for i in range(n):
coll.insert_one(self.get_record(i))
def create_configs(self):
base = {
"host": "localhost",
"port": 27017,
"database": self.DB,
"collection": None,
}
files = []
for coll in (self.SRC, self.DST):
            f = tempfile.NamedTemporaryFile(mode="w", suffix=".json")  # text mode so json.dump can write str
base["collection"] = coll
json.dump(base, f)
f.flush()
files.append(f)
return files
def tearDown(self):
pass
def run_command(self, args, options):
"""Run the command-line given by the list
in `args`, adding the dictionary given by
options as long-form --{key}=value pairs.
"""
        for key, value in options.items():
args.append("--{}".format(key))
if value:
args.append(value)
return subprocess.call(args)
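    # --- illustrative usage (not part of the original module) ---------------
    # A minimal sketch of a concrete test driving run_command(); the command
    # and option values below are made up.
    #
    #     class MyBuilderTest(ComponentTest):
    #         def test_build(self):
    #             rc = self.run_command(
    #                 ["mgbuild", "run"],
    #                 {"config": self.src_conf.name, "quiet": None},
    #             )
    #             self.assertEqual(rc, 0)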
| 27.690217 | 95 | 0.556232 | ["MIT"] | chc273/pymatgen-db | pymatgen/db/tests/common.py | 5,095 | Python |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
from azure.storage.common._common_conversion import _to_str
class Container(object):
'''
Blob container class.
:ivar str name:
The name of the container.
:ivar metadata:
A dict containing name-value pairs associated with the container as metadata.
This var is set to None unless the include=metadata param was included
for the list containers operation. If this parameter was specified but the
container has no metadata, metadata will be set to an empty dictionary.
:vartype metadata: dict(str, str)
:ivar ContainerProperties properties:
System properties for the container.
'''
def __init__(self, name=None, props=None, metadata=None):
self.name = name
self.properties = props or ContainerProperties()
self.metadata = metadata
class ContainerProperties(object):
'''
Blob container's properties class.
:ivar datetime last_modified:
A datetime object representing the last time the container was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar LeaseProperties lease:
Stores all the lease information for the container.
'''
def __init__(self):
self.last_modified = None
self.etag = None
self.lease = LeaseProperties()
self.public_access = None
class Blob(object):
'''
Blob class.
:ivar str name:
Name of blob.
:ivar str snapshot:
A DateTime value that uniquely identifies the snapshot. The value of
this header indicates the snapshot version, and may be used in
subsequent requests to access the snapshot.
:ivar content:
Blob content.
:vartype content: str or bytes
:ivar BlobProperties properties:
Stores all the system properties for the blob.
:ivar metadata:
Name-value pairs associated with the blob as metadata.
'''
def __init__(self, name=None, snapshot=None, content=None, props=None, metadata=None):
self.name = name
self.snapshot = snapshot
self.content = content
self.properties = props or BlobProperties()
self.metadata = metadata
class BlobProperties(object):
'''
Blob Properties
:ivar str blob_type:
String indicating this blob's type.
:ivar datetime last_modified:
A datetime object representing the last time the blob was modified.
:ivar str etag:
The ETag contains a value that you can use to perform operations
conditionally.
:ivar int content_length:
The length of the content returned. If the entire blob was requested,
the length of blob in bytes. If a subset of the blob was requested, the
length of the returned subset.
:ivar str content_range:
Indicates the range of bytes returned in the event that the client
requested a subset of the blob.
:ivar int append_blob_committed_block_count:
(For Append Blobs) Number of committed blocks in the blob.
:ivar int page_blob_sequence_number:
(For Page Blobs) Sequence number for page blob used for coordinating
concurrent writes.
:ivar bool server_encrypted:
Set to true if the blob is encrypted on the server.
:ivar ~azure.storage.blob.models.CopyProperties copy:
Stores all the copy properties for the blob.
:ivar ~azure.storage.blob.models.ContentSettings content_settings:
Stores all the content settings for the blob.
:ivar ~azure.storage.blob.models.LeaseProperties lease:
Stores all the lease information for the blob.
:ivar StandardBlobTier blob_tier:
Indicates the access tier of the blob. The hot tier is optimized
for storing data that is accessed frequently. The cool storage tier
is optimized for storing data that is infrequently accessed and stored
for at least a month. The archive tier is optimized for storing
data that is rarely accessed and stored for at least six months
with flexible latency requirements.
:ivar datetime blob_tier_change_time:
Indicates when the access tier was last changed.
:ivar bool blob_tier_inferred:
Indicates whether the access tier was inferred by the service.
If false, it indicates that the tier was set explicitly.
'''
def __init__(self):
self.blob_type = None
self.last_modified = None
self.etag = None
self.content_length = None
self.content_range = None
self.append_blob_committed_block_count = None
self.page_blob_sequence_number = None
self.server_encrypted = None
self.copy = CopyProperties()
self.content_settings = ContentSettings()
self.lease = LeaseProperties()
self.blob_tier = None
self.blob_tier_change_time = None
self.blob_tier_inferred = False
class ContentSettings(object):
'''
Used to store the content settings of a blob.
:ivar str content_type:
The content type specified for the blob. If no content type was
specified, the default content type is application/octet-stream.
:ivar str content_encoding:
If the content_encoding has previously been set
for the blob, that value is stored.
:ivar str content_language:
If the content_language has previously been set
for the blob, that value is stored.
:ivar str content_disposition:
content_disposition conveys additional information about how to
process the response payload, and also can be used to attach
additional metadata. If content_disposition has previously been set
for the blob, that value is stored.
:ivar str cache_control:
If the cache_control has previously been set for
the blob, that value is stored.
:ivar str content_md5:
If the content_md5 has been set for the blob, this response
header is stored so that the client can check for message content
integrity.
'''
def __init__(
self, content_type=None, content_encoding=None,
content_language=None, content_disposition=None,
cache_control=None, content_md5=None):
self.content_type = content_type
self.content_encoding = content_encoding
self.content_language = content_language
self.content_disposition = content_disposition
self.cache_control = cache_control
self.content_md5 = content_md5
def _to_headers(self):
return {
'x-ms-blob-cache-control': _to_str(self.cache_control),
'x-ms-blob-content-type': _to_str(self.content_type),
'x-ms-blob-content-disposition': _to_str(self.content_disposition),
'x-ms-blob-content-md5': _to_str(self.content_md5),
'x-ms-blob-content-encoding': _to_str(self.content_encoding),
'x-ms-blob-content-language': _to_str(self.content_language),
}
class CopyProperties(object):
'''
Blob Copy Properties.
:ivar str id:
String identifier for the last attempted Copy Blob operation where this blob
was the destination blob. This header does not appear if this blob has never
been the destination in a Copy Blob operation, or if this blob has been
modified after a concluded Copy Blob operation using Set Blob Properties,
Put Blob, or Put Block List.
:ivar str source:
URL up to 2 KB in length that specifies the source blob used in the last attempted
Copy Blob operation where this blob was the destination blob. This header does not
appear if this blob has never been the destination in a Copy Blob operation, or if
this blob has been modified after a concluded Copy Blob operation using
Set Blob Properties, Put Blob, or Put Block List.
:ivar str status:
State of the copy operation identified by Copy ID, with these values:
success:
Copy completed successfully.
pending:
Copy is in progress. Check copy_status_description if intermittent,
non-fatal errors impede copy progress but don't cause failure.
aborted:
Copy was ended by Abort Copy Blob.
failed:
Copy failed. See copy_status_description for failure details.
:ivar str progress:
Contains the number of bytes copied and the total bytes in the source in the last
attempted Copy Blob operation where this blob was the destination blob. Can show
between 0 and Content-Length bytes copied.
:ivar datetime completion_time:
Conclusion time of the last attempted Copy Blob operation where this blob was the
destination blob. This value can specify the time of a completed, aborted, or
failed copy attempt.
:ivar str status_description:
only appears when x-ms-copy-status is failed or pending. Describes cause of fatal
or non-fatal copy operation failure.
'''
def __init__(self):
self.id = None
self.source = None
self.status = None
self.progress = None
self.completion_time = None
self.status_description = None
class LeaseProperties(object):
'''
Blob Lease Properties.
:ivar str status:
The lease status of the blob.
Possible values: locked|unlocked
:ivar str state:
Lease state of the blob.
Possible values: available|leased|expired|breaking|broken
:ivar str duration:
When a blob is leased, specifies whether the lease is of infinite or fixed duration.
'''
def __init__(self):
self.status = None
self.state = None
self.duration = None
class BlobPrefix(object):
'''
BlobPrefix objects may potentially returned in the blob list when
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.list_blobs` is
used with a delimiter. Prefixes can be thought of as virtual blob directories.
:ivar str name: The name of the blob prefix.
'''
def __init__(self):
self.name = None
class BlobBlockState(object):
'''Block blob block types.'''
Committed = 'Committed'
'''Committed blocks.'''
Latest = 'Latest'
'''Latest blocks.'''
Uncommitted = 'Uncommitted'
'''Uncommitted blocks.'''
class BlobBlock(object):
'''
BlockBlob Block class.
:ivar str id:
Block id.
:ivar str state:
Block state.
        Possible values: committed|uncommitted
:ivar int size:
Block size in bytes.
'''
def __init__(self, id=None, state=BlobBlockState.Latest):
self.id = id
self.state = state
def _set_size(self, size):
self.size = size
class BlobBlockList(object):
'''
Blob Block List class.
:ivar committed_blocks:
List of committed blocks.
:vartype committed_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
:ivar uncommitted_blocks:
List of uncommitted blocks.
:vartype uncommitted_blocks: list(:class:`~azure.storage.blob.models.BlobBlock`)
'''
def __init__(self):
self.committed_blocks = list()
self.uncommitted_blocks = list()
class PageRange(object):
'''
Page Range for page blob.
:ivar int start:
Start of page range in bytes.
:ivar int end:
End of page range in bytes.
:ivar bool is_cleared:
Indicates if a page range is cleared or not. Only applicable
for get_page_range_diff API.
'''
def __init__(self, start=None, end=None, is_cleared=False):
self.start = start
self.end = end
self.is_cleared = is_cleared
class ResourceProperties(object):
'''
Base response for a resource request.
:ivar str etag:
Opaque etag value that can be used to check if resource
has been modified.
:ivar datetime last_modified:
Datetime for last time resource was modified.
'''
def __init__(self):
self.last_modified = None
self.etag = None
class AppendBlockProperties(ResourceProperties):
'''
Response for an append block request.
:ivar int append_offset:
Position to start next append.
:ivar int committed_block_count:
Number of committed append blocks.
'''
def __init__(self):
        super(AppendBlockProperties, self).__init__()
self.append_offset = None
self.committed_block_count = None
class PageBlobProperties(ResourceProperties):
'''
Response for a page request.
:ivar int sequence_number:
Identifer for page blobs to help handle concurrent writes.
'''
def __init__(self):
        super(PageBlobProperties, self).__init__()
self.sequence_number = None
class PublicAccess(object):
'''
Specifies whether data in the container may be accessed publicly and the level of access.
'''
OFF = 'off'
'''
Specifies that there is no public read access for both the container and blobs within the container.
Clients cannot enumerate the containers within the storage account as well as the blobs within the container.
'''
Blob = 'blob'
'''
Specifies public read access for blobs. Blob data within this container can be read
via anonymous request, but container data is not available. Clients cannot enumerate
blobs within the container via anonymous request.
'''
Container = 'container'
'''
Specifies full public read access for container and blob data. Clients can enumerate
blobs within the container via anonymous request, but cannot enumerate containers
within the storage account.
'''
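    # --- illustrative usage (not part of the original module) ---------------
    # A minimal sketch of where these values are consumed; the service object,
    # account credentials and container name are placeholders and assume the
    # legacy BlockBlobService client from this SDK generation.
    #
    #     from azure.storage.blob import BlockBlobService, PublicAccess
    #     service = BlockBlobService(account_name="<account>", account_key="<key>")
    #     service.create_container("images", public_access=PublicAccess.Blob)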
class DeleteSnapshot(object):
'''
Required if the blob has associated snapshots. Specifies how to handle the snapshots.
'''
Include = 'include'
'''
Delete the base blob and all of its snapshots.
'''
Only = 'only'
'''
Delete only the blob's snapshots and not the blob itself.
'''
class BlockListType(object):
'''
Specifies whether to return the list of committed blocks, the list of uncommitted
blocks, or both lists together.
'''
All = 'all'
'''Both committed and uncommitted blocks.'''
Committed = 'committed'
'''Committed blocks.'''
Uncommitted = 'uncommitted'
'''Uncommitted blocks.'''
class SequenceNumberAction(object):
'''Sequence number actions.'''
Increment = 'increment'
'''
Increments the value of the sequence number by 1. If specifying this option,
do not include the x-ms-blob-sequence-number header.
'''
Max = 'max'
'''
Sets the sequence number to be the higher of the value included with the
request and the value currently stored for the blob.
'''
Update = 'update'
'''Sets the sequence number to the value included with the request.'''
class _LeaseActions(object):
'''Actions for a lease.'''
Acquire = 'acquire'
'''Acquire the lease.'''
Break = 'break'
'''Break the lease.'''
Change = 'change'
'''Change the lease ID.'''
Release = 'release'
'''Release the lease.'''
Renew = 'renew'
'''Renew the lease.'''
class _BlobTypes(object):
'''Blob type options.'''
AppendBlob = 'AppendBlob'
'''Append blob type.'''
BlockBlob = 'BlockBlob'
'''Block blob type.'''
PageBlob = 'PageBlob'
'''Page blob type.'''
class Include(object):
'''
Specifies the datasets to include in the blob list response.
:ivar ~azure.storage.blob.models.Include Include.COPY:
Specifies that metadata related to any current or previous Copy Blob operation
should be included in the response.
:ivar ~azure.storage.blob.models.Include Include.METADATA:
Specifies that metadata be returned in the response.
:ivar ~azure.storage.blob.models.Include Include.SNAPSHOTS:
Specifies that snapshots should be included in the enumeration.
:ivar ~azure.storage.blob.models.Include Include.UNCOMMITTED_BLOBS:
Specifies that blobs for which blocks have been uploaded, but which have not
been committed using Put Block List, be included in the response.
'''
def __init__(self, snapshots=False, metadata=False, uncommitted_blobs=False,
copy=False, _str=None):
'''
:param bool snapshots:
Specifies that snapshots should be included in the enumeration.
:param bool metadata:
Specifies that metadata be returned in the response.
:param bool uncommitted_blobs:
Specifies that blobs for which blocks have been uploaded, but which have
not been committed using Put Block List, be included in the response.
:param bool copy:
Specifies that metadata related to any current or previous Copy Blob
operation should be included in the response.
:param str _str:
A string representing the includes.
'''
if not _str:
_str = ''
components = _str.split(',')
self.snapshots = snapshots or ('snapshots' in components)
self.metadata = metadata or ('metadata' in components)
self.uncommitted_blobs = uncommitted_blobs or ('uncommittedblobs' in components)
self.copy = copy or ('copy' in components)
def __or__(self, other):
        return Include(_str=str(self) + ',' + str(other))  # comma keeps each flag parseable by __init__
def __add__(self, other):
        return Include(_str=str(self) + ',' + str(other))
def __str__(self):
include = (('snapshots,' if self.snapshots else '') +
('metadata,' if self.metadata else '') +
('uncommittedblobs,' if self.uncommitted_blobs else '') +
('copy,' if self.copy else ''))
return include.rstrip(',')
Include.COPY = Include(copy=True)
Include.METADATA = Include(metadata=True)
Include.SNAPSHOTS = Include(snapshots=True)
Include.UNCOMMITTED_BLOBS = Include(uncommitted_blobs=True)
class BlobPermissions(object):
'''
BlobPermissions class to be used with
:func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_blob_shared_access_signature` API.
:ivar BlobPermissions BlobPermissions.ADD:
Add a block to an append blob.
:ivar BlobPermissions BlobPermissions.CREATE:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:ivar BlobPermissions BlobPermissions.DELETE:
Delete the blob.
:ivar BlobPermissions BlobPermissions.READ:
Read the content, properties, metadata and block list. Use the blob as the source of a copy operation.
:ivar BlobPermissions BlobPermissions.WRITE:
Create or write content, properties, metadata, or block list. Snapshot or lease
the blob. Resize the blob (page blob only). Use the blob as the destination of a
copy operation within the same account.
'''
def __init__(self, read=False, add=False, create=False, write=False,
delete=False, _str=None):
'''
:param bool read:
Read the content, properties, metadata and block list. Use the blob as
the source of a copy operation.
:param bool add:
Add a block to an append blob.
:param bool create:
Write a new blob, snapshot a blob, or copy a blob to a new blob.
:param bool write:
Create or write content, properties, metadata, or block list. Snapshot
or lease the blob. Resize the blob (page blob only). Use the blob as the
destination of a copy operation within the same account.
:param bool delete:
Delete the blob.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.add = add or ('a' in _str)
self.create = create or ('c' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
def __or__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __add__(self, other):
return BlobPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('a' if self.add else '') +
('c' if self.create else '') +
('w' if self.write else '') +
('d' if self.delete else ''))
BlobPermissions.ADD = BlobPermissions(add=True)
BlobPermissions.CREATE = BlobPermissions(create=True)
BlobPermissions.DELETE = BlobPermissions(delete=True)
BlobPermissions.READ = BlobPermissions(read=True)
BlobPermissions.WRITE = BlobPermissions(write=True)
class ContainerPermissions(object):
'''
ContainerPermissions class to be used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.generate_container_shared_access_signature`
API and for the AccessPolicies used with :func:`~azure.storage.blob.baseblobservice.BaseBlobService.set_container_acl`.
:ivar ContainerPermissions ContainerPermissions.DELETE:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:ivar ContainerPermissions ContainerPermissions.LIST:
List blobs in the container.
:ivar ContainerPermissions ContainerPermissions.READ:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:ivar ContainerPermissions ContainerPermissions.WRITE:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
'''
def __init__(self, read=False, write=False, delete=False, list=False,
_str=None):
'''
:param bool read:
Read the content, properties, metadata or block list of any blob in the
container. Use any blob in the container as the source of a copy operation.
:param bool write:
For any blob in the container, create or write content, properties,
metadata, or block list. Snapshot or lease the blob. Resize the blob
(page blob only). Use the blob as the destination of a copy operation
within the same account. Note: You cannot grant permissions to read or
write container properties or metadata, nor to lease a container, with
a container SAS. Use an account SAS instead.
:param bool delete:
Delete any blob in the container. Note: You cannot grant permissions to
delete a container with a container SAS. Use an account SAS instead.
:param bool list:
List blobs in the container.
:param str _str:
A string representing the permissions.
'''
if not _str:
_str = ''
self.read = read or ('r' in _str)
self.write = write or ('w' in _str)
self.delete = delete or ('d' in _str)
self.list = list or ('l' in _str)
def __or__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __add__(self, other):
return ContainerPermissions(_str=str(self) + str(other))
def __str__(self):
return (('r' if self.read else '') +
('w' if self.write else '') +
('d' if self.delete else '') +
('l' if self.list else ''))
ContainerPermissions.DELETE = ContainerPermissions(delete=True)
ContainerPermissions.LIST = ContainerPermissions(list=True)
ContainerPermissions.READ = ContainerPermissions(read=True)
ContainerPermissions.WRITE = ContainerPermissions(write=True)
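# --- illustrative note (not part of the original module) ---------------------
# Permission objects compose with ``|`` / ``+`` and render to the short
# permission string expected by the generate_*_shared_access_signature APIs;
# the values below follow directly from the __or__ and __str__ methods defined
# above.
#
#     read_list = ContainerPermissions.READ | ContainerPermissions.LIST
#     str(read_list)                                            # -> 'rl'
#     str(BlobPermissions(read=True, create=True, write=True))  # -> 'rcw'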
class PremiumPageBlobTier(object):
'''
Specifies the page blob tier to set the blob to. This is only applicable to page
blobs on premium storage accounts.
Please take a look at https://docs.microsoft.com/en-us/azure/storage/storage-premium-storage#scalability-and-performance-targets
    for detailed information on the corresponding IOPS and throughput per PageBlobTier.
'''
P4 = 'P4'
''' P4 Tier '''
P6 = 'P6'
''' P6 Tier '''
P10 = 'P10'
''' P10 Tier '''
P20 = 'P20'
''' P20 Tier '''
P30 = 'P30'
''' P30 Tier '''
P40 = 'P40'
''' P40 Tier '''
P50 = 'P50'
''' P50 Tier '''
P60 = 'P60'
''' P60 Tier '''
class StandardBlobTier(object):
'''
Specifies the blob tier to set the blob to. This is only applicable for block blobs on standard storage accounts.
'''
Archive = 'Archive'
''' Archive '''
Cool = 'Cool'
''' Cool '''
Hot = 'Hot'
''' Hot '''
| 35.038874 | 149 | 0.655457 | ["Apache-2.0"] | RobertoPrevato/azure-storage-python | azure/storage/blob/models.py | 26,141 | Python |
import numpy
import matplotlib.pyplot as plt
import threading
import multiprocessing
from scipy import stats
class TestHist:
    def hist(self):
        x = numpy.random.uniform(0.0, 5.0, 100000)
        plt.hist(x, 100)
        plt.show()
        y = numpy.random.normal(0.0, 5.0, 100000)
        plt.hist(y, 100)
        plt.show()
class TestScatter:
    def scatter(self):
        a = numpy.random.normal(5.0, 1.0, 1000)
        b = numpy.random.normal(10.0, 2.0, 1000)
        plt.scatter(a, b)
        plt.show()
class TestLinearRegression:
    def linear(self):
        a = numpy.random.uniform(5.0, 1.0, 1000)
        b = numpy.random.uniform(10.0, 2.0, 1000)
        slope, intercept, r, p, std_err = stats.linregress(a, b)
        print(slope, intercept, r, p, std_err)
        mymodel = list(map(lambda xa: self.myfunc(xa, slope, intercept), a))
        plt.scatter(a, b)
        plt.plot(a, mymodel)
        plt.show()
    def myfunc(self, x, slope, intercept):
        """Return the fitted line value slope * x + intercept for the input x."""
        return slope * x + intercept
linear =TestLinearRegression()
linear.linear()
# numpy.random.seed(12345678)
# x = numpy.random.random(10)
# y = 1.6*x + numpy.random.random(10)
# from scipy import stats
# x = [5,7,8,7,2,17,2,9,4,11,12,9,6]
# y = [99,86,87,88,111,86,103,87,94,78,77,85,86]
# slope, intercept, r, p, std_err = stats.linregress(x, y)
# def myfunc(x):
# return slope * x + intercept
# speed = myfunc(10)
# print(slope, intercept, r, p, std_err)
# print(speed)
| 22.444444 | 74 | 0.584777 | ["MIT"] | badpaybad/mypython | helloword/ml.py | 1,616 | Python |
from rest_framework import permissions
from django_otp import user_has_device
from .utils import otp_is_verified
class IsOtpVerified(permissions.BasePermission):
"""
If user has verified TOTP device, require TOTP OTP.
"""
message = "You do not have permission to perform this action until you verify your OTP device."
def has_permission(self, request, view):
if request.user.is_two_factor_enabled and user_has_device(request.user):
return otp_is_verified(request)
else:
return True
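# --- illustrative usage (not part of this module) -----------------------------
# A minimal sketch of attaching the permission to a DRF view; the view name is
# a placeholder and IsAuthenticated is shown only as the usual companion check.
#
#     from rest_framework.permissions import IsAuthenticated
#     from rest_framework.views import APIView
#
#     class AccountDetailView(APIView):
#         permission_classes = (IsAuthenticated, IsOtpVerified)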
| 32.058824 | 99 | 0.726606 | ["MIT"] | frankfern/django_rest_totp | accounts/permissions.py | 545 | Python |
class HsvFilter:
def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,
sAdd=None, sSub=None, vAdd=None, vSub=None):
self.hMin = hMin
self.sMin = sMin
self.vMin = vMin
self.hMax = hMax
self.sMax = sMax
self.vMax = vMax
self.sAdd = sAdd
self.sSub = sSub
self.vAdd = vAdd
self.vSub = vSub
| 26.4375 | 88 | 0.541371 | ["MIT"] | gulyhan/CryptOfTheNecroDancerBot | computer_vision/hsv_filter.py | 423 | Python |
class Solution:
    def solve(self, a, b, lower, upper):
        """Count pairs (i, j) with lower <= a[i]**2 + b[j]**2 <= upper."""
        # Square both arrays and sort b's squares so that, for each squared
        # value from a, the valid partners in b form a contiguous range that
        # two binary searches can locate.
        a = [x ** 2 for x in a]
        a.sort()
        b = [x ** 2 for x in b]
        b.sort()
        ans = 0
        for i in range(len(a)):
            # Leftmost index in b whose square lifts the sum to at least `lower`.
            l, r = 0, len(b) - 1
            ans1 = -1
            while l <= r:
                mid = (l + r) // 2
                if a[i] + b[mid] >= lower:
                    ans1 = mid
                    r = mid - 1
                else:
                    l = mid + 1
            # Rightmost index in b whose square keeps the sum at most `upper`.
            l, r = 0, len(b) - 1
            ans2 = -1
            while l <= r:
                mid = (l + r) // 2
                if a[i] + b[mid] <= upper:
                    ans2 = mid
                    l = mid + 1
                else:
                    r = mid - 1
            if -1 in [ans1, ans2]:
                continue
            ans += ans2 - ans1 + 1
        return ans
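if __name__ == "__main__":
    # Hand-checked sanity check (not part of the original submission): with
    # a = [1, 2] and b = [1, 2] the squared pair sums are 2, 5, 5 and 8, of
    # which exactly three fall inside [2, 5].
    print(Solution().solve([1, 2], [1, 2], lower=2, upper=5))  # expected: 3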
| 21.684211 | 42 | 0.300971 | ["MIT"] | MdAbedin/binarysearch | 0981 Bounded Square Sums.py | 824 | Python |
# a simple python AES decrypter. Do not remember why I needed this, but nice to have. :)
pw = [255,155,28,115,214,107,206,49,172,65,62,174,19,27,70,79,88,47,108,226,209,225,243,218,126,141,55,107,38,57,78,91]
pw1 = b''
for i in pw:
pw1 += i.to_bytes(1, 'little')
import sys, hexdump, binascii
from Crypto.Cipher import AES
class AESCipher:
def __init__(self, key):
self.key = key
def decrypt(self, iv, data):
self.cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self.cipher.decrypt(data)
key = binascii.unhexlify("0602000000a400005253413100040000")
iv = binascii.unhexlify("0100010067244F436E6762F25EA8D704")
raw_un = AESCipher(key).decrypt(iv, pw1)
print(hexdump.hexdump(raw_un))
password = raw_un.decode('utf-16')
print(password)
| 27.857143 | 119 | 0.703846 | ["MIT"] | fjank/htb | htb/remote/decoder.py | 780 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImagesOperations:
"""GalleryImagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(gallery_image, 'GalleryImage')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs
) -> AsyncLROPoller["_models.GalleryImage"]:
"""Create or update a gallery Image Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be created.
:type gallery_name: str
:param gallery_image_name: The name of the gallery Image Definition to be created or updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the create or update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryImage or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2019_03_01.models.GalleryImage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
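    # --- illustrative usage (not part of the generated client) --------------
    # A minimal sketch of driving the long-running operation above through the
    # async management client; the credential object, resource names and the
    # pre-built ``gallery_image`` model instance are placeholders, and the
    # exact client construction depends on the installed SDK versions.
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.compute.aio import ComputeManagementClient
    #
    #     async def create_gallery_image(gallery_image):
    #         async with ComputeManagementClient(
    #             DefaultAzureCredential(), "<subscription-id>"
    #         ) as client:
    #             poller = await client.gallery_images.begin_create_or_update(
    #                 "my-rg", "myGallery", "myImageDefinition", gallery_image
    #             )
    #             return await poller.result()  # resolves to a GalleryImage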
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs
) -> "_models.GalleryImage":
"""Retrieves information about a gallery Image Definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are
to be retrieved.
:type gallery_name: str
:param gallery_image_name: The name of the gallery Image Definition to be retrieved.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2019_03_01.models.GalleryImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Delete a gallery image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be deleted.
:type gallery_name: str
:param gallery_image_name: The name of the gallery Image Definition to be deleted.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs
) -> AsyncIterable["_models.GalleryImageList"]:
"""List gallery Image Definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to
be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryImageList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2019_03_01.models.GalleryImageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_gallery.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryImageList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'} # type: ignore
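# --- Hedged usage sketch (editor addition, not part of the generated SDK). ---
# Shows how a caller might drive the four operations defined above: create an
# Image Definition via the LRO poller, fetch it, page through the gallery, then
# delete it. The credential/client construction (azure.identity,
# azure.mgmt.compute.aio) and all resource names are assumptions; only the
# gallery_images calls mirror the signatures documented in this file.
async def _example_gallery_image_lifecycle():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.compute.aio import ComputeManagementClient
    from azure.mgmt.compute.v2019_03_01 import models
    async with DefaultAzureCredential() as credential:
        async with ComputeManagementClient(credential, "<subscription-id>") as client:
            gallery_image = models.GalleryImage(
                location="westus",
                os_type="Linux",
                os_state="Generalized",
                identifier=models.GalleryImageIdentifier(
                    publisher="contoso", offer="demo", sku="demo-sku"),
            )
            # begin_* returns an AsyncLROPoller; result() waits for completion.
            poller = await client.gallery_images.begin_create_or_update(
                "my-rg", "myGallery", "myImageDef", gallery_image)
            created = await poller.result()
            print("created:", created.name)
            # Point read of the same Image Definition.
            fetched = await client.gallery_images.get("my-rg", "myGallery", "myImageDef")
            print("provisioning state:", fetched.provisioning_state)
            # list_by_gallery returns an AsyncItemPaged; iterate it directly.
            async for image in client.gallery_images.list_by_gallery("my-rg", "myGallery"):
                print("listed:", image.name)
            delete_poller = await client.gallery_images.begin_delete(
                "my-rg", "myGallery", "myImageDef")
            await delete_poller.result()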
| 50.043182 | 212 | 0.671193 | ["MIT"] | Darkstar1t/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/aio/operations/_gallery_images_operations.py | 22,019 | Python
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = '[email protected] (Will Robinson)'
class Error(Exception):
"""Base error type for this module."""
pass
class DecodeError(Error):
"""Exception raised when deserializing messages."""
pass
class EncodeError(Error):
"""Exception raised when serializing messages."""
pass
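# --- Hedged usage sketch (editor addition): DecodeError above is the exception
# callers catch when parsing untrusted bytes. `some_pb2.Envelope` is a
# hypothetical generated class used purely for illustration.
def _example_safe_parse(raw_bytes):
  from some_pb2 import Envelope  # hypothetical generated module
  msg = Envelope()
  try:
    msg.ParseFromString(raw_bytes)
  except DecodeError:
    return None  # malformed or truncated input
  return msg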
class Message(object):
"""Abstract base class for protocol messages.
Protocol message classes are almost always generated by the protocol
compiler. These generated types subclass Message and implement the methods
shown below.
"""
# TODO(robinson): Link to an HTML document here.
# TODO(robinson): Document that instances of this class will also
# have an Extensions attribute with __getitem__ and __setitem__.
# Again, not sure how to best convey this.
# TODO(robinson): Document that the class must also have a static
# RegisterExtension(extension_field) method.
# Not sure how to best express at this point.
# TODO(robinson): Document these fields and methods.
__slots__ = []
#: The :class:`google.protobuf.descriptor.Descriptor` for this message type.
DESCRIPTOR = None
def __deepcopy__(self, memo=None):
clone = type(self)()
clone.MergeFrom(self)
return clone
def __eq__(self, other_msg):
"""Recursively compares two messages by value and structure."""
raise NotImplementedError
def __ne__(self, other_msg):
# Can't just say self != other_msg, since that would infinitely recurse. :)
return not self == other_msg
def __hash__(self):
raise TypeError('unhashable object')
def __str__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def __unicode__(self):
"""Outputs a human-readable representation of the message."""
raise NotImplementedError
def MergeFrom(self, other_msg):
"""Merges the contents of the specified message into current message.
This method merges the contents of the specified message into the current
message. Singular fields that are set in the specified message overwrite
the corresponding fields in the current message. Repeated fields are
appended. Singular sub-messages and groups are recursively merged.
Args:
other_msg (Message): A message to merge into the current message.
"""
raise NotImplementedError
def CopyFrom(self, other_msg):
"""Copies the content of the specified message into the current message.
The method clears the current message and then merges the specified
message using MergeFrom.
Args:
other_msg (Message): A message to copy into the current one.
"""
if self is other_msg:
return
self.Clear()
self.MergeFrom(other_msg)
def Clear(self):
"""Clears all data that was set in the message."""
raise NotImplementedError
def SetInParent(self):
"""Mark this as present in the parent.
This normally happens automatically when you assign a field of a
sub-message, but sometimes you want to make the sub-message
present while keeping it empty. If you find yourself using this,
you may want to reconsider your design.
"""
raise NotImplementedError
def IsInitialized(self):
"""Checks if the message is initialized.
Returns:
bool: The method returns True if the message is initialized (i.e. all of
its required fields are set).
"""
raise NotImplementedError
# TODO(robinson): MergeFromString() should probably return None and be
# implemented in terms of a helper that returns the # of bytes read. Our
# deserialization routines would use the helper when recursively
# deserializing, but the end user would almost always just want the no-return
# MergeFromString().
def MergeFromString(self, serialized):
"""Merges serialized protocol buffer data into this message.
When we find a field in `serialized` that is already present
in this message:
- If it's a "repeated" field, we append to the end of our list.
- Else, if it's a scalar, we overwrite our field.
- Else, (it's a nonrepeated composite), we recursively merge
into the existing composite.
Args:
serialized (bytes): Any object that allows us to call
``memoryview(serialized)`` to access a string of bytes using the
buffer interface.
Returns:
int: The number of bytes read from `serialized`.
For non-group messages, this will always be `len(serialized)`,
but for messages which are actually groups, this will
generally be less than `len(serialized)`, since we must
stop when we reach an ``END_GROUP`` tag. Note that if
we *do* stop because of an ``END_GROUP`` tag, the number
of bytes returned does not include the bytes
for the ``END_GROUP`` tag information.
Raises:
DecodeError: if the input cannot be parsed.
"""
# TODO(robinson): Document handling of unknown fields.
# TODO(robinson): When we switch to a helper, this will return None.
raise NotImplementedError
def ParseFromString(self, serialized):
"""Parse serialized protocol buffer data into this message.
Like :func:`MergeFromString()`, except we clear the object first.
"""
self.Clear()
return self.MergeFromString(serialized)
def SerializeToString(self, **kwargs):
"""Serializes the protocol message to a binary string.
Keyword Args:
deterministic (bool): If true, requests deterministic serialization
of the protobuf, with predictable ordering of map keys.
Returns:
A binary string representation of the message if all of the required
fields in the message are set (i.e. the message is initialized).
Raises:
EncodeError: if the message isn't initialized (see :func:`IsInitialized`).
"""
raise NotImplementedError
def SerializePartialToString(self, **kwargs):
"""Serializes the protocol message to a binary string.
This method is similar to SerializeToString but doesn't check if the
message is initialized.
Keyword Args:
deterministic (bool): If true, requests deterministic serialization
of the protobuf, with predictable ordering of map keys.
Returns:
bytes: A serialized representation of the partial message.
"""
raise NotImplementedError
# TODO(robinson): Decide whether we like these better
# than auto-generated has_foo() and clear_foo() methods
# on the instances themselves. This way is less consistent
# with C++, but it makes reflection-type access easier and
# reduces the number of magically autogenerated things.
#
# TODO(robinson): Be sure to document (and test) exactly
# which field names are accepted here. Are we case-sensitive?
# What do we do with fields that share names with Python keywords
# like 'lambda' and 'yield'?
#
# nnorwitz says:
# """
# Typically (in python), an underscore is appended to names that are
# keywords. So they would become lambda_ or yield_.
# """
def ListFields(self):
"""Returns a list of (FieldDescriptor, value) tuples for present fields.
A message field is non-empty if HasField() would return true. A singular
primitive field is non-empty if HasField() would return true in proto2 or it
is non zero in proto3. A repeated field is non-empty if it contains at least
one element. The fields are ordered by field number.
Returns:
list[tuple(FieldDescriptor, value)]: field descriptors and values
for all fields in the message which are not empty. The values vary by
field type.
"""
raise NotImplementedError
def HasField(self, field_name):
"""Checks if a certain field is set for the message.
For a oneof group, checks if any field inside is set. Note that if the
field_name is not defined in the message descriptor, :exc:`ValueError` will
be raised.
Args:
field_name (str): The name of the field to check for presence.
Returns:
bool: Whether a value has been set for the named field.
Raises:
ValueError: if the `field_name` is not a member of this message.
"""
raise NotImplementedError
def ClearField(self, field_name):
"""Clears the contents of a given field.
Inside a oneof group, clears the field set. If the name neither refers to a
defined field or oneof group, :exc:`ValueError` is raised.
Args:
field_name (str): The name of the field to check for presence.
Raises:
ValueError: if the `field_name` is not a member of this message.
"""
raise NotImplementedError
def WhichOneof(self, oneof_group):
"""Returns the name of the field that is set inside a oneof group.
If no field is set, returns None.
Args:
oneof_group (str): the name of the oneof group to check.
Returns:
str or None: The name of the group that is set, or None.
Raises:
ValueError: no group with the given name exists
"""
raise NotImplementedError
def HasExtension(self, extension_handle):
"""Checks if a certain extension is present for this message.
Extensions are retrieved using the :attr:`Extensions` mapping (if present).
Args:
extension_handle: The handle for the extension to check.
Returns:
bool: Whether the extension is present for this message.
Raises:
KeyError: if the extension is repeated. Similar to repeated fields,
there is no separate notion of presence: a "not present" repeated
extension is an empty list.
"""
raise NotImplementedError
def ClearExtension(self, extension_handle):
"""Clears the contents of a given extension.
Args:
extension_handle: The handle for the extension to clear.
"""
raise NotImplementedError
def UnknownFields(self):
"""Returns the UnknownFieldSet.
Returns:
UnknownFieldSet: The unknown fields stored in this message.
"""
raise NotImplementedError
def DiscardUnknownFields(self):
"""Clears all fields in the :class:`UnknownFieldSet`.
This operation is recursive for nested message.
"""
raise NotImplementedError
def ByteSize(self):
"""Returns the serialized size of this message.
Recursively calls ByteSize() on all contained messages.
Returns:
int: The number of bytes required to serialize this message.
"""
raise NotImplementedError
@classmethod
def FromString(cls, s):
raise NotImplementedError
@staticmethod
def RegisterExtension(extension_handle):
raise NotImplementedError
def _SetListener(self, message_listener):
"""Internal method used by the protocol message implementation.
Clients should not call this directly.
Sets a listener that this message will call on certain state transitions.
The purpose of this method is to register back-edges from children to
parents at runtime, for the purpose of setting "has" bits and
byte-size-dirty bits in the parent and ancestor objects whenever a child or
descendant object is modified.
If the client wants to disconnect this Message from the object tree, she
explicitly sets callback to None.
If message_listener is None, unregisters any existing listener. Otherwise,
message_listener must implement the MessageListener interface in
internal/message_listener.py, and we discard any listener registered
via a previous _SetListener() call.
"""
raise NotImplementedError
def __getstate__(self):
"""Support the pickle protocol."""
return dict(serialized=self.SerializePartialToString())
def __setstate__(self, state):
"""Support the pickle protocol."""
self.__init__()
serialized = state['serialized']
# On Python 3, using encoding='latin1' is required for unpickling
# protos pickled by Python 2.
if not isinstance(serialized, bytes):
serialized = serialized.encode('latin1')
self.ParseFromString(serialized)
def __reduce__(self):
message_descriptor = self.DESCRIPTOR
if message_descriptor.containing_type is None:
return type(self), (), self.__getstate__()
# the message type must be nested.
# Python does not pickle nested classes; use the symbol_database on the
# receiving end.
container = message_descriptor
return (_InternalConstructMessage, (container.full_name,),
self.__getstate__())
def _InternalConstructMessage(full_name):
"""Constructs a nested message."""
from google.protobuf import symbol_database # pylint:disable=g-import-not-at-top
return symbol_database.Default().GetSymbol(full_name)()
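# --- Hedged usage sketch (editor addition): the round trip implemented by the
# concrete Message subclasses emitted by the protocol compiler.
# `addressbook_pb2.Person` and its `name`/`id` fields are hypothetical; the
# method calls mirror the docstrings above.
def _example_round_trip():
  from addressbook_pb2 import Person  # hypothetical generated module
  original = Person()
  original.name = "Ada"
  original.id = 42
  payload = original.SerializeToString()  # EncodeError if required fields are unset
  parsed = Person()
  parsed.ParseFromString(payload)  # Clear() followed by MergeFromString()
  assert parsed == original  # __eq__ compares by value and structure
  assert parsed.HasField("name")
  copy = Person()
  copy.CopyFrom(original)  # clear, then MergeFrom()
  return copy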
| 35.246445 | 84 | 0.699543 | ["MIT"] | JustinACoder/H22-GR3-UnrealAI | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/google/protobuf/message.py | 14,874 | Python
# coding: utf-8
# flake8: noqa
"""
Design feeds APIs
Various design feeds.<BR />[Endpoint] https://api.apitore.com/api/32 # noqa: E501
OpenAPI spec version: 0.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.feed_entry_entity import FeedEntryEntity
from swagger_client.models.feed_response_entity import FeedResponseEntity
| 23.8 | 86 | 0.756303 | ["Apache-2.0"] | apitore/apitore-sdk-python | 32/swagger_client/models/__init__.py | 476 | Python
"""Build Environment used for isolation during sdist building
"""
import os
from distutils.sysconfig import get_python_lib
from sysconfig import get_paths
from pip._internal.utils.temp_dir import TempDirectory
class BuildEnvironment(object):
"""Creates and manages an isolated environment to install build deps
"""
def __init__(self, no_clean):
self._temp_dir = TempDirectory(kind="build-env")
self._no_clean = no_clean
@property
def path(self):
return self._temp_dir.path
def __enter__(self):
self._temp_dir.create()
self.save_path = os.environ.get('PATH', None)
self.save_pythonpath = os.environ.get('PYTHONPATH', None)
self.save_nousersite = os.environ.get('PYTHONNOUSERSITE', None)
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
install_dirs = get_paths(install_scheme, vars={
'base': self.path,
'platbase': self.path,
})
scripts = install_dirs['scripts']
if self.save_path:
os.environ['PATH'] = scripts + os.pathsep + self.save_path
else:
os.environ['PATH'] = scripts + os.pathsep + os.defpath
# Note: prefer distutils' sysconfig to get the
# library paths so PyPy is correctly supported.
purelib = get_python_lib(plat_specific=0, prefix=self.path)
platlib = get_python_lib(plat_specific=1, prefix=self.path)
if purelib == platlib:
lib_dirs = purelib
else:
lib_dirs = purelib + os.pathsep + platlib
if self.save_pythonpath:
os.environ['PYTHONPATH'] = lib_dirs + os.pathsep + \
self.save_pythonpath
else:
os.environ['PYTHONPATH'] = lib_dirs
os.environ['PYTHONNOUSERSITE'] = '1'
return self.path
def __exit__(self, exc_type, exc_val, exc_tb):
if not self._no_clean:
self._temp_dir.cleanup()
def restore_var(varname, old_value):
if old_value is None:
os.environ.pop(varname, None)
else:
os.environ[varname] = old_value
restore_var('PATH', self.save_path)
restore_var('PYTHONPATH', self.save_pythonpath)
restore_var('PYTHONNOUSERSITE', self.save_nousersite)
def cleanup(self):
self._temp_dir.cleanup()
class NoOpBuildEnvironment(BuildEnvironment):
"""A no-op drop-in replacement for BuildEnvironment
"""
def __init__(self, no_clean):
pass
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def cleanup(self):
pass
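# --- Hedged usage sketch (editor addition): how pip drives this class. The
# command shown for installing build requirements is an assumption; only the
# context-manager behaviour mirrors the code above.
def _example_isolated_build():
    env = BuildEnvironment(no_clean=False)
    with env as prefix:
        # Inside the block PATH, PYTHONPATH and PYTHONNOUSERSITE point at
        # `prefix`, an empty temporary tree; build requirements would be
        # installed there (e.g. `pip install --prefix <prefix> setuptools wheel`)
        # before invoking the sdist build.
        print("building with isolated prefix:", prefix)
    # On exit the original environment variables are restored and, because
    # no_clean is False, the temporary directory is removed.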
| 28.827957 | 72 | 0.624767 | ["MIT"] | 0915318/PietonFunktie | inter/Lib/site-packages/pip-10.0.1-py3.7.egg/pip/_internal/build_env.py | 2,681 | Python
# Copyright 2022 Aleksandr Soloshenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from django.conf import settings
from storages.backends.azure_storage import AzureStorage
# Create your models here.
class PhotoStorage(AzureStorage):
account_name = os.getenv("AZURE_ACCOUNT_NAME")
account_key = os.getenv("AZURE_ACCOUNT_KEY")
azure_container = os.getenv("AZURE_CONTAINER")
expiration_secs = None
photoStorage = PhotoStorage()
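# --- Hedged usage sketch (editor addition): saving an uploaded file through the
# storage instance above. The "photos/" prefix is an assumption; save() and
# url() are the standard django.core.files.storage.Storage API.
def _example_upload(uploaded_file):
    name = photoStorage.save("photos/" + uploaded_file.name, uploaded_file)
    return photoStorage.url(name)  # non-expiring URL, since expiration_secs is None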
| 34.178571 | 74 | 0.772205 | ["Apache-2.0"] | capcom6/django-bread | recipes/storage.py | 957 | Python
from flask import render_template
from . import main
@main.app_errorhandler(404)
def four_ow_four(error):
    return render_template('404.html'), 404  # return the 404 status, not the default 200
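# --- Hedged usage sketch (editor addition): the handler above only takes effect
# once the `main` blueprint is registered on an application. The factory shape
# below is an assumption about the rest of this project.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(main)  # app_errorhandler(404) now applies app-wide
    return app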
| 21.285714 | 42 | 0.771812 | ["Unlicense"] | suad7/News-highlight | app/main/errors.py | 149 | Python