code stringlengths 2-1.05M | repo_name stringlengths 5-104 | path stringlengths 4-251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2-1.05M |
---|---|---|---|---|---|
from tpe_parking import ParkingLotInfoProvider
def main():
info_provider = ParkingLotInfoProvider()
#info_provider.update_db()
my_location = (25.041340, 121.611751)
parks = info_provider.find_parking_lot_by_coordinate(my_location, 1000)
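# Note: the district lookup on the next line replaces the coordinate-based result above.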
parks = info_provider.find_parking_lot('信義區')
for park in parks:
#print('[{0}] {1}, Entrance:{2}'.format(park['area'], park['name'], [x['Addresss'] for x in park['Entrancecoord']['EntrancecoordInfo']]))
park_available_space = info_provider.find_available_parking_space(park['id'])
if park_available_space:
print('[{0}] {1}(park_id: {2}) | Spaces available: {3}'.format(park['area'], park['name'], park['id'], park_available_space['availablecar']))
else:
print('[{0}] {1}(park_id: {2})'.format(park['area'], park['name'], park['id']))
if __name__ == '__main__':
main()
| shyboynccu/tpe_parking | example.py | Python | mit | 913 |
from .goods_insert import SellCreateView
from .goods_list import GoodsListView
from .goods_detail import GoodsDetailView
from .goods_modify import SellUpdateView
from .order_page import OrderPageView
from .order_check import OrderCheckView
from .order_complete import OrderCompleteView
from .attach_comment import CommentAttachView
from .send_email import send_email
| yevgnenll/but | but/trades/views/__init__.py | Python | mit | 367 |
class MiniMatch:
_defaults = dict(
follow_links=True,
list_directories=True
)
def __init__(self, *patterns, **kwargs):
self._patterns = patterns
self.__dict__.update(MiniMatch._defaults)
self.__dict__.update(kwargs)
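# A minimal usage sketch (hypothetical patterns; the actual matching methods live
# elsewhere in the package):
#   m = MiniMatch('*.py', 'docs/**', follow_links=False)
#   m._patterns is ('*.py', 'docs/**'); m.follow_links is now False, overriding
#   the class default.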
| spiralx/py-minimatch | minimatch/match.py | Python | mit | 270 |
"""
@name: Modules/House/Security/login.py
@author: D. Brian Kimmel
@contact: [email protected]
@copyright: (c) 2019-2020 by D. Brian Kimmel
@note: Created on Jul 23, 2019
@license: MIT License
@summary: Handle logging in.
"""
# Import system type stuff
# Import PyMh files and modules.
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Security ')
class LoginInformation:
"""
"""
def __init__(self):
self.Name = None # Username
self.Password = None
class LocalConfig:
"""
"""
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
def load_name_password(self, p_config):
"""
"""
l_required = ['Name', 'Password']
l_obj = LoginInformation()
for l_key, l_value in p_config.items():
setattr(l_obj, l_key, l_value)
for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:
if getattr(l_obj, l_key) is None and l_key in l_required:
LOG.warning('Pandora Yaml is missing an entry for "{}"'.format(l_key))
return l_obj
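# Example (assumed) config mapping accepted by load_name_password():
#   {'Name': 'houseuser', 'Password': 'secret'}
# A missing required key is logged as a warning and stays None on the result.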
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Core/Config/login.py | Python | mit | 1,248 |
# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from hy.errors import HyError
class LexException(HyError):
"""Error during the Lexing of a Hython expression."""
pass
class PrematureEndOfInput(LexException):
"""We got a premature end of input"""
pass
| hcarvalhoalves/hy | hy/lex/exceptions.py | Python | mit | 1,352 |
old = '-1.py'
import os
os.remove(old)
data = 'old = \'' + str(int(old[:-3]) + 1) + '.py\'\n'
for line in file(str(int(old[:-3]) + 1) + '.py').readlines()[1:]:
data += line
file(str(int(old[:-3]) + 2) + '.py', 'w').write(data)
os.startfile(str(int(old[:-3]) + 2) + '.py')
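# (Python 2, Windows-only) What the lines above attempt: delete the previous
# numbered copy of this script, write a new copy whose filename and `old` value
# are bumped by one, then launch that new copy.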
| ActiveState/code | recipes/Python/440636_eight_most_annoying_lines_code_I_ever_did/recipe-440636.py | Python | mit | 276 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-06-25 17:50
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cadastro', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Lancamento',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('data_vencimento', models.DateField(blank=True, null=True)),
('data_pagamento', models.DateField(blank=True, null=True)),
('descricao', models.CharField(max_length=255)),
('valor_total', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('abatimento', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('juros', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('valor_liquido', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('movimentar_caixa', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='MovimentoCaixa',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('data_movimento', models.DateField(blank=True, null=True)),
('saldo_inicial', models.DecimalField(
decimal_places=2, default=Decimal('0.00'), max_digits=13)),
('saldo_final', models.DecimalField(
decimal_places=2, default=Decimal('0.00'), max_digits=13)),
('entradas', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
('saidas', models.DecimalField(decimal_places=2, default=Decimal(
'0.00'), max_digits=13, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])),
],
),
migrations.CreateModel(
name='PlanoContasGrupo',
fields=[
('id', models.AutoField(auto_created=True,
primary_key=True, serialize=False, verbose_name='ID')),
('codigo', models.CharField(max_length=6)),
('tipo_grupo', models.CharField(choices=[
('0', 'Entrada'), ('1', 'Saída')], max_length=1)),
('descricao', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='Entrada',
fields=[
('lancamento_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True, serialize=False, to='financeiro.Lancamento')),
('status', models.CharField(choices=[
('0', 'Recebida'), ('1', 'A receber'), ('2', 'Atrasada')], default='1', max_length=1)),
('cliente', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='conta_cliente', to='cadastro.Cliente')),
],
bases=('financeiro.lancamento',),
),
migrations.CreateModel(
name='PlanoContasSubgrupo',
fields=[
('planocontasgrupo_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True, serialize=False, to='financeiro.PlanoContasGrupo')),
('grupo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='plano_subgrupo', to='financeiro.PlanoContasGrupo')),
],
bases=('financeiro.planocontasgrupo',),
),
migrations.CreateModel(
name='Saida',
fields=[
('lancamento_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE,
parent_link=True, primary_key=True, serialize=False, to='financeiro.Lancamento')),
('status', models.CharField(choices=[
('0', 'Paga'), ('1', 'A pagar'), ('2', 'Atrasada')], default='1', max_length=1)),
('fornecedor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='conta_fornecedor', to='cadastro.Fornecedor')),
('grupo_plano', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='grupo_plano_pagamento', to='financeiro.PlanoContasGrupo')),
],
bases=('financeiro.lancamento',),
),
migrations.AddField(
model_name='lancamento',
name='conta_corrente',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='conta_corrente_conta', to='cadastro.Banco'),
),
migrations.AddField(
model_name='lancamento',
name='movimento_caixa',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='movimento_caixa_lancamento', to='financeiro.MovimentoCaixa'),
),
migrations.AddField(
model_name='entrada',
name='grupo_plano',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
related_name='grupo_plano_recebimento', to='financeiro.PlanoContasGrupo'),
),
]
| thiagopena/djangoSIGE | djangosige/apps/financeiro/migrations/0001_initial.py | Python | mit | 6,744 |
#!/usr/bin/env python3
#Copyright (C) 2013 by Ngan Nguyen
# Copyright (C) 2012-2019 by UCSC Computational Genomics Lab
#
#Released under the MIT license, see LICENSE.txt
"""Snake tracks
"""
from optparse import OptionGroup
import re
def addSnakeOptions(parser):
group = parser.add_argument_group("SNAKE TRACKS", "Snake track options")
group.add_argument('--selfAlignmentSnakes', dest="selfAlignmentTrack",
help="Produce a self-alignment snake track for every genome",
action="store_true", default=False)
group = parser.add_argument_group(group)
def writeTrackDb_snakes(f, halfile, genomes, subgenomes, currgenome, properName, snpwidth=None, doSelfAlignment=False):
for i, genome in enumerate(genomes):
if not doSelfAlignment and genome == currgenome: #current genome
continue
#SNAKE TRACKS
genomeProperName = genome
if genome in properName:
genomeProperName = properName[genome]
if genome == currgenome:
genomeProperName += " (self)"
f.write("\t\ttrack snake%s\n" %genome)
f.write("\t\tlongLabel %s\n" %genomeProperName)
f.write("\t\tshortLabel %s\n" %genomeProperName)
f.write("\t\totherSpecies %s\n" %genome)
if genome in subgenomes:
f.write("\t\tvisibility full\n")
f.write("\t\tparent hubCentralAlignments\n")
else:
f.write("\t\tvisibility hide\n")
f.write("\t\tparent hubCentralAlignments off\n")
if snpwidth:
f.write("\t\tshowSnpWidth %d\n" % snpwidth)
f.write("\t\tpriority %d\n" %(i + 2))
f.write("\t\tbigDataUrl %s\n" % halfile)
f.write("\t\ttype halSnake\n")
f.write("\t\tgroup snake\n")
f.write("\t\tsubGroups view=Snake orgs=%s\n" %genome)
f.write("\n")
| glennhickey/hal | assemblyHub/snakeTrack.py | Python | mit | 1,869 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editorial', '0064_auto_20171202_2016'),
]
operations = [
migrations.RenameField(
model_name='assignment',
old_name='contributor',
new_name='contractor',
),
]
| ProjectFacet/facet | project/editorial/migrations/0065_auto_20171202_2022.py | Python | mit | 402 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clients', '0010_auto_20151024_2343'),
]
operations = [
migrations.AddField(
model_name='client',
name='napis_id',
field=models.CharField(blank=True, max_length=11, null=True),
),
]
| deafhhs/adapt | clients/migrations/0011_client_napis_id.py | Python | mit | 425 |
from django import forms
from django_filters.widgets import RangeWidget
class DropDownFilterWidget(forms.widgets.ChoiceWidget):
template_name = 'foirequest/widgets/dropdown_filter.html'
def __init__(self, *args, **kwargs):
self.get_url = kwargs.pop('get_url', None)
super().__init__(*args, **kwargs)
def render(self, name, value, attrs=None, renderer=None):
value = super(DropDownFilterWidget, self).render(name, value, attrs=attrs, renderer=renderer)
return value
def get_context(self, name, value, attrs):
self.default_label = self.attrs.get('label', '')
self.selected_label = self.default_label
context = super(DropDownFilterWidget, self).get_context(
name, value, attrs
)
context['selected_label'] = self.selected_label
context['default_label'] = self.default_label
return context
def create_option(self, name, value, label, selected, index,
subindex=None, attrs=None):
option = super(DropDownFilterWidget, self).create_option(name, value,
label, selected, index, subindex=subindex, attrs=attrs)
if selected and value:
self.selected_label = label
# Data is set on widget directly before rendering
data = self.data.copy()
data[name] = value
option['url'] = self.get_url(data)
return option
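# Rough usage sketch (assumed call site; ``get_url`` receives a copy of the current
# query parameters with the chosen option applied and returns the link URL):
#   widget = DropDownFilterWidget(
#       attrs={'label': 'Any status'},
#       get_url=lambda data: '?' + data.urlencode()
#   )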
class AttachmentFileWidget(forms.ClearableFileInput):
template_name = 'foirequest/widgets/attachment_file.html'
class DateRangeWidget(RangeWidget):
template_name = 'foirequest/widgets/daterange.html'
def __init__(self):
widgets = [
forms.DateInput(attrs={'class': 'form-control', 'type': 'date'}),
forms.DateInput(attrs={'class': 'form-control', 'type': 'date'})
]
super(RangeWidget, self).__init__(widgets)
| stefanw/froide | froide/foirequest/widgets.py | Python | mit | 1,895 |
from ..operations import Operations
from .migration import MigrationContext
from .. import util
class EnvironmentContext(util.ModuleClsProxy):
"""A configurational facade made available in an ``env.py`` script.
The :class:`.EnvironmentContext` acts as a *facade* to the more
nuts-and-bolts objects of :class:`.MigrationContext` as well as certain
aspects of :class:`.Config`,
within the context of the ``env.py`` script that is invoked by
most Alembic commands.
:class:`.EnvironmentContext` is normally instantiated
when a command in :mod:`alembic.command` is run. It then makes
itself available in the ``alembic.context`` module for the scope
of the command. From within an ``env.py`` script, the current
:class:`.EnvironmentContext` is available by importing this module.
:class:`.EnvironmentContext` also supports programmatic usage.
At this level, it acts as a Python context manager, that is, is
intended to be used using the
``with:`` statement. A typical use of :class:`.EnvironmentContext`::
from alembic.config import Config
from alembic.script import ScriptDirectory
config = Config()
config.set_main_option("script_location", "myapp:migrations")
script = ScriptDirectory.from_config(config)
def my_function(rev, context):
'''do something with revision "rev", which
will be the current database revision,
and "context", which is the MigrationContext
that the env.py will create'''
with EnvironmentContext(
config,
script,
fn = my_function,
as_sql = False,
starting_rev = 'base',
destination_rev = 'head',
tag = "sometag"
):
script.run_env()
The above script will invoke the ``env.py`` script
within the migration environment. If and when ``env.py``
calls :meth:`.MigrationContext.run_migrations`, the
``my_function()`` function above will be called
by the :class:`.MigrationContext`, given the context
itself as well as the current revision in the database.
.. note::
For most API usages other than full blown
invocation of migration scripts, the :class:`.MigrationContext`
and :class:`.ScriptDirectory` objects can be created and
used directly. The :class:`.EnvironmentContext` object
is *only* needed when you need to actually invoke the
``env.py`` module present in the migration environment.
"""
_migration_context = None
config = None
"""An instance of :class:`.Config` representing the
configuration file contents as well as other variables
set programmatically within it."""
script = None
"""An instance of :class:`.ScriptDirectory` which provides
programmatic access to version files within the ``versions/``
directory.
"""
def __init__(self, config, script, **kw):
"""Construct a new :class:`.EnvironmentContext`.
:param config: a :class:`.Config` instance.
:param script: a :class:`.ScriptDirectory` instance.
:param \**kw: keyword options that will be ultimately
passed along to the :class:`.MigrationContext` when
:meth:`.EnvironmentContext.configure` is called.
"""
self.config = config
self.script = script
self.context_opts = kw
def __enter__(self):
"""Establish a context which provides a
:class:`.EnvironmentContext` object to
env.py scripts.
The :class:`.EnvironmentContext` will
be made available as ``from alembic import context``.
"""
self._install_proxy()
return self
def __exit__(self, *arg, **kw):
self._remove_proxy()
def is_offline_mode(self):
"""Return True if the current migrations environment
is running in "offline mode".
This is ``True`` or ``False`` depending
on the ``--sql`` flag passed.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.context_opts.get('as_sql', False)
def is_transactional_ddl(self):
"""Return True if the context is configured to expect a
transactional DDL capable backend.
This defaults to the type of database in use, and
can be overridden by the ``transactional_ddl`` argument
to :meth:`.configure`
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().impl.transactional_ddl
def requires_connection(self):
return not self.is_offline_mode()
def get_head_revision(self):
"""Return the hex identifier of the 'head' script revision.
If the script directory has multiple heads, this
method raises a :class:`.CommandError`;
:meth:`.EnvironmentContext.get_head_revisions` should be preferred.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso:: :meth:`.EnvironmentContext.get_head_revisions`
"""
return self.script.as_revision_number("head")
def get_head_revisions(self):
"""Return the hex identifier of the 'heads' script revision(s).
This returns a tuple containing the version number of all
heads in the script directory.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.7.0
"""
return self.script.as_revision_number("heads")
def get_starting_revision_argument(self):
"""Return the 'starting revision' argument,
if the revision was passed using ``start:end``.
This is only meaningful in "offline" mode.
Returns ``None`` if no value is available
or was configured.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
if self._migration_context is not None:
return self.script.as_revision_number(
self.get_context()._start_from_rev)
elif 'starting_rev' in self.context_opts:
return self.script.as_revision_number(
self.context_opts['starting_rev'])
else:
# this should raise only in the case that a command
# is being run where the "starting rev" is never applicable;
# this is to catch scripts which rely upon this in
# non-sql mode or similar
raise util.CommandError(
"No starting revision argument is available.")
def get_revision_argument(self):
"""Get the 'destination' revision argument.
This is typically the argument passed to the
``upgrade`` or ``downgrade`` command.
If it was specified as ``head``, the actual
version number is returned; if specified
as ``base``, ``None`` is returned.
This function does not require that the :class:`.MigrationContext`
has been configured.
"""
return self.script.as_revision_number(
self.context_opts['destination_rev'])
def get_tag_argument(self):
"""Return the value passed for the ``--tag`` argument, if any.
The ``--tag`` argument is not used directly by Alembic,
but is available for custom ``env.py`` configurations that
wish to use it; particularly for offline generation scripts
that wish to generate tagged filenames.
This function does not require that the :class:`.MigrationContext`
has been configured.
.. seealso::
:meth:`.EnvironmentContext.get_x_argument` - a newer and more
open ended system of extending ``env.py`` scripts via the command
line.
"""
return self.context_opts.get('tag', None)
def get_x_argument(self, as_dictionary=False):
"""Return the value(s) passed for the ``-x`` argument, if any.
The ``-x`` argument is an open ended flag that allows any user-defined
value or values to be passed on the command line, then available
here for consumption by a custom ``env.py`` script.
The return value is a list, returned directly from the ``argparse``
structure. If ``as_dictionary=True`` is passed, the ``x`` arguments
are parsed using ``key=value`` format into a dictionary that is
then returned.
For example, to support passing a database URL on the command line,
the standard ``env.py`` script can be modified like this::
cmd_line_url = context.get_x_argument(
as_dictionary=True).get('dbname')
if cmd_line_url:
engine = create_engine(cmd_line_url)
else:
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
This then takes effect by running the ``alembic`` script as::
alembic -x dbname=postgresql://user:pass@host/dbname upgrade head
This function does not require that the :class:`.MigrationContext`
has been configured.
.. versionadded:: 0.6.0
.. seealso::
:meth:`.EnvironmentContext.get_tag_argument`
:attr:`.Config.cmd_opts`
"""
if self.config.cmd_opts is not None:
value = self.config.cmd_opts.x or []
else:
value = []
if as_dictionary:
value = dict(
arg.split('=', 1) for arg in value
)
return value
def configure(self,
connection=None,
url=None,
dialect_name=None,
transactional_ddl=None,
transaction_per_migration=False,
output_buffer=None,
starting_rev=None,
tag=None,
template_args=None,
render_as_batch=False,
target_metadata=None,
include_symbol=None,
include_object=None,
include_schemas=False,
process_revision_directives=None,
compare_type=False,
compare_server_default=False,
render_item=None,
literal_binds=False,
upgrade_token="upgrades",
downgrade_token="downgrades",
alembic_module_prefix="op.",
sqlalchemy_module_prefix="sa.",
user_module_prefix=None,
**kw
):
"""Configure a :class:`.MigrationContext` within this
:class:`.EnvironmentContext` which will provide database
connectivity and other configuration to a series of
migration scripts.
Many methods on :class:`.EnvironmentContext` require that
this method has been called in order to function, as they
ultimately need to have database access or at least access
to the dialect in use. Those which do are documented as such.
The important thing needed by :meth:`.configure` is a
means to determine what kind of database dialect is in use.
An actual connection to that database is needed only if
the :class:`.MigrationContext` is to be used in
"online" mode.
If the :meth:`.is_offline_mode` function returns ``True``,
then no connection is needed here. Otherwise, the
``connection`` parameter should be present as an
instance of :class:`sqlalchemy.engine.Connection`.
This function is typically called from the ``env.py``
script within a migration environment. It can be called
multiple times for an invocation. The most recent
:class:`~sqlalchemy.engine.Connection`
for which it was called is the one that will be operated upon
by the next call to :meth:`.run_migrations`.
General parameters:
:param connection: a :class:`~sqlalchemy.engine.Connection`
to use
for SQL execution in "online" mode. When present, is also
used to determine the type of dialect in use.
:param url: a string database url, or a
:class:`sqlalchemy.engine.url.URL` object.
The type of dialect to be used will be derived from this if
``connection`` is not passed.
:param dialect_name: string name of a dialect, such as
"postgresql", "mssql", etc.
The type of dialect to be used will be derived from this if
``connection`` and ``url`` are not passed.
:param transactional_ddl: Force the usage of "transactional"
DDL on or off;
this otherwise defaults to whether or not the dialect in
use supports it.
:param transaction_per_migration: if True, nest each migration script
in a transaction rather than the full series of migrations to
run.
.. versionadded:: 0.6.5
:param output_buffer: a file-like object that will be used
for textual output
when the ``--sql`` option is used to generate SQL scripts.
Defaults to
``sys.stdout`` if not passed here and also not present on
the :class:`.Config`
object. The value here overrides that of the :class:`.Config`
object.
:param output_encoding: when using ``--sql`` to generate SQL
scripts, apply this encoding to the string output.
:param literal_binds: when using ``--sql`` to generate SQL
scripts, pass through the ``literal_binds`` flag to the compiler
so that any literal values that would ordinarily be bound
parameters are converted to plain strings.
.. warning:: Dialects can typically only handle simple datatypes
like strings and numbers for auto-literal generation. Datatypes
like dates, intervals, and others may still require manual
formatting, typically using :meth:`.Operations.inline_literal`.
.. note:: the ``literal_binds`` flag is ignored on SQLAlchemy
versions prior to 0.8 where this feature is not supported.
.. versionadded:: 0.7.6
.. seealso::
:meth:`.Operations.inline_literal`
:param starting_rev: Override the "starting revision" argument
when using ``--sql`` mode.
:param tag: a string tag for usage by custom ``env.py`` scripts.
Set via the ``--tag`` option, can be overridden here.
:param template_args: dictionary of template arguments which
will be added to the template argument environment when
running the "revision" command. Note that the script environment
is only run within the "revision" command if the --autogenerate
option is used, or if the option "revision_environment=true"
is present in the alembic.ini file.
:param version_table: The name of the Alembic version table.
The default is ``'alembic_version'``.
:param version_table_schema: Optional schema to place version
table within.
Parameters specific to the autogenerate feature, when
``alembic revision`` is run with the ``--autogenerate`` feature:
:param target_metadata: a :class:`sqlalchemy.schema.MetaData`
object that
will be consulted during autogeneration. The tables present
will be compared against
what is locally available on the target
:class:`~sqlalchemy.engine.Connection`
to produce candidate upgrade/downgrade operations.
:param compare_type: Indicates type comparison behavior during
an autogenerate
operation. Defaults to ``False`` which disables type
comparison. Set to
``True`` to turn on default type comparison, which has varied
accuracy depending on backend. See :ref:`compare_types`
for an example as well as information on other type
comparison options.
.. seealso::
:ref:`compare_types`
:paramref:`.EnvironmentContext.configure.compare_server_default`
:param compare_server_default: Indicates server default comparison
behavior during
an autogenerate operation. Defaults to ``False`` which disables
server default
comparison. Set to ``True`` to turn on server default comparison,
which has
varied accuracy depending on backend.
To customize server default comparison behavior, a callable may
be specified
which can filter server default comparisons during an
autogenerate operation.
The format of this
callable is::
def my_compare_server_default(context, inspected_column,
metadata_column, inspected_default, metadata_default,
rendered_metadata_default):
# return True if the defaults are different,
# False if not, or None to allow the default implementation
# to compare these defaults
return None
context.configure(
# ...
compare_server_default = my_compare_server_default
)
``inspected_column`` is a dictionary structure as returned by
:meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas
``metadata_column`` is a :class:`sqlalchemy.schema.Column` from
the local model environment.
A return value of ``None`` indicates to allow default server default
comparison
to proceed. Note that some backends such as Postgresql actually
execute
the two defaults on the database side to compare for equivalence.
.. seealso::
:paramref:`.EnvironmentContext.configure.compare_type`
:param include_object: A callable function which is given
the chance to return ``True`` or ``False`` for any object,
indicating if the given object should be considered in the
autogenerate sweep.
The function accepts the following positional arguments:
* ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such
as a :class:`~sqlalchemy.schema.Table`,
:class:`~sqlalchemy.schema.Column`,
:class:`~sqlalchemy.schema.Index`
:class:`~sqlalchemy.schema.UniqueConstraint`,
or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object
* ``name``: the name of the object. This is typically available
via ``object.name``.
* ``type``: a string describing the type of object; currently
``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``,
or ``"foreign_key_constraint"``
.. versionadded:: 0.7.0 Support for indexes and unique constraints
within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
.. versionadded:: 0.7.1 Support for foreign keys within the
:paramref:`~.EnvironmentContext.configure.include_object` hook.
* ``reflected``: ``True`` if the given object was produced based on
table reflection, ``False`` if it's from a local :class:`.MetaData`
object.
* ``compare_to``: the object being compared against, if available,
else ``None``.
E.g.::
def include_object(object, name, type_, reflected, compare_to):
if (type_ == "column" and
not reflected and
object.info.get("skip_autogenerate", False)):
return False
else:
return True
context.configure(
# ...
include_object = include_object
)
:paramref:`.EnvironmentContext.configure.include_object` can also
be used to filter on specific schemas to include or omit, when
the :paramref:`.EnvironmentContext.configure.include_schemas`
flag is set to ``True``. The :attr:`.Table.schema` attribute
on each :class:`.Table` object reflected will indicate the name of the
schema from which the :class:`.Table` originates.
.. versionadded:: 0.6.0
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:param include_symbol: A callable function which, given a table name
and schema name (may be ``None``), returns ``True`` or ``False``,
indicating if the given table should be considered in the
autogenerate sweep.
.. deprecated:: 0.6.0
:paramref:`.EnvironmentContext.configure.include_symbol`
is superseded by the more generic
:paramref:`.EnvironmentContext.configure.include_object`
parameter.
E.g.::
def include_symbol(tablename, schema):
return tablename not in ("skip_table_one", "skip_table_two")
context.configure(
# ...
include_symbol = include_symbol
)
.. seealso::
:paramref:`.EnvironmentContext.configure.include_schemas`
:paramref:`.EnvironmentContext.configure.include_object`
:param render_as_batch: if True, commands which alter elements
within a table will be placed under a ``with batch_alter_table():``
directive, so that batch migrations will take place.
.. versionadded:: 0.7.0
.. seealso::
:ref:`batch_migrations`
:param include_schemas: If True, autogenerate will scan across
all schemas located by the SQLAlchemy
:meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names`
method, and include all differences in tables found across all
those schemas. When using this option, you may want to also
use the :paramref:`.EnvironmentContext.configure.include_object`
option to specify a callable which
can filter the tables/schemas that get included.
.. seealso::
:paramref:`.EnvironmentContext.configure.include_object`
:param render_item: Callable that can be used to override how
any schema item, i.e. column, constraint, type,
etc., is rendered for autogenerate. The callable receives a
string describing the type of object, the object, and
the autogen context. If it returns False, the
default rendering method will be used. If it returns None,
the item will not be rendered in the context of a Table
construct, that is, can be used to skip columns or constraints
within op.create_table()::
def my_render_column(type_, col, autogen_context):
if type_ == "column" and isinstance(col, MySpecialCol):
return repr(col)
else:
return False
context.configure(
# ...
render_item = my_render_column
)
Available values for the type string include: ``"column"``,
``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``,
``"type"``, ``"server_default"``.
.. seealso::
:ref:`autogen_render_types`
:param upgrade_token: When autogenerate completes, the text of the
candidate upgrade operations will be present in this template
variable when ``script.py.mako`` is rendered. Defaults to
``upgrades``.
:param downgrade_token: When autogenerate completes, the text of the
candidate downgrade operations will be present in this
template variable when ``script.py.mako`` is rendered. Defaults to
``downgrades``.
:param alembic_module_prefix: When autogenerate refers to Alembic
:mod:`alembic.operations` constructs, this prefix will be used
(i.e. ``op.create_table``) Defaults to "``op.``".
Can be ``None`` to indicate no prefix.
:param sqlalchemy_module_prefix: When autogenerate refers to
SQLAlchemy
:class:`~sqlalchemy.schema.Column` or type classes, this prefix
will be used
(i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``".
Can be ``None`` to indicate no prefix.
Note that when dialect-specific types are rendered, autogenerate
will render them using the dialect module name, i.e. ``mssql.BIT()``,
``postgresql.UUID()``.
:param user_module_prefix: When autogenerate refers to a SQLAlchemy
type (e.g. :class:`.TypeEngine`) where the module name is not
under the ``sqlalchemy`` namespace, this prefix will be used
within autogenerate. If left at its default of
``None``, the ``__module__`` attribute of the type is used to
render the import module. It's a good practice to set this
and to have all custom types be available from a fixed module space,
in order to future-proof migration files against reorganizations
in modules.
.. versionchanged:: 0.7.0
:paramref:`.EnvironmentContext.configure.user_module_prefix`
no longer defaults to the value of
:paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix`
when left at ``None``; the ``__module__`` attribute is now used.
.. versionadded:: 0.6.3 added
:paramref:`.EnvironmentContext.configure.user_module_prefix`
.. seealso::
:ref:`autogen_module_prefix`
:param process_revision_directives: a callable function that will
be passed a structure representing the end result of an autogenerate
or plain "revision" operation, which can be manipulated to affect
how the ``alembic revision`` command ultimately outputs new
revision scripts. The structure of the callable is::
def process_revision_directives(context, revision, directives):
pass
The ``directives`` parameter is a Python list containing
a single :class:`.MigrationScript` directive, which represents
the revision file to be generated. This list as well as its
contents may be freely modified to produce any set of commands.
The section :ref:`customizing_revision` shows an example of
doing this. The ``context`` parameter is the
:class:`.MigrationContext` in use,
and ``revision`` is a tuple of revision identifiers representing the
current revision of the database.
The callable is invoked at all times when the ``--autogenerate``
option is passed to ``alembic revision``. If ``--autogenerate``
is not passed, the callable is invoked only if the
``revision_environment`` variable is set to True in the Alembic
configuration, in which case the given ``directives`` collection
will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps`
collections for ``.upgrade_ops`` and ``.downgrade_ops``. The
``--autogenerate`` option itself can be inferred by inspecting
``context.config.cmd_opts.autogenerate``.
The callable function may optionally be an instance of
a :class:`.Rewriter` object. This is a helper object that
assists in the production of autogenerate-stream rewriter functions.
.. versionadded:: 0.8.0
.. seealso::
:ref:`customizing_revision`
:ref:`autogen_rewriter`
Parameters specific to individual backends:
:param mssql_batch_separator: The "batch separator" which will
be placed between each statement when generating offline SQL Server
migrations. Defaults to ``GO``. Note this is in addition to the
customary semicolon ``;`` at the end of each statement; SQL Server
considers the "batch separator" to denote the end of an
individual statement execution, and cannot group certain
dependent operations in one step.
:param oracle_batch_separator: The "batch separator" which will
be placed between each statement when generating offline
Oracle migrations. Defaults to ``/``. Oracle doesn't add a
semicolon between statements like most other backends.
"""
opts = self.context_opts
if transactional_ddl is not None:
opts["transactional_ddl"] = transactional_ddl
if output_buffer is not None:
opts["output_buffer"] = output_buffer
elif self.config.output_buffer is not None:
opts["output_buffer"] = self.config.output_buffer
if starting_rev:
opts['starting_rev'] = starting_rev
if tag:
opts['tag'] = tag
if template_args and 'template_args' in opts:
opts['template_args'].update(template_args)
opts["transaction_per_migration"] = transaction_per_migration
opts['target_metadata'] = target_metadata
opts['include_symbol'] = include_symbol
opts['include_object'] = include_object
opts['include_schemas'] = include_schemas
opts['render_as_batch'] = render_as_batch
opts['upgrade_token'] = upgrade_token
opts['downgrade_token'] = downgrade_token
opts['sqlalchemy_module_prefix'] = sqlalchemy_module_prefix
opts['alembic_module_prefix'] = alembic_module_prefix
opts['user_module_prefix'] = user_module_prefix
opts['literal_binds'] = literal_binds
opts['process_revision_directives'] = process_revision_directives
if render_item is not None:
opts['render_item'] = render_item
if compare_type is not None:
opts['compare_type'] = compare_type
if compare_server_default is not None:
opts['compare_server_default'] = compare_server_default
opts['script'] = self.script
opts.update(kw)
self._migration_context = MigrationContext.configure(
connection=connection,
url=url,
dialect_name=dialect_name,
environment_context=self,
opts=opts
)
def run_migrations(self, **kw):
"""Run migrations as determined by the current command line
configuration
as well as versioning information present (or not) in the current
database connection (if one is present).
The function accepts optional ``**kw`` arguments. If these are
passed, they are sent directly to the ``upgrade()`` and
``downgrade()``
functions within each target revision file. By modifying the
``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()``
functions accept arguments, parameters can be passed here so that
contextual information, usually information to identify a particular
database in use, can be passed from a custom ``env.py`` script
to the migration functions.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
with Operations.context(self._migration_context):
self.get_context().run_migrations(**kw)
def execute(self, sql, execution_options=None):
"""Execute the given SQL using the current change context.
The behavior of :meth:`.execute` is the same
as that of :meth:`.Operations.execute`. Please see that
function's documentation for full detail including
caveats and limitations.
This function requires that a :class:`.MigrationContext` has
first been made available via :meth:`.configure`.
"""
self.get_context().execute(sql,
execution_options=execution_options)
def static_output(self, text):
"""Emit text directly to the "offline" SQL stream.
Typically this is for emitting comments that
start with --. The statement is not treated
as a SQL execution, no ; or batch separator
is added, etc.
"""
self.get_context().impl.static_output(text)
def begin_transaction(self):
"""Return a context manager that will
enclose an operation within a "transaction",
as defined by the environment's offline
and transactional DDL settings.
e.g.::
with context.begin_transaction():
context.run_migrations()
:meth:`.begin_transaction` is intended to
"do the right thing" regardless of
calling context:
* If :meth:`.is_transactional_ddl` is ``False``,
returns a "do nothing" context manager
which otherwise produces no transactional
state or directives.
* If :meth:`.is_offline_mode` is ``True``,
returns a context manager that will
invoke the :meth:`.DefaultImpl.emit_begin`
and :meth:`.DefaultImpl.emit_commit`
methods, which will produce the string
directives ``BEGIN`` and ``COMMIT`` on
the output stream, as rendered by the
target backend (e.g. SQL Server would
emit ``BEGIN TRANSACTION``).
* Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin`
on the current online connection, which
returns a :class:`sqlalchemy.engine.Transaction`
object. This object demarcates a real
transaction and is itself a context manager,
which will roll back if an exception
is raised.
Note that a custom ``env.py`` script which
has more specific transactional needs can of course
manipulate the :class:`~sqlalchemy.engine.Connection`
directly to produce transactional state in "online"
mode.
"""
return self.get_context().begin_transaction()
def get_context(self):
"""Return the current :class:`.MigrationContext` object.
If :meth:`.EnvironmentContext.configure` has not been
called yet, raises an exception.
"""
if self._migration_context is None:
raise Exception("No context has been configured yet.")
return self._migration_context
def get_bind(self):
"""Return the current 'bind'.
In "online" mode, this is the
:class:`sqlalchemy.engine.Connection` currently being used
to emit SQL to the database.
This function requires that a :class:`.MigrationContext`
has first been made available via :meth:`.configure`.
"""
return self.get_context().bind
def get_impl(self):
return self.get_context().impl
| graingert/alembic | alembic/runtime/environment.py | Python | mit | 35,396 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
def get_parent_doc(doc):
"""Returns document of `reference_doctype`, `reference_doctype`"""
if not hasattr(doc, "parent_doc"):
if doc.reference_doctype and doc.reference_name:
doc.parent_doc = frappe.get_doc(doc.reference_doctype, doc.reference_name)
else:
doc.parent_doc = None
return doc.parent_doc
def set_timeline_doc(doc):
"""Set timeline_doctype and timeline_name"""
parent_doc = get_parent_doc(doc)
if (doc.timeline_doctype and doc.timeline_name) or not parent_doc:
return
timeline_field = parent_doc.meta.timeline_field
if not timeline_field:
return
doctype = parent_doc.meta.get_link_doctype(timeline_field)
name = parent_doc.get(timeline_field)
if doctype and name:
doc.timeline_doctype = doctype
doc.timeline_name = name
else:
return
def find(list_of_dict, match_function):
'''Returns the first dict in a list of dicts that matches the conditions
checked by match_function
Usage:
list_of_dict = [{'name': 'Suraj'}, {'name': 'Aditya'}]
required_dict = find(list_of_dict, lambda d: d['name'] == 'Aditya')
'''
for entry in list_of_dict:
if match_function(entry):
return entry
return None
def find_all(list_of_dict, match_function):
'''Returns all matching dicts in a list of dicts.
Uses match_function to select the matching dicts
Usage:
colored_shapes = [
{'color': 'red', 'shape': 'square'},
{'color': 'red', 'shape': 'circle'},
{'color': 'blue', 'shape': 'triangle'}
]
red_shapes = find_all(colored_shapes, lambda d: d['color'] == 'red')
'''
found = []
for entry in list_of_dict:
if match_function(entry):
found.append(entry)
return found
def ljust_list(_list, length, fill_word=None):
"""
Similar to ljust but for list.
Usage:
$ ljust_list([1, 2, 3], 5)
> [1, 2, 3, None, None]
"""
# make a copy to avoid mutation of passed list
_list = list(_list)
fill_length = length - len(_list)
if fill_length > 0:
_list.extend([fill_word] * fill_length)
return _list
| mhbu50/frappe | frappe/core/utils.py | Python | mit | 2,081 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-08-16 21:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('quotes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='quote',
name='mentions',
field=models.ManyToManyField(blank=True, editable=False, related_name='mentioned_in', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='quote',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='author_of', to=settings.AUTH_USER_MODEL),
),
]
| nivbend/memoir | quotes/migrations/0002_mentions.py | Python | mit | 885 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
File name: myWirelessRouter.py
Author: xu42 <https://github.com/xu42>
Date created: 04/04/2016
Python Version: 3.5
Monitor whether a given wireless device is connected to the router, and guess what it is currently doing
Works with Mercury MW310R wireless routers
'''
import http.client
import base64
import re
import time
# Configuration parameters
# url: router admin login address
# port: router admin login port
# timeout: request timeout
# password: router admin login password
# mac_address: MAC address of the device to monitor
# payload: data sent with the ajax request, please ignore
config = {
'url': '123.123.123.123',
'port': '80',
'timeout': 20,
'password': '123456',
'mac_address': '11:11:11:11:11:11',
'payload': '[LAN_WLAN_ASSOC_DEV#0,0,0,0,0,0#0,0,0,0,0,0]0,4\r\nAssociatedDeviceMACAddress\r\nX_TP_TotalPacketsSent\r\nX_TP_TotalPacketsReceived\r\nX_TP_HostName\r\n'
}
# Build the request headers
def __setHeaders():
headers = {
'cookie': 'Authorization=Basic ' + base64.b64encode(config['password'].encode('utf-8')).decode('utf-8'),
'referer': config['url']
}
return headers
# Send the request
def __initRequest():
conn = http.client.HTTPConnection(config['url'], config['port'], config['timeout'])
conn.request('POST', "/cgi?5", config['payload'], __setHeaders())
res = conn.getresponse()
if res.status != 200:
print(res.status, res.reason)
exit()
data = res.read()
conn.close()
return data.decode('utf-8')
# Parse the currently connected devices and their received/sent packet counts
def __getAssociatedDevice():
a = re.findall(r"=(.+?)\s", __initRequest())
# device_list = zip(a[::3], a[1::3], a[2::3])
return a
# Check whether a given device is online
def __isOnline(MACAddress):
device_list = __getAssociatedDevice()
try:
device_list.index(MACAddress)
return True
except ValueError:
return False
# Guess what the device is currently doing
def __doWhat(MACAddress = config['mac_address']):
if __isOnline(MACAddress):
print("已连入WIFI...")
print("正在猜解当前在做...")
device_list_1 = __getAssociatedDevice()
time.sleep(10)
device_list_2 = __getAssociatedDevice()
index_1 = device_list_1.index(MACAddress)
index_2 = device_list_2.index(MACAddress)
less = int(device_list_2[index_2 + 2]) - int(device_list_1[index_1 + 2])
if less < 5:
print("10s内接收数据包:", less, ",可能连着WIFI什么也没做...")
elif less < 30:
print("10s内接收数据包:", less, ",可能聊着QQ...")
else:
print("10s内接收数据包:", less, ",可能刷微博|看视频...")
else:
print("没有连入WIFI...")
def main():
__doWhat()
if __name__ == '__main__':
main()
| xu42/Python | myWirelessRouter/myWirelessRouter.py | Python | mit | 2,628 |
from django.urls import path
from . import views
urlpatterns = [
path('overview/', views.overview, name='overview'),
]
| mrts/foodbank-campaign | src/locations/urls.py | Python | mit | 125 |
from schedulerEdge import SchedulerEdge
import time
if __name__ == '__main__':
#scheduler = BackgroundScheduler()
#scheduler.add_job(tick, 'cron', second = '5,10',minute = '40' , id = "12")
#scheduler.start()
#while True:
# time.sleep(1)
test_sched = SchedulerEdge()
#test_sched.add_job(3)
test_sched.add_job('{ "modo": "cron", "info": {"second": "5", "minute": "*/1", "hour": "*", "day": "*", "week": "*", "month": "*", "year": "*" }}')
#test_sched.add_job('interval-12')
#test_sched.add_job('interval-45')
#time.sleep(2)
#test_sched.remove_job('interval-45')
| hubertokf/lupsEdgeServer | projects/old_files/moduleOfRules/testesched.py | Python | mit | 613 |
import os.path
import sqlite3
from config import CONFIG
def init_db():
"""初始化数据库"""
f = os.path.exists(CONFIG['DB_FILE'])
if f:
print("数据库文件存在...")
with open(CONFIG['SQL_SCRIPT_FILE'], 'r', encoding='utf8') as f:
file_content = f.read()
con = sqlite3.connect(CONFIG['DB_FILE'])
cur = con.cursor()
cur.executescript(file_content)
con.commit()
con.close()
return "初始化数据库完成!"
print(init_db())
| zzir/white | init_db.py | Python | mit | 493 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/scons-time/mem/stage.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify the mem --stage option.
"""
import TestSCons_time
test = TestSCons_time.TestSCons_time()
test.fake_logfile('foo-000-0.log', 0)
test.fake_logfile('foo-000-1.log', 0)
test.fake_logfile('foo-000-2.log', 0)
test.fake_logfile('foo-001-0.log', 1)
test.fake_logfile('foo-001-1.log', 1)
test.fake_logfile('foo-001-2.log', 1)
expect = """\
set key bottom left
plot '-' title "Startup" with lines lt 1, \\
'-' title "Full build" with lines lt 2, \\
'-' title "Up-to-date build" with lines lt 3
# Startup
0 %(index)s000.000
1 %(index)s001.000
e
# Full build
0 %(index)s000.000
1 %(index)s001.000
e
# Up-to-date build
0 %(index)s000.000
1 %(index)s001.000
e
"""
pre_read = expect % {'index' : 1}
post_read = expect % {'index' : 2}
pre_build = expect % {'index' : 3}
post_build = expect % {'index' : 4}
test.run(arguments = 'mem --fmt gnuplot --stage pre-read', stdout=pre_read)
test.run(arguments = 'mem --fmt gnuplot --stage=post-read', stdout=post_read)
test.run(arguments = 'mem --fmt gnuplot --stage=pre-build', stdout=pre_build)
test.run(arguments = 'mem --fmt gnuplot --stage post-build', stdout=post_build)
expect = """\
scons-time: mem: Unrecognized stage "unknown".
"""
test.run(arguments = 'mem --fmt gnuplot --stage unknown',
status = 1,
stderr = expect)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| EmanueleCannizzaro/scons | test/scons-time/mem/stage.py | Python | mit | 2,679 |
import sys
import os
import os.path
from jinja2 import Template
from configparser import ConfigParser
import io
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: <program> <deploy_cfg_template_file> <file_with_properties>")
print("Properties from <file_with_properties> will be applied to <deploy_cfg_template_file>")
print("template which will be overwritten with .orig copy saved in the same folder first.")
sys.exit(1)
file = open(sys.argv[1], 'r')
text = file.read()
t = Template(text)
config = ConfigParser()
if os.path.isfile(sys.argv[2]):
config.read(sys.argv[2])
elif "KBASE_ENDPOINT" in os.environ:
kbase_endpoint = os.environ.get("KBASE_ENDPOINT")
props = "[global]\n" + \
"kbase_endpoint = " + kbase_endpoint + "\n" + \
"job_service_url = " + kbase_endpoint + "/userandjobstate\n" + \
"workspace_url = " + kbase_endpoint + "/ws\n" + \
"shock_url = " + kbase_endpoint + "/shock-api\n" + \
"handle_url = " + kbase_endpoint + "/handle_service\n" + \
"srv_wiz_url = " + kbase_endpoint + "/service_wizard\n" + \
"njsw_url = " + kbase_endpoint + "/njs_wrapper\n"
if "AUTH_SERVICE_URL" in os.environ:
props += "auth_service_url = " + os.environ.get("AUTH_SERVICE_URL") + "\n"
elif "auth2services" in kbase_endpoint:
props += "auth_service_url = " + kbase_endpoint + "/auth/api/legacy/KBase/Sessions/Login\n"
props += "auth_service_url_allow_insecure = " + \
os.environ.get("AUTH_SERVICE_URL_ALLOW_INSECURE", "false") + "\n"
config.readfp(io.StringIO(props))
else:
raise ValueError('Neither ' + sys.argv[2] + ' file nor KBASE_ENDPOINT env-variable found')
props = dict(config.items("global"))
output = t.render(props)
with open(sys.argv[1] + ".orig", 'w') as f:
f.write(text)
with open(sys.argv[1], 'w') as f:
f.write(output)
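# Illustrative (assumed) contents of <file_with_properties> used to fill the template:
#   [global]
#   kbase_endpoint = https://appdev.kbase.us/services
#   auth_service_url_allow_insecure = false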
| briehl/narrative-test | scripts/prepare_deploy_cfg.py | Python | mit | 2,057 |
# -*- coding: utf-8 -*-
"""Objects representing WikidataQuery query syntax and API."""
#
# (C) Pywikibot team, 2013
#
# Distributed under the terms of the MIT license.
from __future__ import unicode_literals
import json
import sys
if sys.version_info[0] > 2:
from urllib.parse import quote
basestring = (str, )
else:
from urllib2 import quote
import pickle
import os
import hashlib
import time
import tempfile
import pywikibot
from pywikibot.comms import http
from pywikibot.page import ItemPage, PropertyPage, Claim
from pywikibot import config
def listify(x):
"""
If given a non-list, encapsulate in a single-element list.
@rtype: list
"""
return x if isinstance(x, list) else [x]
class QuerySet():
"""
A QuerySet represents a set of queries or other query sets.
Queries may be joined by operators (AND and OR).
A QuerySet stores this information as a list of Query(Sets) and
a joiner operator to join them all together
"""
def __init__(self, q):
"""
Initialise a query set from a Query or another QuerySet.
@type q: Query or QuerySet
"""
self.qs = [q]
def addJoiner(self, args, joiner):
"""
Add to this QuerySet using the given joiner.
If the given joiner is not the same as we used before in
this QuerySet, nest the current one in parens before joining.
This makes the implicit grouping of the API explicit.
@return: a new query set representing the joining of this one and
the arguments
"""
if len(self.qs) > 1 and joiner != self.joiner:
left = QuerySet(self)
else:
left = self
left.joiner = joiner
for a in listify(args):
left.qs.append(a)
return left
def AND(self, args):
"""
Add the given args (Queries or QuerySets) to the Query set as a logical conjunction (AND).
@type args: Query or QuerySet
"""
return self.addJoiner(args, "AND")
def OR(self, args):
"""
Add the given args (Queries or QuerySets) to the Query set as a logical disjunction (OR).
@type args: Query or QuerySet
"""
return self.addJoiner(args, "OR")
def __str__(self):
"""
Output as an API-ready string.
@rtype: str
"""
def bracketIfQuerySet(q):
if isinstance(q, QuerySet) and q.joiner != self.joiner:
return "(%s)" % q
else:
return str(q)
s = bracketIfQuerySet(self.qs[0])
for q in self.qs[1:]:
s += " %s %s" % (self.joiner, bracketIfQuerySet(q))
return s
def __repr__(self):
return u"QuerySet(%s)" % self
class Query():
"""
A query is a single query for the WikidataQuery API.
For example:
claim[100:60] or link[enwiki]
Construction of a Query can throw a TypeError if you feed it bad
parameters. Exactly what these need to be depends on the Query
"""
def AND(self, ands):
"""
Produce a query set ANDing this query and all the given query/sets.
@type ands: Query or list of Query
"""
return QuerySet(self).addJoiner(ands, "AND")
def OR(self, ors):
"""
Produce a query set ORing this query and all the given query/sets.
@type ors: Query or list of Query
"""
return QuerySet(self).addJoiner(ors, "OR")
def formatItem(self, item):
"""
Default item formatting is string.
This will work for queries, querysets, ints and strings
"""
return str(item)
def formatList(self, l):
"""
Format and comma-join a list.
@type l: list
"""
return ",".join([self.formatItem(x) for x in l])
@staticmethod
def isOrContainsOnlyTypes(items, types):
"""
Either this item is one of the given types, or it is a list of only those types.
@rtype: bool
"""
if isinstance(items, list):
for x in items:
found = False
for typ in listify(types):
if isinstance(x, typ):
found = True
break
if not found:
return False
else:
for typ in listify(types):
found = False
if isinstance(items, typ):
found = True
break
if not found:
return False
return True
def validate(self):
"""
Validate the query parameters.
Default validate result is a pass - subclasses need to implement
this if they want to check their parameters.
@return: True
@rtype: bool
"""
return True
def validateOrRaise(self, msg=None):
if not self.validate():
raise TypeError(msg)
def convertWDType(self, item):
"""
Convert Wikibase items like ItemPage or PropertyPage into integer IDs.
The resulting IDs may be used in query strings.
@param item: A single item. One of ItemPages, PropertyPages, int
or anything that can be fed to int()
@return: the int ID of the item
"""
if isinstance(item, ItemPage) or isinstance(item, PropertyPage):
return item.getID(numeric=True)
else:
return int(item)
def convertWDTypes(self, items):
return [self.convertWDType(x) for x in listify(items)]
def __str__(self):
"""
Generate a query string to be passed to the WDQ API.
Sub-classes must override this method.
@raise NotImplementedError: Always raised by this abstract method
"""
raise NotImplementedError
def __repr__(self):
return u"Query(%s)" % self
class HasClaim(Query):
"""
This is a Query of the form "claim[prop:val]".
It is subclassed by
the other similar forms like noclaim and string
"""
queryType = "claim"
def __init__(self, prop, items=[]):
"""Constructor."""
self.prop = self.convertWDType(prop)
if isinstance(items, Query):
self.items = items
elif isinstance(self, StringClaim):
self.items = listify(items)
else:
self.items = self.convertWDTypes(items)
self.validateOrRaise()
def formatItems(self):
res = ''
if self.items:
res += ":" + ",".join([self.formatItem(x) for x in self.items])
return res
def validate(self):
return self.isOrContainsOnlyTypes(self.items, [int, Query])
def __str__(self):
if isinstance(self.items, list):
return "%s[%s%s]" % (self.queryType, self.prop, self.formatItems())
elif isinstance(self.items, Query):
return "%s[%s:(%s)]" % (self.queryType, self.prop, self.items)
class NoClaim(HasClaim):
"""Query of the form noclaim[PROPERTY]."""
queryType = "noclaim"
class StringClaim(HasClaim):
"""Query of the form string[PROPERTY:"STRING",...]."""
queryType = "string"
def formatItem(self, x):
"""Add quotes around string."""
return '"%s"' % x
def validate(self):
return self.isOrContainsOnlyTypes(self.items, basestring)
class Tree(Query):
"""Query of the form tree[ITEM,...][PROPERTY,...]<PROPERTY,...>."""
queryType = "tree"
def __init__(self, item, forward=[], reverse=[]):
"""
Constructor.
@param item: The root item
@param forward: List of forward properties, can be empty
@param reverse: List of reverse properties, can be empty
"""
# check sensible things coming in, as we lose info once we do
# type conversion
if not self.isOrContainsOnlyTypes(item, [int, ItemPage]):
raise TypeError("The item paramter must contain or be integer IDs "
"or page.ItemPages")
elif (not self.isOrContainsOnlyTypes(forward, [int, PropertyPage]) or
not self.isOrContainsOnlyTypes(reverse, [int, PropertyPage])):
raise TypeError("The forward and reverse parameters must contain "
"or be integer IDs or page.PropertyPages")
self.item = self.convertWDTypes(item)
self.forward = self.convertWDTypes(forward)
self.reverse = self.convertWDTypes(reverse)
self.validateOrRaise()
def validate(self):
return (self.isOrContainsOnlyTypes(self.item, int) and
self.isOrContainsOnlyTypes(self.forward, int) and
self.isOrContainsOnlyTypes(self.reverse, int))
def __str__(self):
return "%s[%s][%s][%s]" % (self.queryType, self.formatList(self.item),
self.formatList(self.forward),
self.formatList(self.reverse))
class Around(Query):
"""A query in the form around[PROPERTY,LATITUDE,LONGITUDE,RADIUS]."""
queryType = "around"
def __init__(self, prop, coord, rad):
"""Constructor."""
self.prop = self.convertWDType(prop)
self.lt = coord.lat
self.lg = coord.lon
self.rad = rad
def validate(self):
return isinstance(self.prop, int)
def __str__(self):
return "%s[%s,%s,%s,%s]" % (self.queryType, self.prop,
self.lt, self.lg, self.rad)
class Between(Query):
"""
A query in the form between[PROP, BEGIN, END].
You have to give prop and one of begin or end. Note that times have
to be in UTC, timezones are not supported by the API
@param prop: the property
@param begin: WbTime object representing the beginning of the period
@param end: WbTime object representing the end of the period
"""
queryType = "between"
def __init__(self, prop, begin=None, end=None):
"""Constructor."""
self.prop = self.convertWDType(prop)
self.begin = begin
self.end = end
def validate(self):
return (self.begin or self.end) and isinstance(self.prop, int)
def __str__(self):
begin = self.begin.toTimestr() if self.begin else ''
# if you don't have an end, you don't put in the comma
end = ',' + self.end.toTimestr() if self.end else ''
return "%s[%s,%s%s]" % (self.queryType, self.prop, begin, end)
class Link(Query):
"""
A query in the form link[LINK,...], which also includes nolink.
All link elements have to be strings, or validation will throw
"""
queryType = "link"
def __init__(self, link):
"""Constructor."""
self.link = listify(link)
self.validateOrRaise()
def validate(self):
return self.isOrContainsOnlyTypes(self.link, basestring)
def __str__(self):
return "%s[%s]" % (self.queryType, self.formatList(self.link))
class NoLink(Link):
"""A query in the form nolink[..]."""
queryType = "nolink"
def fromClaim(claim):
"""
Construct from a pywikibot.page Claim object.
@type claim: L{pywikibot.page.Claim}
@rtype: L{Query}
"""
if not isinstance(claim, Claim):
raise TypeError("claim must be a page.Claim")
if claim.type == 'wikibase-item':
return HasClaim(claim.getID(numeric=True), claim.getTarget().getID(numeric=True))
if claim.type == 'string':
return StringClaim(claim.getID(numeric=True), claim.getTarget())
else:
raise TypeError("Cannot construct a query from a claim of type %s"
% claim.type)
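# Illustrative sketch (added for clarity, not part of the original module): the
# query classes above only build WDQ strings, so they can be composed without
# any network access. The IDs reuse the examples from the Query docstring.
#
#     q = HasClaim(100, 60).AND(Link("enwiki"))
#     str(q)   # -> 'claim[100:60] AND link[enwiki]'
#
#     t = Tree(100, forward=40)
#     str(t)   # -> 'tree[100][40][]'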
class WikidataQuery():
"""
An interface to the WikidataQuery API.
Default host is
https://wdq.wmflabs.org/, but you can substitute
a different one.
Caching defaults to a subdir of the system temp directory with a
1 hour max cache age.
Set a zero or negative maxCacheAge to disable caching
"""
def __init__(self, host="https://wdq.wmflabs.org", cacheDir=None,
cacheMaxAge=60):
"""Constructor."""
self.host = host
self.cacheMaxAge = cacheMaxAge
if cacheDir:
self.cacheDir = cacheDir
else:
self.cacheDir = os.path.join(tempfile.gettempdir(),
"wikidataquery_cache")
def getUrl(self, queryStr):
return "%s/api?%s" % (self.host, queryStr)
def getQueryString(self, q, labels=[], props=[]):
"""
Get the query string for a given query or queryset.
@return: string including labels and props
"""
qStr = "q=%s" % quote(str(q))
if labels:
qStr += "&labels=%s" % ','.join(labels)
if props:
qStr += "&props=%s" % ','.join(props)
return qStr
def getCacheFilename(self, queryStr):
"""
Encode a query into a unique and universally safe format.
@rtype: unicode
"""
encQuery = hashlib.sha1(queryStr.encode('utf8')).hexdigest() + ".wdq_cache"
return os.path.join(self.cacheDir, encQuery)
def readFromCache(self, queryStr):
"""
Load the query result from the cache, if possible.
@return: None if the data is not there or if it is too old.
"""
if self.cacheMaxAge <= 0:
return None
cacheFile = self.getCacheFilename(queryStr)
if os.path.isfile(cacheFile):
mtime = os.path.getmtime(cacheFile)
now = time.time()
if ((now - mtime) / 60) < self.cacheMaxAge:
with open(cacheFile, 'rb') as f:
try:
data = pickle.load(f)
except pickle.UnpicklingError:
pywikibot.warning(u"Couldn't read cached data from %s"
% cacheFile)
data = None
return data
return None
def saveToCache(self, q, data):
"""
Save data from a query to a cache file, if enabled.
@rtype: None
"""
if self.cacheMaxAge <= 0:
return
        # we have to use our own query string, as otherwise we may not
        # be able to find the cache file again if there are e.g.
        # whitespace differences
cacheFile = self.getCacheFilename(q)
if os.path.exists(cacheFile) and not os.path.isfile(cacheFile):
return
if not os.path.exists(self.cacheDir):
os.makedirs(self.cacheDir)
with open(cacheFile, 'wb') as f:
try:
pickle.dump(data, f, protocol=config.pickle_protocol)
except IOError:
pywikibot.warning(u"Failed to write cache file %s" % cacheFile)
def getDataFromHost(self, queryStr):
"""
Go and fetch a query from the host's API.
@rtype: dict
"""
url = self.getUrl(queryStr)
try:
resp = http.fetch(url)
except:
pywikibot.warning(u"Failed to retrieve %s" % url)
raise
try:
data = json.loads(resp.content)
except ValueError:
pywikibot.warning(u"Data received from host but no JSON could be decoded")
raise pywikibot.ServerError("Data received from host but no JSON could be decoded")
return data
def query(self, q, labels=[], props=[]):
"""
Actually run a query over the API.
@return: dict of the interpreted JSON or None on failure
"""
fullQueryString = self.getQueryString(q, labels, props)
# try to get cached data first
data = self.readFromCache(fullQueryString)
if data:
return data
# the cached data must not be OK, go and get real data from the
# host's API
data = self.getDataFromHost(fullQueryString)
# no JSON found
if not data:
return None
# cache data for next time
self.saveToCache(fullQueryString, data)
return data
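if __name__ == '__main__':
    # Minimal sketch (added for illustration): build a query string locally
    # without contacting the WikidataQuery host. The IDs follow the docstring
    # example above, and cacheMaxAge=0 simply disables the on-disk cache.
    example_query = HasClaim(100, 60).AND(Link('enwiki'))
    wdq = WikidataQuery(cacheMaxAge=0)
    print(wdq.getQueryString(example_query, labels=['en'], props=['claims']))
    # A real lookup would additionally call wdq.query(example_query), which
    # needs network access to the configured host.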
| hperala/kontuwikibot | pywikibot/data/wikidataquery.py | Python | mit | 16,312 |
from typing import cast
from pytest import raises
from graphql import graphql_sync
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
assert_enum_type,
)
from graphql.utilities import (
build_schema,
build_client_schema,
introspection_from_schema,
print_schema,
)
from graphql.utilities.get_introspection_query import (
IntrospectionEnumType,
IntrospectionInputObjectType,
IntrospectionInterfaceType,
IntrospectionObjectType,
IntrospectionType,
IntrospectionUnionType,
)
from ..utils import dedent
def cycle_introspection(sdl_string: str):
"""Test that the client side introspection gives the same result.
    This function does a full cycle of going from a string with the contents of the SDL,
    building an in-memory GraphQLSchema from it, producing a client-side representation
    of the schema by using "build_client_schema", and then returning that schema printed as SDL.
"""
server_schema = build_schema(sdl_string)
initial_introspection = introspection_from_schema(server_schema)
client_schema = build_client_schema(initial_introspection)
# If the client then runs the introspection query against the client-side schema,
# it should get a result identical to what was returned by the server
second_introspection = introspection_from_schema(client_schema)
assert initial_introspection == second_introspection
return print_schema(client_schema)
def describe_type_system_build_schema_from_introspection():
def builds_a_simple_schema():
sdl = dedent(
'''
"""Simple schema"""
schema {
query: Simple
}
"""This is a simple type"""
type Simple {
"""This is a string field"""
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_without_the_query_type():
sdl = dedent(
"""
type Query {
foo: String
}
"""
)
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
del introspection["__schema"]["queryType"] # type: ignore
client_schema = build_client_schema(introspection)
assert client_schema.query_type is None
assert print_schema(client_schema) == sdl
def builds_a_simple_schema_with_all_operation_types():
sdl = dedent(
'''
schema {
query: QueryType
mutation: MutationType
subscription: SubscriptionType
}
"""This is a simple mutation type"""
type MutationType {
"""Set the string field"""
string: String
}
"""This is a simple query type"""
type QueryType {
"""This is a string field"""
string: String
}
"""This is a simple subscription type"""
type SubscriptionType {
"""This is a string field"""
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def uses_built_in_scalars_when_possible():
sdl = dedent(
"""
scalar CustomScalar
type Query {
int: Int
float: Float
string: String
boolean: Boolean
id: ID
custom: CustomScalar
}
"""
)
assert cycle_introspection(sdl) == sdl
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
# Built-ins are used
assert client_schema.get_type("Int") is GraphQLInt
assert client_schema.get_type("Float") is GraphQLFloat
assert client_schema.get_type("String") is GraphQLString
assert client_schema.get_type("Boolean") is GraphQLBoolean
assert client_schema.get_type("ID") is GraphQLID
# Custom are built
custom_scalar = schema.get_type("CustomScalar")
assert client_schema.get_type("CustomScalar") is not custom_scalar
def includes_standard_types_only_if_they_are_used():
schema = build_schema(
"""
type Query {
foo: String
}
"""
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
assert client_schema.get_type("Int") is None
assert client_schema.get_type("Float") is None
assert client_schema.get_type("ID") is None
def builds_a_schema_with_a_recursive_type_reference():
sdl = dedent(
"""
schema {
query: Recur
}
type Recur {
recur: Recur
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_a_circular_type_reference():
sdl = dedent(
"""
type Dog {
bestFriend: Human
}
type Human {
bestFriend: Dog
}
type Query {
dog: Dog
human: Human
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_interface():
sdl = dedent(
'''
type Dog implements Friendly {
bestFriend: Friendly
}
interface Friendly {
"""The best friend of this friendly thing"""
bestFriend: Friendly
}
type Human implements Friendly {
bestFriend: Friendly
}
type Query {
friendly: Friendly
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_interface_hierarchy():
sdl = dedent(
'''
type Dog implements Friendly & Named {
bestFriend: Friendly
name: String
}
interface Friendly implements Named {
"""The best friend of this friendly thing"""
bestFriend: Friendly
name: String
}
type Human implements Friendly & Named {
bestFriend: Friendly
name: String
}
interface Named {
name: String
}
type Query {
friendly: Friendly
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_implicit_interface():
sdl = dedent(
'''
type Dog implements Friendly {
bestFriend: Friendly
}
interface Friendly {
"""The best friend of this friendly thing"""
bestFriend: Friendly
}
type Query {
dog: Dog
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_a_union():
sdl = dedent(
"""
type Dog {
bestFriend: Friendly
}
union Friendly = Dog | Human
type Human {
bestFriend: Friendly
}
type Query {
friendly: Friendly
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_complex_field_values():
sdl = dedent(
"""
type Query {
string: String
listOfString: [String]
nonNullString: String!
nonNullListOfString: [String]!
nonNullListOfNonNullString: [String!]!
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_field_arguments():
sdl = dedent(
'''
type Query {
"""A field with a single arg"""
one(
"""This is an int arg"""
intArg: Int
): String
"""A field with a two args"""
two(
"""This is an list of int arg"""
listArg: [Int]
"""This is a required arg"""
requiredArg: Boolean!
): String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_default_value_on_custom_scalar_field():
sdl = dedent(
"""
scalar CustomScalar
type Query {
testField(testArg: CustomScalar = "default"): String
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_enum():
food_enum = GraphQLEnumType(
"Food",
{
"VEGETABLES": GraphQLEnumValue(
1, description="Foods that are vegetables."
),
"FRUITS": GraphQLEnumValue(2),
"OILS": GraphQLEnumValue(3, deprecation_reason="Too fatty."),
},
description="Varieties of food stuffs",
)
schema = GraphQLSchema(
GraphQLObjectType(
"EnumFields",
{
"food": GraphQLField(
food_enum,
args={
"kind": GraphQLArgument(
food_enum, description="what kind of food?"
)
},
description="Repeats the arg you give it",
)
},
)
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
second_introspection = introspection_from_schema(client_schema)
assert second_introspection == introspection
# It's also an Enum type on the client.
client_food_enum = assert_enum_type(client_schema.get_type("Food"))
# Client types do not get server-only values, so the values mirror the names,
# rather than using the integers defined in the "server" schema.
values = {
name: value.to_kwargs() for name, value in client_food_enum.values.items()
}
assert values == {
"VEGETABLES": {
"value": "VEGETABLES",
"description": "Foods that are vegetables.",
"deprecation_reason": None,
"extensions": {},
"ast_node": None,
},
"FRUITS": {
"value": "FRUITS",
"description": None,
"deprecation_reason": None,
"extensions": {},
"ast_node": None,
},
"OILS": {
"value": "OILS",
"description": None,
"deprecation_reason": "Too fatty.",
"extensions": {},
"ast_node": None,
},
}
def builds_a_schema_with_an_input_object():
sdl = dedent(
'''
"""An input address"""
input Address {
"""What street is this address?"""
street: String!
"""The city the address is within?"""
city: String!
"""The country (blank will assume USA)."""
country: String = "USA"
}
type Query {
"""Get a geocode from an address"""
geocode(
"""The address to lookup"""
address: Address
): String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_field_arguments_with_default_values():
sdl = dedent(
"""
input Geo {
lat: Float
lon: Float
}
type Query {
defaultInt(intArg: Int = 30): String
defaultList(listArg: [Int] = [1, 2, 3]): String
defaultObject(objArg: Geo = {lat: 37.485, lon: -122.148}): String
defaultNull(intArg: Int = null): String
noDefault(intArg: Int): String
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_custom_directives():
sdl = dedent(
'''
"""This is a custom directive"""
directive @customDirective repeatable on FIELD
type Query {
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_without_directives():
sdl = dedent(
"""
type Query {
foo: String
}
"""
)
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
del introspection["__schema"]["directives"] # type: ignore
client_schema = build_client_schema(introspection)
assert schema.directives
assert client_schema.directives == ()
assert print_schema(client_schema) == sdl
def builds_a_schema_aware_of_deprecation():
sdl = dedent(
'''
directive @someDirective(
"""This is a shiny new argument"""
shinyArg: SomeInputObject
"""This was our design mistake :("""
oldArg: String @deprecated(reason: "Use shinyArg")
) on QUERY
enum Color {
"""So rosy"""
RED
"""So grassy"""
GREEN
"""So calming"""
BLUE
"""So sickening"""
MAUVE @deprecated(reason: "No longer in fashion")
}
input SomeInputObject {
"""Nothing special about it, just deprecated for some unknown reason"""
oldField: String @deprecated(reason: "Don't use it, use newField instead!")
"""Same field but with a new name"""
newField: String
}
type Query {
"""This is a shiny string field"""
shinyString: String
"""This is a deprecated string field"""
deprecatedString: String @deprecated(reason: "Use shinyString")
"""Color of a week"""
color: Color
"""Some random field"""
someField(
"""This is a shiny new argument"""
shinyArg: SomeInputObject
"""This was our design mistake :("""
oldArg: String @deprecated(reason: "Use shinyArg")
): String
}
''' # noqa: E501
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_empty_deprecation_reasons():
sdl = dedent(
"""
directive @someDirective(someArg: SomeInputObject @deprecated(reason: "")) on QUERY
type Query {
someField(someArg: SomeInputObject @deprecated(reason: "")): SomeEnum @deprecated(reason: "")
}
input SomeInputObject {
someInputField: String @deprecated(reason: "")
}
enum SomeEnum {
SOME_VALUE @deprecated(reason: "")
}
""" # noqa: E501
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_specified_by_url():
sdl = dedent(
"""
scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
type Query {
foo: Foo
}
"""
)
assert cycle_introspection(sdl) == sdl
def can_use_client_schema_for_limited_execution():
schema = build_schema(
"""
scalar CustomScalar
type Query {
foo(custom1: CustomScalar, custom2: CustomScalar): String
}
"""
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
class Data:
foo = "bar"
unused = "value"
result = graphql_sync(
client_schema,
"query Limited($v: CustomScalar) { foo(custom1: 123, custom2: $v) }",
root_value=Data(),
variable_values={"v": "baz"},
)
assert result.data == {"foo": "bar"}
def can_build_invalid_schema():
schema = build_schema("type Query", assume_valid=True)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection, assume_valid=True)
assert client_schema.to_kwargs()["assume_valid"] is True
def describe_throws_when_given_invalid_introspection():
dummy_schema = build_schema(
"""
type Query {
foo(bar: String): String
}
interface SomeInterface {
foo: String
}
union SomeUnion = Query
enum SomeEnum { FOO }
input SomeInputObject {
foo: String
}
directive @SomeDirective on QUERY
"""
)
def throws_when_introspection_is_missing_schema_property():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
build_client_schema(None) # type: ignore
assert str(exc_info.value) == (
"Invalid or incomplete introspection result. Ensure that you"
" are passing the 'data' attribute of an introspection response"
" and no 'errors' were returned alongside: None."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
build_client_schema({}) # type: ignore
assert str(exc_info.value) == (
"Invalid or incomplete introspection result. Ensure that you"
" are passing the 'data' attribute of an introspection response"
" and no 'errors' were returned alongside: {}."
)
def throws_when_referenced_unknown_type():
introspection = introspection_from_schema(dummy_schema)
introspection["__schema"]["types"] = [
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] != "Query"
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Invalid or incomplete schema, unknown type: Query."
" Ensure that a full introspection query is used"
" in order to build a client schema."
)
def throws_when_missing_definition_for_one_of_the_standard_scalars():
schema = build_schema(
"""
type Query {
foo: Float
}
"""
)
introspection = introspection_from_schema(schema)
introspection["__schema"]["types"] = [
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] != "Float"
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).endswith(
"Invalid or incomplete schema, unknown type: Float."
" Ensure that a full introspection query is used"
" in order to build a client schema."
)
def throws_when_type_reference_is_missing_name():
introspection = introspection_from_schema(dummy_schema)
query_type = cast(IntrospectionType, introspection["__schema"]["queryType"])
assert query_type["name"] == "Query"
del query_type["name"] # type: ignore
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == "Unknown type reference: {}."
def throws_when_missing_kind():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["kind"] == "OBJECT"
del query_type_introspection["kind"]
with raises(
TypeError,
match=r"^Invalid or incomplete introspection result\."
" Ensure that a full introspection query is used"
r" in order to build a client schema: {'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_interfaces():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
),
)
assert query_type_introspection["interfaces"] == []
del query_type_introspection["interfaces"] # type: ignore
with raises(
TypeError,
match="^Query interfaces cannot be resolved."
" Introspection result missing interfaces:"
r" {'kind': 'OBJECT', 'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def legacy_support_for_interfaces_with_null_as_interfaces_field():
introspection = introspection_from_schema(dummy_schema)
some_interface_introspection = cast(
IntrospectionInterfaceType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInterface"
),
)
assert some_interface_introspection["interfaces"] == []
some_interface_introspection["interfaces"] = None # type: ignore
client_schema = build_client_schema(introspection)
assert print_schema(client_schema) == print_schema(dummy_schema)
def throws_when_missing_fields():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
),
)
assert query_type_introspection["fields"]
del query_type_introspection["fields"] # type: ignore
with raises(
TypeError,
match="^Query fields cannot be resolved."
" Introspection result missing fields:"
r" {'kind': 'OBJECT', 'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_field_args():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
),
)
field = query_type_introspection["fields"][0]
assert field["args"]
del field["args"] # type: ignore
with raises(
TypeError,
match="^Query fields cannot be resolved."
r" Introspection result missing field args: {'name': 'foo', .*}\.$",
):
build_client_schema(introspection)
def throws_when_output_type_is_used_as_an_arg_type():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
),
)
arg = query_type_introspection["fields"][0]["args"][0]
assert arg["type"]["name"] == "String"
arg["type"]["name"] = "SomeUnion"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"Query fields cannot be resolved."
" Introspection must provide input type for arguments,"
" but received: SomeUnion."
)
def throws_when_output_type_is_used_as_an_input_value_type():
introspection = introspection_from_schema(dummy_schema)
input_object_type_introspection = cast(
IntrospectionInputObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInputObject"
),
)
input_field = input_object_type_introspection["inputFields"][0]
assert input_field["type"]["name"] == "String"
input_field["type"]["name"] = "SomeUnion"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"SomeInputObject fields cannot be resolved."
" Introspection must provide input type for input fields,"
" but received: SomeUnion."
)
def throws_when_input_type_is_used_as_a_field_type():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
),
)
field = query_type_introspection["fields"][0]
assert field["type"]["name"] == "String"
field["type"]["name"] = "SomeInputObject"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"Query fields cannot be resolved."
" Introspection must provide output type for fields,"
" but received: SomeInputObject."
)
def throws_when_missing_possible_types():
introspection = introspection_from_schema(dummy_schema)
some_union_introspection = cast(
IntrospectionUnionType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeUnion"
),
)
assert some_union_introspection["possibleTypes"]
del some_union_introspection["possibleTypes"] # type: ignore
with raises(
TypeError,
match="^Introspection result missing possibleTypes:"
r" {'kind': 'UNION', 'name': 'SomeUnion', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_enum_values():
introspection = introspection_from_schema(dummy_schema)
some_enum_introspection = cast(
IntrospectionEnumType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeEnum"
),
)
assert some_enum_introspection["enumValues"]
del some_enum_introspection["enumValues"] # type: ignore
with raises(
TypeError,
match="^Introspection result missing enumValues:"
r" {'kind': 'ENUM', 'name': 'SomeEnum', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_input_fields():
introspection = introspection_from_schema(dummy_schema)
some_input_object_introspection = cast(
IntrospectionInputObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInputObject"
),
)
assert some_input_object_introspection["inputFields"]
del some_input_object_introspection["inputFields"] # type: ignore
with raises(
TypeError,
match="^Introspection result missing inputFields:"
r" {'kind': 'INPUT_OBJECT', 'name': 'SomeInputObject', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_directive_locations():
introspection = introspection_from_schema(dummy_schema)
some_directive_introspection = introspection["__schema"]["directives"][0]
assert some_directive_introspection["name"] == "SomeDirective"
assert some_directive_introspection["locations"] == ["QUERY"]
del some_directive_introspection["locations"] # type: ignore
with raises(
TypeError,
match="^Introspection result missing directive locations:"
r" {'name': 'SomeDirective', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_directive_args():
introspection = introspection_from_schema(dummy_schema)
some_directive_introspection = introspection["__schema"]["directives"][0]
assert some_directive_introspection["name"] == "SomeDirective"
assert some_directive_introspection["args"] == []
del some_directive_introspection["args"] # type: ignore
with raises(
TypeError,
match="^Introspection result missing directive args:"
r" {'name': 'SomeDirective', .*}\.$",
):
build_client_schema(introspection)
def describe_very_deep_decorators_are_not_supported():
def fails_on_very_deep_lists_more_than_7_levels():
schema = build_schema(
"""
type Query {
foo: [[[[[[[[String]]]]]]]]
}
"""
)
introspection = introspection_from_schema(schema)
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Query fields cannot be resolved."
" Decorated type deeper than introspection query."
)
def fails_on_a_very_deep_non_null_more_than_7_levels():
schema = build_schema(
"""
type Query {
foo: [[[[String!]!]!]!]
}
"""
)
introspection = introspection_from_schema(schema)
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Query fields cannot be resolved."
" Decorated type deeper than introspection query."
)
def succeeds_on_deep_types_less_or_equal_7_levels():
# e.g., fully non-null 3D matrix
sdl = dedent(
"""
type Query {
foo: [[[String!]!]!]!
}
"""
)
assert cycle_introspection(sdl) == sdl
def describe_prevents_infinite_recursion_on_invalid_introspection():
def recursive_interfaces():
sdl = """
type Query {
foo: Foo
}
type Foo {
foo: String
}
"""
schema = build_schema(sdl, assume_valid=True)
introspection = introspection_from_schema(schema)
foo_introspection = cast(
IntrospectionObjectType,
next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Foo"
),
)
assert foo_introspection["interfaces"] == []
# we need to patch here since invalid interfaces cannot be built with Python
foo_introspection["interfaces"] = [
{"kind": "OBJECT", "name": "Foo", "ofType": None}
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Foo interfaces cannot be resolved."
" Expected Foo to be a GraphQL Interface type."
)
def recursive_union():
sdl = """
type Query {
foo: Foo
}
union Foo
"""
schema = build_schema(sdl, assume_valid=True)
introspection = introspection_from_schema(schema)
foo_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Foo"
)
assert foo_introspection["kind"] == "UNION"
assert foo_introspection["possibleTypes"] == []
# we need to patch here since invalid unions cannot be built with Python
foo_introspection["possibleTypes"] = [
{"kind": "UNION", "name": "Foo", "ofType": None}
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Foo types cannot be resolved."
" Expected Foo to be a GraphQL Object type."
)
| graphql-python/graphql-core | tests/utilities/test_build_client_schema.py | Python | mit | 35,277 |
response.title = "Enter H4H"
response.subtitle = "Smart House4H Access Control"
response.meta.keywords = "arduino hacker space"
response.menu = [
(T('Gate'), False, URL('default','gate')),
(T('Door'), False, URL('default','door')),
(T('About'), False, URL('default','about')),
]
| house4hack/openSHAC | web2py_shac/applications/enter/models/menu.py | Python | mit | 282 |
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re
t = int(raw_input())
for i in range(t):
    # re.compile raises re.error when the pattern is invalid.
    try:
        re.compile(raw_input())
        print True
    except re.error:
print False | ugaliguy/HackerRank | Python/Errors-and-Exceptions/incorrect-regex.py | Python | mit | 246 |
import Pyro4
import Pyro4.errors
from diffiehellman import DiffieHellman
dh = DiffieHellman(group=14)
with Pyro4.locateNS() as ns:
uri = ns.lookup("example.dh.secretstuff")
print(uri)
p = Pyro4.Proxy(uri)
try:
p.process("hey")
raise RuntimeError("this should not be reached")
except Pyro4.errors.PyroError as x:
print("Error occured (expected!):", x)
with Pyro4.Proxy("PYRONAME:example.dh.keyexchange") as keyex:
print("exchange public keys...")
other_key = keyex.exchange_key(dh.public_key)
print("got server public key, creating shared secret key...")
dh.make_shared_secret_and_key(other_key)
print("setting key on proxy.")
p._pyroHmacKey = dh.key
print("Calling proxy again...")
result = p.process("hey")
print("Got reply:", result)
| irmen/Pyro4 | examples/diffie-hellman/client.py | Python | mit | 786 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
import frappe
import unittest
# test_records = frappe.get_test_records('OAuth Authorization Code')
class TestOAuthAuthorizationCode(unittest.TestCase):
pass
| mhbu50/frappe | frappe/integrations/doctype/oauth_authorization_code/test_oauth_authorization_code.py | Python | mit | 261 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from folium.plugins.marker_cluster import MarkerCluster
from folium.utilities import _validate_coordinates
from jinja2 import Template
class FastMarkerCluster(MarkerCluster):
"""
Add marker clusters to a map using in-browser rendering.
    Using FastMarkerCluster it is possible to render many thousands of
    points far more quickly than with the MarkerCluster class.
Be aware that the FastMarkerCluster class passes an empty
list to the parent class' __init__ method during initialisation.
This means that the add_child method is never called, and
    no references to any marker data are retained. Methods such
as get_bounds() are therefore not available when using it.
Parameters
----------
data: list
List of list of shape [[], []]. Data points should be of
the form [[lat, lng]].
callback: string, default None
A string representation of a valid Javascript function
that will be passed a lat, lon coordinate pair. See the
FasterMarkerCluster for an example of a custom callback.
name : string, default None
The name of the Layer, as it will appear in LayerControls.
overlay : bool, default True
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening (only for overlays).
options : dict, default None
A dictionary with options for Leaflet.markercluster. See
https://github.com/Leaflet/Leaflet.markercluster for options.
"""
_template = Template(u"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = (function(){
{{this._callback}}
var data = {{ this._data }};
var cluster = L.markerClusterGroup({{ this.options }});
for (var i = 0; i < data.length; i++) {
var row = data[i];
var marker = callback(row);
marker.addTo(cluster);
}
cluster.addTo({{ this._parent.get_name() }});
return cluster;
})();
{% endmacro %}""")
def __init__(self, data, callback=None, options=None,
name=None, overlay=True, control=True, show=True):
super(FastMarkerCluster, self).__init__(name=name, overlay=overlay,
control=control, show=show,
options=options)
self._name = 'FastMarkerCluster'
self._data = _validate_coordinates(data)
if callback is None:
self._callback = """
var callback = function (row) {
var icon = L.AwesomeMarkers.icon();
var marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
return marker;
};"""
else:
self._callback = 'var callback = {};'.format(callback)
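if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # module). It assumes folium is importable; the coordinates and output
    # filename below are made up for the example.
    import folium

    m = folium.Map(location=[44.0, -121.0], zoom_start=5)
    points = [[45.52, -122.68], [47.61, -122.33], [37.77, -122.42]]
    FastMarkerCluster(points, name='example points').add_to(m)
    m.save('fast_marker_cluster_example.html')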
| QuLogic/folium | folium/plugins/fast_marker_cluster.py | Python | mit | 3,213 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AlertRuleResource
from ._models_py3 import AlertRuleResourceCollection
from ._models_py3 import AlertRuleResourcePatch
from ._models_py3 import AutoscaleNotification
from ._models_py3 import AutoscaleProfile
from ._models_py3 import AutoscaleSettingResource
from ._models_py3 import AutoscaleSettingResourceCollection
from ._models_py3 import AutoscaleSettingResourcePatch
from ._models_py3 import EmailNotification
from ._models_py3 import ErrorResponse
from ._models_py3 import EventCategoryCollection
from ._models_py3 import EventData
from ._models_py3 import EventDataCollection
from ._models_py3 import HttpRequestInfo
from ._models_py3 import LocalizableString
from ._models_py3 import LocationThresholdRuleCondition
from ._models_py3 import ManagementEventAggregationCondition
from ._models_py3 import ManagementEventRuleCondition
from ._models_py3 import MetricTrigger
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import Recurrence
from ._models_py3 import RecurrentSchedule
from ._models_py3 import Resource
from ._models_py3 import RuleAction
from ._models_py3 import RuleCondition
from ._models_py3 import RuleDataSource
from ._models_py3 import RuleEmailAction
from ._models_py3 import RuleManagementEventClaimsDataSource
from ._models_py3 import RuleManagementEventDataSource
from ._models_py3 import RuleMetricDataSource
from ._models_py3 import RuleWebhookAction
from ._models_py3 import ScaleAction
from ._models_py3 import ScaleCapacity
from ._models_py3 import ScaleRule
from ._models_py3 import ScaleRuleMetricDimension
from ._models_py3 import SenderAuthorization
from ._models_py3 import ThresholdRuleCondition
from ._models_py3 import TimeWindow
from ._models_py3 import WebhookNotification
except (SyntaxError, ImportError):
from ._models import AlertRuleResource # type: ignore
from ._models import AlertRuleResourceCollection # type: ignore
from ._models import AlertRuleResourcePatch # type: ignore
from ._models import AutoscaleNotification # type: ignore
from ._models import AutoscaleProfile # type: ignore
from ._models import AutoscaleSettingResource # type: ignore
from ._models import AutoscaleSettingResourceCollection # type: ignore
from ._models import AutoscaleSettingResourcePatch # type: ignore
from ._models import EmailNotification # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import EventCategoryCollection # type: ignore
from ._models import EventData # type: ignore
from ._models import EventDataCollection # type: ignore
from ._models import HttpRequestInfo # type: ignore
from ._models import LocalizableString # type: ignore
from ._models import LocationThresholdRuleCondition # type: ignore
from ._models import ManagementEventAggregationCondition # type: ignore
from ._models import ManagementEventRuleCondition # type: ignore
from ._models import MetricTrigger # type: ignore
from ._models import Operation # type: ignore
from ._models import OperationDisplay # type: ignore
from ._models import OperationListResult # type: ignore
from ._models import Recurrence # type: ignore
from ._models import RecurrentSchedule # type: ignore
from ._models import Resource # type: ignore
from ._models import RuleAction # type: ignore
from ._models import RuleCondition # type: ignore
from ._models import RuleDataSource # type: ignore
from ._models import RuleEmailAction # type: ignore
from ._models import RuleManagementEventClaimsDataSource # type: ignore
from ._models import RuleManagementEventDataSource # type: ignore
from ._models import RuleMetricDataSource # type: ignore
from ._models import RuleWebhookAction # type: ignore
from ._models import ScaleAction # type: ignore
from ._models import ScaleCapacity # type: ignore
from ._models import ScaleRule # type: ignore
from ._models import ScaleRuleMetricDimension # type: ignore
from ._models import SenderAuthorization # type: ignore
from ._models import ThresholdRuleCondition # type: ignore
from ._models import TimeWindow # type: ignore
from ._models import WebhookNotification # type: ignore
from ._monitor_management_client_enums import (
ComparisonOperationType,
ConditionOperator,
EventLevel,
MetricStatisticType,
RecurrenceFrequency,
ScaleDirection,
ScaleRuleMetricDimensionOperationType,
ScaleType,
TimeAggregationOperator,
TimeAggregationType,
)
__all__ = [
'AlertRuleResource',
'AlertRuleResourceCollection',
'AlertRuleResourcePatch',
'AutoscaleNotification',
'AutoscaleProfile',
'AutoscaleSettingResource',
'AutoscaleSettingResourceCollection',
'AutoscaleSettingResourcePatch',
'EmailNotification',
'ErrorResponse',
'EventCategoryCollection',
'EventData',
'EventDataCollection',
'HttpRequestInfo',
'LocalizableString',
'LocationThresholdRuleCondition',
'ManagementEventAggregationCondition',
'ManagementEventRuleCondition',
'MetricTrigger',
'Operation',
'OperationDisplay',
'OperationListResult',
'Recurrence',
'RecurrentSchedule',
'Resource',
'RuleAction',
'RuleCondition',
'RuleDataSource',
'RuleEmailAction',
'RuleManagementEventClaimsDataSource',
'RuleManagementEventDataSource',
'RuleMetricDataSource',
'RuleWebhookAction',
'ScaleAction',
'ScaleCapacity',
'ScaleRule',
'ScaleRuleMetricDimension',
'SenderAuthorization',
'ThresholdRuleCondition',
'TimeWindow',
'WebhookNotification',
'ComparisonOperationType',
'ConditionOperator',
'EventLevel',
'MetricStatisticType',
'RecurrenceFrequency',
'ScaleDirection',
'ScaleRuleMetricDimensionOperationType',
'ScaleType',
'TimeAggregationOperator',
'TimeAggregationType',
]
| Azure/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2015_04_01/models/__init__.py | Python | mit | 6,678 |
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: Mauro Soria
class Path(object):
def __init__(self, path=None, status=None, response=None):
self.path = path
self.status = status
self.response = response
def __str__(self):
return self.path | Yukinoshita47/Yuki-Chan-The-Auto-Pentest | Module/dirsearch/lib/core/Path.py | Python | mit | 987 |
# mailstat.console
# Console utilities for mailstat
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Dec 29 15:57:44 2013 -0600
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: console.py [] [email protected] $
"""
Console utilities for mailstat
"""
##########################################################################
## Imports
##########################################################################
import os
import json
import baker
from datetime import datetime
from mailstat.analyze import Analysis
from mailstat.exceptions import ConsoleError
from mailstat.utils.testgen import TestDataGenerator
##########################################################################
## Helper functions
##########################################################################
def working_output(name="output", timestamp=False, dtfmt="%Y%m%d"):
"""
Helper function that creates a file path from the current working dir
    and the name provided as the first argument. Optionally interpolates the
    current time into the name, in which case the name must contain a format placeholder.
"""
cwd = os.getcwd()
if timestamp:
try:
name = name % datetime.now().strftime(dtfmt)
except TypeError:
raise ConsoleError("If timestamp is provied, a format string must be used.")
return os.path.join(cwd, name)
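# Illustrative example (added for clarity; the date shown is an assumption):
#     working_output("report-%s.json", timestamp=True)
#     # -> os.path.join(os.getcwd(), "report-20131229.json") on 2013-12-29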
##########################################################################
## Command line functions
##########################################################################
@baker.command
def testdata(names, domains, fixture=None, output=None):
"""
Generates a testdata set from a names and domains file
:param names: a list of names to use
:param domains: a list of domains to use
:param fixture: already created data to anonymize
:param output: where to write the test fixture
"""
output = output or working_output("email_metrics.csv")
generator = TestDataGenerator(names, domains, fixture)
generator.write(output)
@baker.command(default=True)
def analyze(emails, output=None):
"""
Perform analysis of email csv and output HTML report
:param emails: The email csv generated by MineMyMail
:param output: The path to output the report
"""
output = output or working_output("report-%s.json", True)
analysis = Analysis(emails)
analysis.analyze()
with open(output, 'w') as outfile:
json.dump(analysis.serialize(), outfile, indent=2)
| bbengfort/email-analysis | mailstat/console.py | Python | mit | 2,516 |
import micropython
micropython.alloc_emergency_exception_buf(100)
import pyb
class Heartbeat(object):
def __init__(self):
self.tick = 0
self.led = pyb.LED(4) # 4 = Blue
tim = pyb.Timer(4)
tim.init(freq=10)
tim.callback(self.heartbeat_cb)
def heartbeat_cb(self, tim):
if self.tick <= 3:
self.led.toggle()
self.tick = (self.tick + 1) % 10
class serial_speed_test(object):
def __init__(self, freq_Hz):
self.tick = 0
self.freq_Hz = freq_Hz
tim1 = pyb.Timer(1)
tim1.init(freq=freq_Hz)
tim1.callback(self.serial_speed_test_cb)
def serial_speed_test_cb(self, tim1):
print(micros_timer.counter(), ',', 40*self.tick)
self.tick = (self.tick + 1) % 100
# Free-running microsecond counter: TIM2 on the pyboard is clocked at 84 MHz,
# so prescaler=83 divides it by 84, giving 1 MHz (one count per microsecond).
micros_timer = pyb.Timer(2, prescaler=83, period=0x3ffffff)
Heartbeat()
serial_speed_test(10)
| gregnordin/micropython_pyboard | 150729_pyboard_to_pyqtgraph/pyboard_code.py | Python | mit | 954 |
from . import server
import sys
server.main(*sys.argv) | TeamNext/qos.py | qos/__main__.py | Python | mit | 55 |
#!/usr/bin/env python2
"""Hacked-together development server for feedreader.
Runs the feedreader server under the /api prefix, serves URIs not containing a
dot from public/index.html, and serves everything else from the public directory.
"""
import logging
import tornado.ioloop
import tornado.web
from feedreader.config import ConnectionConfig, FeederConfig
import feedreader.config
import feedreader.main
class PrefixedFallbackHandler(tornado.web.FallbackHandler):
"""FallbackHandler that removes the given prefix from requests."""
def prepare(self):
# hacky way of removing /api/
self.request.uri = self.request.uri[4:]
self.request.path = self.request.path[4:]
super(PrefixedFallbackHandler, self).prepare()
class SingleFileHandler(tornado.web.StaticFileHandler):
"""FileHandler that only reads a single static file."""
@classmethod
def get_absolute_path(cls, root, path):
return tornado.web.StaticFileHandler.get_absolute_path(root,
"index.html")
def main():
logging.basicConfig(format='[%(levelname)s][%(name)s]: %(message)s')
logging.getLogger().setLevel(logging.DEBUG)
feeder_config = FeederConfig.from_args()
conn_config = ConnectionConfig.from_file(feeder_config.conn_filepath)
feedreader_app = feedreader.main.get_application(feeder_config,
conn_config)
application = tornado.web.Application([
(r"/api/(.*)", PrefixedFallbackHandler, dict(fallback=feedreader_app)),
(r"/(.*\..*)", tornado.web.StaticFileHandler, {"path": "public"}),
(r"/(.*)", SingleFileHandler, {"path": "public"}),
])
application.listen(feeder_config.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| tdryer/feeder | run.py | Python | mit | 1,843 |
__author__ = 'heddevanderheide'
# Django specific
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url('', include('fabric_interface.urls'))
) | Hedde/fabric_interface | src/main/urls.py | Python | mit | 179 |
from random import randint
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, connection
from django.db.models.aggregates import Avg, Max
from polymorphic.models import PolymorphicModel
from solo.models import SingletonModel
class Judge(PolymorphicModel):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class HumanJudge(Judge):
user = models.OneToOneField(to=User, related_name='judge', null=True, default=None)
class AutomatedJudge(Judge):
ip_address = models.GenericIPAddressField(unique=True)
port = models.PositiveIntegerField(default=settings.DEFAULT_JUDGE_PORT)
@classmethod
def get_random_judge(cls):
count = cls.objects.count()
random_index = randint(0, count-1)
return cls.objects.all()[random_index]
class JudgeRequest(models.Model):
time = models.DateTimeField(auto_now_add=True)
is_closed = models.BooleanField(default=False)
feature = models.ForeignKey(to='features.Feature', related_name='judge_requests')
team = models.ForeignKey(to='teams.Team', related_name='judge_requests')
def __str__(self):
return '{} for {}'.format(str(self.team), str(self.feature))
@property
def assignee1(self):
return self.assignees.first() or None
@property
def assignee2(self):
if self.assignees.count() < 2:
return None
return self.assignees.last() or None
@property
def judge1_score(self):
return getattr(self.assignee1, 'score', '--')
@property
def judge2_score(self):
return getattr(self.assignee2, 'score', '--')
@property
def score(self):
return self.assignees.aggregate(score=Avg('score'))['score'] or 0
@property
def message(self):
assignee = self.assignees.first()
if not assignee:
return ""
return str(assignee.message)
@property
def is_passed(self):
if connection.vendor == 'postgresql':
from django.contrib.postgres.aggregates import BoolOr
agg = BoolOr
else:
agg = Max
return self.assignees.aggregate(is_passed=agg('is_passed'))['is_passed'] or False
class JudgeRequestAssignment(models.Model):
judge = models.ForeignKey(to=Judge, related_name='assignments')
score = models.FloatField(null=True, blank=True)
is_passed = models.BooleanField(default=False)
judge_request = models.ForeignKey(to=JudgeRequest, related_name='assignees')
message = models.TextField(null=True, blank=True)
def __str__(self):
return '{} assigned to {} with score {}'.format(str(self.judge),
str(self.judge_request),
str(self.score))
class Config(SingletonModel):
day = models.IntegerField(default=1)
is_frozen = models.BooleanField(default=False)
frozen_scoreboard = models.TextField(default="", blank=True)
assign_to_automated_judge = models.BooleanField(default=True)
timeout_minutes = models.PositiveIntegerField(default=10)
| Kianoosh76/webelopers-scoreboard | jury/models.py | Python | mit | 3,183 |
import time
import json
import redis
import subprocess
from subprocess import Popen, check_output
import shlex
import os
from py_cf_new_py3.chain_flow_py3 import CF_Base_Interpreter
from redis_graph_py3 import farm_template_py3
class Process_Control(object ):
def __init__(self):
pass
def run_process_to_completion(self,command_string, shell_flag = False, timeout_value = None):
try:
command_parameters = shlex.split(command_string)
return_value = check_output(command_parameters, stderr=subprocess.STDOUT , shell = shell_flag, timeout = timeout_value)
return [0,return_value.decode()]
except subprocess.CalledProcessError as cp:
return [ cp.returncode , cp.output.decode() ]
except :
return [-1,""]
    def launch_process(self, command_string, stderr=None, shell=True):
        # Launch the command asynchronously; "stderr", if given, is a file path
        # that receives the child's error output. The "shell" flag is accepted,
        # but the command is always tokenised with shlex.
        command_parameters = shlex.split(command_string)
        try:
            stderr_file = open(stderr, 'w') if stderr is not None else None
            process_handle = Popen(command_parameters, stderr=stderr_file)
            return [True, process_handle]
        except Exception:
            return [False, None]
def monitor_process(self, process_handle):
returncode = process_handle.poll()
if returncode == None:
return [ True, 0]
else:
del process_handle
return [ False, returncode ]
def kill_process(self,process_handle):
try:
process_handle.kill()
process_handle.wait()
del process_handle
except:
pass
class Manage_A_Python_Process(Process_Control):
def __init__(self,command_string, restart_flag = True, error_directory = "/tmp"):
super(Process_Control,self)
self.restart_flag = restart_flag
command_string = "python3 "+command_string
self.command_string = command_string
command_list= shlex.split(command_string)
script_file_list = command_list[1].split("/")
self.script_file_name = script_file_list[-1].split(".")[0]
temp = error_directory + "/"+self.script_file_name
self.error_file = temp+".err"
self.error_file_rollover = temp +".errr"
self.error = False
self.enabled = True
self.active = False
def get_script(self):
return self.script_file_name
def launch(self):
if( (self.enabled == True) and (self.active == False )):
temp = self.launch_process(self.command_string, stderr=self.error_file)
return_value = temp[0]
self.handle = temp[1]
self.active = return_value
if self.active == False:
self.rollover()
self.error = True
else:
self.error = False
def monitor(self):
if self.enabled == True:
if self.active == True:
return_value = self.monitor_process(self.handle)
if return_value[0] == True:
return True
self.active = False
self.rollover()
if self.restart_flag == True:
self.launch()
return False
def rollover(self):
os.system("mv "+self.error_file+" " +self.error_file_rollover)
def kill(self):
self.active = False
self.error = False
self.enabled = False
self.kill_process(self.handle)
self.rollover()
class System_Control(object):
def __init__(self,
redis_handle,
error_queue_key,
web_command_queue_key,
web_process_data_key,
web_display_list_key,
command_string_list ):
self.redis_handle = redis_handle
self.error_queue_key = error_queue_key
self.web_command_queue_key = web_command_queue_key
self.web_process_data_key = web_process_data_key
self.web_display_list_key = web_display_list_key
self.command_string_list = command_string_list
self.startup_list = []
self.process_hash = {}
self.process_state = {}
for command_string in command_string_list:
temp_class = Manage_A_Python_Process( command_string )
python_script = temp_class.get_script()
self.startup_list.append(python_script)
self.process_hash[python_script] = temp_class
self.redis_handle.set(self.web_display_list_key,json.dumps(self.startup_list))
self.update_web_display()
def launch_processes( self,*unused ):
for script in self.startup_list:
temp = self.process_hash[script]
temp.launch()
if temp.error == True:
                return_data = json.dumps({ "script": script, "error_file" : temp.error_file_rollover})
self.redis_handle.publish(self.error_queue_key,return_data)
temp.error = False
def monitor( self, *unused ):
for script in self.startup_list:
temp = self.process_hash[script]
temp.monitor()
if temp.error == True:
                return_data = json.dumps({ "script": script, "error_file" : temp.error_file_rollover})
self.redis_handle.publish(self.error_queue_key,return_data)
temp.error = False
self.update_web_display()
def process_web_queue( self, *unused ):
if self.redis_handle.llen(self.web_command_queue_key) > 0 :
            data_json = self.redis_handle.lpop(self.web_command_queue_key)
            self.redis_handle.delete(self.web_command_queue_key)  # drop any remaining queued commands
data = json.loads(data_json)
print("made it here")
for script,item in data.items():
temp = self.process_hash[script]
try:
if item["enabled"] == True:
if temp.enabled == False:
temp.enabled = True
temp.active = False
#print(script,"---------------------------launch")
temp.launch()
else:
if temp.enabled == True:
temp.enabled = False
#print(script,"----------------------------kill")
temp.kill()
except:
pass
print(self.process_hash[script].active)
print(self.process_hash[script].enabled)
self.update_web_display()
def update_web_display(self):
process_state = {}
for script in self.startup_list:
temp = self.process_hash[script]
process_state[script] = {"name":script,"enabled":temp.enabled,"active":temp.active,"error":temp.error}
self.redis_handle.set(self.web_process_data_key,json.dumps(process_state))
def add_chains(self,cf):
cf.define_chain("initialization",True)
cf.insert.one_step(self.launch_processes)
cf.insert.enable_chains( ["monitor_web_command_queue","monitor_active_processes"] )
cf.insert.terminate()
cf.define_chain("monitor_web_command_queue", False)
cf.insert.wait_event_count( event = "TIME_TICK", count = 1)
cf.insert.one_step(self.process_web_queue)
cf.insert.reset()
cf.define_chain("monitor_active_processes",False)
cf.insert.wait_event_count( event = "TIME_TICK",count = 10)
cf.insert.one_step(self.monitor)
cf.insert.reset()
if __name__ == "__main__":
cf = CF_Base_Interpreter()
gm = farm_template_py3.Graph_Management(
"PI_1", "main_remote", "LaCima_DataStore")
process_data = gm.match_terminal_relationship("PROCESS_CONTROL")[0]
redis_data = process_data["redis"]
redis_handle = redis.StrictRedis(
host=redis_data["ip"], port=redis_data["port"], db=redis_data["db"], decode_responses=True)
web_command_queue_key =process_data['web_command_key']
error_queue_key = process_data['error_queue_key']
command_string_list = process_data["command_string_list"]
web_process_data_key = process_data["web_process_data"]
web_display_list_key = process_data["web_display_list"]
print(web_process_data_key,web_display_list_key)
system_control = System_Control( redis_handle = redis_handle,
error_queue_key = error_queue_key,
web_command_queue_key = web_command_queue_key,
web_process_data_key = web_process_data_key,
web_display_list_key = web_display_list_key,
command_string_list = command_string_list )
system_control.add_chains(cf)
cf.execute()
| glenn-edgar/local_controller_3 | process_control_py3.py | Python | mit | 9,357 |
# Django settings for obi project.
from os.path import abspath, dirname
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.db.backends.',
# Or path to database file if using sqlite3.
'NAME': '',
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
# Empty for localhost through domain sockets or '127.0.0.1'
# for localhost through TCP.
'HOST': '',
# Set to empty string for default.
'PORT': '',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = dirname(dirname(abspath(__file__))) + '/static'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '6(5f^qra2a%%(iok=0ktz)xnhx(-f7df3q(tuva4*0dz3c^ug4'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
)
ROOT_URLCONF = 'obi.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'obi.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or
# "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.humanize',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'ui',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Be sure to create your own 'local_settings.py' file as described in README
try:
from local_settings import *
except ImportError:
pass
| adityadharne/TestObento | obi/obi/settings.py | Python | mit | 6,014 |
from rest_framework import serializers
from rest_auth.serializers import UserDetailsSerializer
class UserSerializer(UserDetailsSerializer):
website = serializers.URLField(source="userprofile.website", allow_blank=True, required=False)
about = serializers.CharField(source="userprofile.about", allow_blank=True, required=False)
class Meta(UserDetailsSerializer.Meta):
fields = UserDetailsSerializer.Meta.fields + ('website', 'about')
def update(self, instance, validated_data):
profile_data = validated_data.pop('userprofile', {})
website = profile_data.get('website')
about = profile_data.get('about')
instance = super(UserSerializer, self).update(instance, validated_data)
# get and update user profile
profile = instance.userprofile
if profile_data:
if website:
profile.website = website
if about:
profile.about = about
profile.save()
return instance
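# Wiring sketch (an assumption about the project settings, not shown in this file):
#     REST_AUTH_SERIALIZERS = {
#         'USER_DETAILS_SERIALIZER': 'user_profile.serializers.UserSerializer',
#     }
# django-rest-auth would then expose the extra `website`/`about` profile fields.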
| ZachLiuGIS/reactjs-auth-django-rest | django_backend/user_profile/serializers.py | Python | mit | 1,016 |
# -*- coding: utf-8 -*-
"""
flask_via.examples.basic
========================
A simple ``Flask-Via`` example Flask application.
"""
from flask import Flask
from flask.ext.via import Via
from flask.ext.via.routers.default import Functional
app = Flask(__name__)
def foo(bar=None):
return 'Functional Foo View!'
routes = [
Functional('/foo', foo),
Functional('/foo/<bar>', foo, endpoint='foo2'),
]
via = Via()
via.init_app(app, routes_module='flask_via.examples.basic')
if __name__ == "__main__":
app.run(debug=True)
| thisissoon/Flask-Via | flask_via/examples/basic.py | Python | mit | 542 |
# -*- coding: utf-8 -*-
"""Functions for manipulating FGONG files. These are provided through
the **FGONG** object and a module function to read an **FGONG** object
from a file.
"""
import numpy as np
import warnings
from .adipls import fgong_to_amdl
from .constants import G_DEFAULT
from .utils import integrate, tomso_open, regularize
from .utils import FullStellarModel
def load_fgong(filename, fmt='ivers', return_comment=False,
return_object=True, G=None):
"""Given an FGONG file, returns NumPy arrays ``glob`` and ``var`` that
correspond to the scalar and point-wise variables, as specified
in the `FGONG format`_.
.. _FGONG format: https://www.astro.up.pt/corot/ntools/docs/CoRoT_ESTA_Files.pdf
Also returns the first four lines of the file as a `comment`, if
desired.
The version number ``ivers`` is used to infer the format of floats
if ``fmt='ivers'``.
If ``return_object`` is ``True``, instead returns an :py:class:`FGONG`
object. This is the default behaviour as of v0.0.12. The old
behaviour will be dropped completely from v0.1.0.
Parameters
----------
filename: str
Name of the FGONG file to read.
fmt: str, optional
Format string for floats in `glob` and `var`. If ``'ivers'``,
        uses ``%16.9E`` if the file's ``ivers < 1000`` or ``%26.18E3`` if
``ivers >= 1000``. If ``'auto'``, tries to guess the size of each
float. (default: 'ivers')
return_comment: bool, optional
If ``True``, return the first four lines of the FGONG file.
These are comments that are not used in any calculations.
Returns
-------
glob: NumPy array
The scalar (or global) variables for the stellar model
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
comment: list of strs, optional
The first four lines of the FGONG file. These are comments
that are not used in any calculations. Only returned if
``return_comment=True``.
"""
with tomso_open(filename, 'rb') as f:
comment = [f.readline().decode('utf-8').strip() for i in range(4)]
nn, iconst, ivar, ivers = [int(i) for i in f.readline().decode('utf-8').split()]
# lines = f.readlines()
lines = [line.decode('utf-8').lower().replace('d', 'e')
for line in f.readlines()]
tmp = []
if fmt == 'ivers':
if ivers < 1000:
N = 16
else:
N = 27
# try to guess the length of each float in the data
elif fmt == 'auto':
N = len(lines[0])//5
else:
N = len(fmt % -1.111)
for line in lines:
for i in range(len(line)//N):
s = line[i*N:i*N+N]
# print(s)
if s[-9:] == '-Infinity':
s = '-Inf'
elif s[-9:] == ' Infinity':
s = 'Inf'
elif s.lower().endswith('nan'):
s = 'nan'
elif 'd' in s.lower():
s = s.lower().replace('d','e')
tmp.append(float(s))
glob = np.array(tmp[:iconst])
var = np.array(tmp[iconst:]).reshape((-1, ivar))
if return_object:
return FGONG(glob, var, ivers=ivers, G=G,
description=comment)
else:
warnings.warn("From tomso 0.1.0+, `fgong.load_fgong` will only "
"return an `FGONG` object: use `return_object=True` "
"to mimic future behaviour",
FutureWarning)
if return_comment:
return glob, var, comment
else:
return glob, var
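# Minimal usage sketch (the filename is illustrative, not part of this package):
#     m = load_fgong('model.fgong')   # returns an FGONG object by default
#     print(m.M, m.R, m.L)            # global scalars taken from `glob`
#     print(m.r[:3], m.rho[:3])       # point-wise columns taken from `var`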
def save_fgong(filename, glob, var, ivers=1300, comment=['','','',''],
float_formatter='ivers'):
"""Given data for an FGONG file in the format returned by
:py:meth:`~tomso.fgong.load_fgong` (i.e. two NumPy arrays and a
possible header), writes the data to a file.
This function will be dropped from v0.1.0 in favour of the `to_file`
function of the :py:class:`FGONG` object.
Parameters
----------
filename: str
Filename to which FGONG data is written.
glob: NumPy array
The global variables for the stellar model.
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
ivers: int, optional
The integer indicating the version number of the file.
(default=1300)
comment: list of strs, optional
The first four lines of the FGONG file, which usually contain
notes about the stellar model.
float_formatter: str or function
Determines how floating point numbers are formatted. If
``'ivers'`` (the default), use the standard formats ``%16.9E``
if ``ivers < 1000`` or ``%26.18E3`` if ``ivers >= 1000``. If
a Python format specifier (e.g. ``'%16.9E'``), pass floats
into that like ``float_formatter % float``. Otherwise, must
be a function that takes a float as an argument and returns a
string. In most circumstances you'll want to control the
output by changing the value of ``'ivers'``.
"""
nn, ivar = var.shape
iconst = len(glob)
if float_formatter == 'ivers':
if ivers < 1000:
def ff(x):
if not np.isfinite(x):
return '%16s' % x
s = np.format_float_scientific(x, precision=9, unique=False, exp_digits=2, sign=True)
if s[0] == '+':
s = ' ' + s[1:]
return s
else:
def ff(x):
if not np.isfinite(x):
return '%27s' % x
s = np.format_float_scientific(x, precision=18, unique=False, exp_digits=3, sign=True)
if s[0] == '+':
s = ' ' + s[1:]
return ' ' + s
else:
try:
float_formatter % 1.111
ff = lambda x: float_formatter % x
except TypeError:
ff = float_formatter
with open(filename, 'wt') as f:
f.write('\n'.join(comment) + '\n')
line = '%10i'*4 % (nn, iconst, ivar, ivers) + '\n'
f.write(line)
for i, val in enumerate(glob):
f.write(ff(val))
if i % 5 == 4:
f.write('\n')
if i % 5 != 4:
f.write('\n')
for row in var:
for i, val in enumerate(row):
f.write(ff(val))
if i % 5 == 4:
f.write('\n')
if i % 5 != 4:
f.write('\n')
def fgong_get(key_or_keys, glob, var, reverse=False, G=G_DEFAULT):
"""Retrieves physical properties of a FGONG model from the ``glob`` and
``var`` arrays.
This function will be dropped from v0.1.0 in favour of the
attributes of the :py:class:`FGONG` object.
Parameters
----------
key_or_keys: str or list of strs
The desired variable or a list of desired variables. Current
options are:
- ``M``: total mass (float)
- ``R``: photospheric radius (float)
- ``L``: total luminosity (float)
- ``r``: radius (array)
- ``x``: fractional radius (array)
- ``m``: mass co-ordinate (array)
- ``q``: fractional mass co-ordinate (array)
- ``g``: gravity (array)
- ``rho``: density (array)
- ``P``: pressure (array)
- ``AA``: Ledoux discriminant (array)
- ``Hp``: pressure scale height (array)
- ``Hrho``: density scale height (array)
- ``G1``: first adiabatic index (array)
- ``T``: temperature (array)
- ``X``: hydrogen abundance (array)
- ``L_r``: luminosity at radius ``r`` (array)
- ``kappa``: opacity (array)
- ``epsilon``: specific energy generation rate (array)
- ``cp``: specific heat capacity (array)
- ``cs2``: sound speed squared (array)
- ``cs``: sound speed (array)
- ``tau``: acoustic depth (array)
For example, if ``glob`` and ``var`` have been returned from
:py:meth:`~tomso.fgong.load_fgong`, you could use
>>> M, m = fgong.fgong_get(['M', 'm'], glob, var)
to get the total mass and mass co-ordinate. If you only want
one variable, you don't need to use a list. The return type
is just the one corresponding float or array. So, to get a
single variable you could use either
>>> x, = fgong.fgong_get(['x'], glob, var)
or
>>> x = fgong.fgong_get('x', glob, var)
glob: NumPy array
The scalar (or global) variables for the stellar model
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
reverse: bool (optional)
If ``True``, reverse the arrays so that the first element is
the centre.
G: float (optional)
Value of the gravitational constant.
Returns
-------
output: list of floats and arrays
A list returning the floats or arrays in the order requested
by the parameter ``keys``.
"""
M, R, L = glob[:3]
r, lnq, T, P, rho, X, L_r, kappa, epsilon, G1 = var[:,:10].T
cp = var[:,12]
AA = var[:,14]
x = r/R
q = np.exp(lnq)
m = q*M
g = G*m/r**2
Hp = P/(rho*g)
Hrho = 1/(1/G1/Hp + AA/r)
cs2 = G1*P/rho # square of the sound speed
cs = np.sqrt(cs2)
if np.all(np.diff(x) < 0):
tau = -integrate(1./cs, r) # acoustic depth
else:
tau = integrate(1./cs[::-1], r[::-1])[::-1]
tau = np.max(tau)-tau
if type(key_or_keys) == str:
keys = [key_or_keys]
just_one = True
else:
keys = key_or_keys
just_one = False
I = np.arange(len(var), dtype=int)
if reverse:
I = I[::-1]
output = []
for key in keys:
if key == 'M': output.append(M)
elif key == 'R': output.append(R)
elif key == 'L': output.append(L)
elif key == 'r': output.append(r[I])
elif key == 'x': output.append(x[I])
elif key == 'm': output.append(m[I])
elif key == 'q': output.append(q[I])
elif key == 'g': output.append(g[I])
elif key == 'rho': output.append(rho[I])
elif key == 'P': output.append(P[I])
elif key == 'AA': output.append(AA[I])
elif key == 'Hp': output.append(Hp[I])
elif key == 'Hrho': output.append(Hrho[I])
elif key == 'G1': output.append(G1[I])
elif key == 'T': output.append(T[I])
elif key == 'X': output.append(X[I])
elif key == 'L_r': output.append(L_r[I])
elif key == 'kappa': output.append(kappa[I])
elif key == 'epsilon': output.append(epsilon[I])
elif key == 'cp': output.append(cp[I])
elif key == 'cs2': output.append(cs2[I])
elif key == 'cs': output.append(cs[I])
elif key == 'tau': output.append(tau[I])
else: raise ValueError('%s is not a valid key for fgong.fgong_get' % key)
if just_one:
assert(len(output) == 1)
return output[0]
else:
return output
class FGONG(FullStellarModel):
"""A class that contains and allows one to manipulate the data in a
stellar model stored in the `FGONG format`_.
.. _FGONG format: https://www.astro.up.pt/corot/ntools/docs/CoRoT_ESTA_Files.pdf
The main attributes are the **glob** and **var** arrays, which
follow the definitions in the FGONG standard. The data in these
arrays can be accessed via the attributes with more
physically-meaningful names (e.g. the radius is ``FGONG.r``).
Some of these values can also be set via the attributes if doing
so is unambiguous. For example, the fractional radius **x** is not a
member of the **var** array but setting **x** will assign the actual
radius **r**, which is the first column of **var**. Values that are
settable are indicated in the list of parameters.
Parameters
----------
glob: NumPy array
The global variables for the stellar model.
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
ivers: int, optional
The integer indicating the version number of the file.
(default=0)
G: float, optional
Value for the gravitational constant. If not given (which is
the default behaviour), we use ``glob[14]`` if it exists and
is close to the module-wide default value. Otherwise, we use
the module-wide default value.
description: list of 4 strs, optional
The first four lines of the FGONG file, which usually contain
notes about the stellar model.
Attributes
----------
iconst: int
number of global data entries (i.e. length of **glob**)
nn: int
number of points in stellar model (i.e. number of rows in **var**)
ivar: int
number of variables recorded at each point in stellar model
(i.e. number of columns in **var**)
M: float, settable
total mass
R: float, settable
photospheric radius
L: float, settable
total luminosity
Teff: float
effective temperature, derived from luminosity and radius
r: NumPy array, settable
radius co-ordinate
lnq: NumPy array, settable
natural logarithm of the fractional mass co-ordinate
T: NumPy array, settable
temperature
P: NumPy array, settable
pressure
rho: NumPy array, settable
density
X: NumPy array, settable
fractional hydrogen abundance (by mass)
L_r: NumPy array, settable
luminosity at radius **r**
kappa: NumPy array, settable
Rosseland mean opacity
epsilon: NumPy array, settable
specific energy generation rate
Gamma_1: NumPy array, settable
first adiabatic index, aliased by **G1**
G1: NumPy array, settable
first adiabatic index, alias of **Gamma_1**
cp: NumPy array, settable
specific heat capacity
AA: NumPy array, settable
Ledoux discriminant
Z: NumPy array, settable
metal abundance
x: NumPy array, settable
fractional radius co-ordinate
q: NumPy array, settable
fractional mass co-ordinate
m: NumPy array, settable
mass co-ordinate
g: NumPy array
local gravitational acceleration
Hp: NumPy array
pressure scale height
Hrho: NumPy array
density scale height
N2: NumPy array
squared Brunt–Väisälä (angular) frequency
cs2: NumPy array
squared adiabatic sound speed
cs: NumPy array
adiabatic sound speed
U: NumPy array
homology invariant *dlnm/dlnr*
V: NumPy array
homology invariant *dlnP/dlnr*
Vg: NumPy array
homology invariant *V/Gamma_1*
tau: NumPy array
acoustic depth
"""
def __init__(self, glob, var, ivers=300, G=None,
description=['', '', '', '']):
self.ivers = ivers
self.glob = glob
self.var = var
self.description = description
# if G is None, use glob[14] if it exists and looks like a
# reasonable value of G
if G is None:
            if len(glob) >= 15 and np.isclose(glob[14], G_DEFAULT,
rtol=1e-3, atol=0.01e-8):
self.G = glob[14]
else:
self.G = G_DEFAULT
else:
self.G = G
def __len__(self):
return len(self.var)
def __repr__(self):
with np.printoptions(threshold=10):
return('FGONG(\nglob=\n%s,\nvar=\n%s,\ndescription=\n%s)' % (self.glob, self.var, '\n'.join(self.description)))
def to_file(self, filename, float_formatter='ivers'):
"""Save the model to an FGONG file.
Parameters
----------
filename: str
Filename to which the data is written.
float_formatter: str or function
Determines how floating point numbers are formatted. If
``'ivers'`` (the default), use the standard formats
``%16.9E`` if ``ivers < 1000`` or ``%26.18E3`` if ``ivers
>= 1000``. If a Python format specifier
(e.g. ``'%16.9E'``), pass floats into that like
``float_formatter % float``. Otherwise, must be a
function that takes a float as an argument and returns a
string. In most circumstances you'll want to control the
output by changing the value of ``'ivers'``.
"""
save_fgong(filename, self.glob, self.var,
ivers=self.ivers, comment=self.description,
float_formatter=float_formatter)
def to_amdl(self):
"""Convert the model to an ``ADIPLSStellarModel`` object."""
from .adipls import ADIPLSStellarModel
return ADIPLSStellarModel(
*fgong_to_amdl(self.glob, self.var, G=self.G), G=self.G)
def to_gyre(self, version=None):
"""Convert the model to a ``GYREStellarModel`` object.
Parameters
----------
version: int, optional
Specify GYRE format version number times 100. i.e.,
``version=101`` produce a file with data version 1.01. If
``None`` (the default), the latest version available in
TOMSO is used.
"""
from .gyre import gyre_header_dtypes, gyre_data_dtypes, GYREStellarModel
if version is None:
version = max([k for k in gyre_header_dtypes.keys()])
header = np.zeros(1, gyre_header_dtypes[version])
header['M'] = self.glob[0]
header['R'] = self.glob[1]
header['L'] = self.glob[2]
if version > 1:
header['version'] = version
data = np.zeros(self.nn, gyre_data_dtypes[version])
# data['r'] = self.var[:,0]
# data['T'] = self.var[:,2]
# data['P'] = self.var[:,3]
# data['rho'] = self.var[:,4]
# if np.all(np.diff(data['r']) <= 0):
# return GYREStellarModel(header, data[::-1], G=self.G)
# else:
# return GYREStellarModel(header, data, G=self.G)
g = GYREStellarModel(header[0], data, G=self.G)
g.r = self.r
g.m = self.m
g.T = self.T
g.P = self.P
g.rho = self.rho
g.Gamma_1 = self.Gamma_1
g.N2 = self.N2
g.kappa = self.kappa
g.L_r = self.L_r
g.data['nabla_ad'] = self.var[:,10]
g.data['delta'] = self.var[:,11]
# The definitions of epsilon in FGONG and GYRE formats might
# be different. Compute non-adiabatic modes at your peril!
if version < 101:
g.data['eps_tot'] = self.epsilon
else:
g.data['eps'] = self.epsilon
if np.all(np.diff(g.r) <= 0):
g.data = g.data[::-1]
g.data['k'] = np.arange(self.nn) + 1
return g
# FGONG parameters that can be derived from data
@property
def iconst(self): return len(self.glob)
@property
def nn(self): return self.var.shape[0]
@property
def ivar(self): return self.var.shape[1]
# Various properties for easier access to the data in `glob` and
# `var`.
@property
def M(self): return self.glob[0]
@M.setter
def M(self, val): self.glob[0] = val
@property
def R(self): return self.glob[1]
@R.setter
def R(self, val): self.glob[1] = val
@property
def L(self): return self.glob[2]
@L.setter
def L(self, val): self.glob[2] = val
@property
def r(self): return self.var[:,0]
@r.setter
def r(self, val):
self.var[:,0] = val
self.var[:,17] = self.R-val
@property
def lnq(self): return self.var[:,1]
@lnq.setter
def lnq(self, val): self.var[:,1] = val
@property
def T(self): return self.var[:,2]
@T.setter
def T(self, val): self.var[:,2] = val
@property
def P(self): return self.var[:,3]
@P.setter
def P(self, val): self.var[:,3] = val
@property
def rho(self): return self.var[:,4]
@rho.setter
def rho(self, val): self.var[:,4] = val
@property
def X(self): return self.var[:,5]
@X.setter
def X(self, val): self.var[:,5] = val
@property
def L_r(self): return self.var[:,6]
@L_r.setter
def L_r(self, val): self.var[:,6] = val
@property
def kappa(self): return self.var[:,7]
@kappa.setter
    def kappa(self, val): self.var[:,7] = val
@property
def epsilon(self): return self.var[:,8]
@epsilon.setter
def epsilon(self, val): self.var[:,8] = val
@property
def Gamma_1(self): return self.var[:,9]
@Gamma_1.setter
def Gamma_1(self, val): self.var[:,9] = val
@property
def G1(self): return self.var[:,9]
@G1.setter
def G1(self, val): self.var[:,9] = val
@property
def grad_a(self): return self.var[:,10]
@grad_a.setter
def grad_a(self, val): self.var[:,10] = val
@property
def cp(self): return self.var[:,12]
@cp.setter
def cp(self, val): self.var[:,12] = val
@property
def AA(self): return self.var[:,14]
@AA.setter
def AA(self, val): self.var[:,14] = val
@property
def Z(self): return self.var[:,16]
@Z.setter
def Z(self, val): self.var[:,16] = val
# Some convenient quantities derived from `glob` and `var`.
@property
def x(self): return self.r/self.R
@x.setter
def x(self, val): self.r = val*self.R
@property
def q(self): return np.exp(self.lnq)
@q.setter
def q(self, val): self.lnq = np.log(val)
@property
def m(self): return self.q*self.M
@m.setter
def m(self, val): self.q = val/self.M
@property
@regularize()
def N2(self): return self.AA*self.g/self.r
@property
@regularize(y0=3)
def U(self): return 4.*np.pi*self.rho*self.r**3/self.m
@property
@regularize()
def V(self): return self.G*self.m*self.rho/self.P/self.r
@property
def Vg(self): return self.V/self.Gamma_1
@property
def tau(self):
if np.all(np.diff(self.x) < 0):
return -integrate(1./self.cs, self.r)
else:
tau = integrate(1./self.cs[::-1], self.r[::-1])[::-1]
return np.max(tau)-tau
| warrickball/tomso | tomso/fgong.py | Python | mit | 22,737 |
"""Automatically format references in a LaTeX file."""
import argparse
from multiprocessing import Pool
from reference_utils import Reference, extract_bibtex_items
from latex_utils import read_latex_file, write_latex_file
class ReferenceFormatter:
def __init__(self, add_arxiv):
self.add_arxiv = add_arxiv
def get_reference(self, bibtex_entry):
"""Wrapper for multithreading."""
reference = Reference(bibtex_entry.rstrip(), self.add_arxiv)
reference.main()
return reference.bibitem_data, reference.bibitem_identifier, reference.reformatted_original_reference, reference.formatted_reference
def format_references(self, latex_source):
"""Format all references in the given LaTeX source."""
bibtex_entries = extract_bibtex_items(latex_source)
# Parallelising the reference lookup gives a 15x speedup.
# Values larger than 15 for the poolsize do not give a further speedup.
with Pool(15) as pool:
res = pool.map(self.get_reference, bibtex_entries)
for r in res:
bibitem_data, bibitem_identifier, reformatted_original_reference, formatted_reference = r
latex_source = latex_source.replace(bibitem_data, f"\\bibitem{{{bibitem_identifier}}} \\textcolor{{red}}{{TODO}}\n{reformatted_original_reference}\n\n%{formatted_reference}\n\n\n")
return latex_source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('latex_file')
parser.add_argument('--add_arxiv', action="store_true")
args = parser.parse_args()
latex_source = read_latex_file(args.latex_file)
print("Processing references...")
reference_formatter = ReferenceFormatter(args.add_arxiv)
latex_source = reference_formatter.format_references(latex_source)
write_latex_file(args.latex_file, latex_source)
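# Typical invocation (a sketch; requires the sibling reference_utils/latex_utils modules):
#     python reference_formatter.py paper.tex --add_arxiv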
| teunzwart/latex-production-tools | reference_formatter.py | Python | mit | 1,870 |
import sqlite3
import requests
from random import sample
import textwrap
from printer import ThermalPrinter
LINE_WIDTH = 32
potm = "http://creepypasta.wikia.com/api/v1/Articles/List?category=PotM&limit=1000"
spotlighted = "http://creepypasta.wikia.com/api/v1/Articles/List?category=Spotlighted_Pastas&limit=1000"
def get_json_from_url(url):
return requests.get(url).json()
def get_ids_from_article_list(data):
return [item['id'] for item in data['items']]
def get_ids_from_url(url):
data = get_json_from_url(url)
return get_ids_from_article_list(data)
def get_id_list():
each = get_ids_from_url(potm) + get_ids_from_url(spotlighted)
return each
def get_newest_story(c, story_list):
if len(story_list) == 0:
return "NO STORIES FOUND"
first = story_list[0]
c.execute("INSERT INTO `visited` (`source`, `source_id`) VALUES (?, ?)", ('creepypasta.wikia.com', first))
story_data = get_json_from_url("http://creepypasta.wikia.com/api/v1/Articles/AsSimpleJson?id=%s" % first)
return story_data
def strip_printed_stories(c, story_list, source):
existing_ids = [item[0] for item in c.execute("SELECT source_id FROM `visited` WHERE `source`='%s'" % source)]
return [story for story in story_list if story not in existing_ids]
def parse_list_item(item):
return textwrap.fill("* %s" % item['text'], LINE_WIDTH)
def parse_content_item(item):
if item['type'] == 'paragraph':
return textwrap.fill(item['text'], LINE_WIDTH)
elif item['type'] == 'list':
return "\n".join(parse_list_item(li) for li in item['elements'])
return ''
def parse_content_list(section):
return "\n".join(parse_content_item(item) for item in section['content'])
def parse_title(section):
# CENTRE ME
return "\n" + textwrap.fill(section['title'], LINE_WIDTH)
def parse_section(section):
return "\n".join([parse_title(section), parse_content_list(section)])
def parse_story(data):
sections = [parse_section(section) for section in data['sections']]
return "\n".join(sections)
conn = sqlite3.connect('creepypasta.db')
c = conn.cursor()
ids = get_id_list()
stripped = strip_printed_stories(c, ids, "creepypasta.wikia.com")
shuffled = sample(stripped, len(stripped))
newest = get_newest_story(c, shuffled)
lines = parse_story(newest).encode('ascii', 'replace')
printer = ThermalPrinter()
for line in lines.split("\n"):
printer.print_text("\n" + line)
conn.commit()
conn.close()
| AngryLawyer/creepypasta-strainer | src_python/strainer.py | Python | mit | 2,457 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import show_and_tell_model
from inference_utils import inference_wrapper_base
class InferenceWrapper(inference_wrapper_base.InferenceWrapperBase):
"""Model wrapper class for performing inference with a ShowAndTellModel."""
def __init__(self):
super(InferenceWrapper, self).__init__()
def build_model(self, model_config):
model = show_and_tell_model.ShowAndTellModel(model_config, mode="inference")
model.build()
return model
def feed_image(self, sess, encoded_image):
initial_state = sess.run(fetches="lstm/initial_state:0",
feed_dict={"image_feed:0": encoded_image})
return initial_state
def inference_step(self, sess, input_feed, state_feed):
softmax_output, state_output = sess.run(
fetches=["softmax:0", "lstm/state:0"],
feed_dict={
"input_feed:0": input_feed,
"lstm/state_feed:0": state_feed,
})
return softmax_output, state_output, None
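# Rough usage sketch (assumes the im2txt-style InferenceWrapperBase API such as
# build_graph_from_config(); the names below are illustrative, not verified here):
#     model = InferenceWrapper()
#     restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
#                                                checkpoint_path)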
| hkhpub/show_and_tell_korean | webdemo/webdemo/inference_wrapper.py | Python | mit | 1,658 |
# -*- coding: utf-8 -*-
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as test_command
class PyTest(test_command):
user_options = [
('pytest-args=', 'a', 'Arguments for pytest'),
]
def initialize_options(self):
test_command.initialize_options(self)
self.pytest_target = []
self.pytest_args = []
def finalize_options(self):
test_command.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
version = '0.1.0'
setup_requires = [
'pytest'
]
tests_require = [
'pytest-timeout',
'mypy-lang',
]
setup(
name='algo_trade',
    packages=find_packages(),
setup_requires=setup_requires,
## install_requires=install_requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
test_suite='test'
)
| sablet/algo_trade | setup.py | Python | mit | 985 |
import numpy as np
def random_flips(X):
"""
Take random x-y flips of images.
Input:
- X: (N, C, H, W) array of image data.
Output:
- An array of the same shape as X, containing a copy of the data in X,
but with half the examples flipped along the horizontal direction.
"""
N, C, H, W = X.shape
mask = np.random.randint(2, size=N)
# what this means is the ith image should be flipped with probability 1/2
out = np.zeros_like(X)
out[mask==1] = X[mask==1,:,:,::-1]
out[mask==0] = X[mask==0]
return out
def random_crops(X, crop_shape):
"""
Take random crops of images. For each input image we will generate a random
crop of that image of the specified size.
Input:
- X: (N, C, H, W) array of image data
- crop_shape: Tuple (HH, WW) to which each image will be cropped.
Output:
- Array of shape (N, C, HH, WW)
"""
N, C, H, W = X.shape
HH, WW = crop_shape
assert HH < H and WW < W
out = np.zeros((N, C, HH, WW), dtype=X.dtype)
y_start = np.random.randint((H-HH), size=N) #(H-HH)*np.random.random_sample(N)
x_start = np.random.randint((W-WW), size=N) #(W-WW)*np.random.random_sample(N)
for i in xrange(N):
out[i] = X[i, :, y_start[i]:y_start[i]+HH, x_start[i]:x_start[i]+WW]
return out
def random_contrast(X, scale=(0.8, 1.2)):
"""
Randomly adjust the contrast of images. For each input image, choose a
number uniformly at random from the range given by the scale parameter,
and multiply each pixel of the image by that number.
Inputs:
- X: (N, C, H, W) array of image data
- scale: Tuple (low, high). For each image we sample a scalar in the
range (low, high) and multiply the image by that scaler.
Output:
- Rescaled array out of shape (N, C, H, W) where out[i] is a contrast
adjusted version of X[i].
"""
low, high = scale
N = X.shape[0]
out = np.zeros_like(X)
l = (scale[1]-scale[0])*np.random.random_sample(N)+scale[0]
# for i in xrange(N):
# out[i] = X[i] * l[i]
out = X * l[:,None,None,None]
# TODO: vectorize this somehow...
#out = #np.diag(l).dot(X)#X*l[:,np.newaxis, np.newaxis, np.newaxis]
return out
def random_tint(X, scale=(-10, 10)):
"""
Randomly tint images. For each input image, choose a random color whose
red, green, and blue components are each drawn uniformly at random from
the range given by scale. Add that color to each pixel of the image.
Inputs:
- X: (N, C, W, H) array of image data
- scale: A tuple (low, high) giving the bounds for the random color that
will be generated for each image.
Output:
- Tinted array out of shape (N, C, H, W) where out[i] is a tinted version
of X[i].
"""
low, high = scale
N, C = X.shape[:2]
out = np.zeros_like(X)
# for i in xrange(N):
# l = (scale[1]-scale[0])*np.random.random_sample(C)+scale[0]
# out[i] = X[i]+l[:,None,None]
l = (scale[1]-scale[0])*np.random.random_sample((N,C))+scale[0]
out = X+l[:,:,None,None]
return out
def fixed_crops(X, crop_shape, crop_type):
"""
Take center or corner crops of images.
Inputs:
- X: Input data, of shape (N, C, H, W)
- crop_shape: Tuple of integers (HH, WW) giving the size to which each
image will be cropped.
- crop_type: One of the following strings, giving the type of crop to
compute:
'center': Center crop
'ul': Upper left corner
'ur': Upper right corner
'bl': Bottom left corner
'br': Bottom right corner
Returns:
Array of cropped data of shape (N, C, HH, WW)
"""
N, C, H, W = X.shape
HH, WW = crop_shape
x0 = (W - WW) / 2
y0 = (H - HH) / 2
x1 = x0 + WW
y1 = y0 + HH
if crop_type == 'center':
return X[:, :, y0:y1, x0:x1]
elif crop_type == 'ul':
return X[:, :, :HH, :WW]
elif crop_type == 'ur':
return X[:, :, :HH, -WW:]
elif crop_type == 'bl':
return X[:, :, -HH:, :WW]
elif crop_type == 'br':
return X[:, :, -HH:, -WW:]
else:
raise ValueError('Unrecognized crop type %s' % crop_type)
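# Example usage (a sketch; X is assumed to be an (N, C, H, W) image batch):
#   X_train_aug = random_flips(random_crops(X, (28, 28)))
#   X_test_crop = fixed_crops(X, (28, 28), 'center')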
| UltronAI/Deep-Learning | CS231n/reference/cnn_assignments-master/assignment3/cs231n/data_augmentation.py | Python | mit | 4,178 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-07 00:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payments', '0004_auto_20160904_0048'),
]
operations = [
migrations.AlterField(
model_name='subscription',
name='status',
field=models.CharField(choices=[('new', 'Created'), ('unconfirmed', 'Waiting for payment'), ('active', 'Active'), ('cancelled', 'Cancelled'), ('error', 'Error')], default='new', max_length=16),
),
]
| CCrypto/ccvpn3 | payments/migrations/0005_auto_20160907_0018.py | Python | mit | 612 |
#!/usr/bin/python
import requests
import json
# Import modules for CGI handling
import cgi, cgitb
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
user_title = form.getvalue('search_title')
print "Content-type: text/html\n\n";
# Setting attributes to send to Wikipedia API
baseurl = 'http://en.wikipedia.org/w/api.php'
search_atts = {}
search_atts['action'] = 'query'
search_atts['list'] = 'search'
search_atts['srwhat'] = 'text'
search_atts['format'] = 'json'
search_atts['srsearch'] = user_title
search_resp = requests.get(baseurl, params = search_atts)
search_data = search_resp.json()
title = search_data["query"]["search"][0]["title"]
# Make the title with no space which will be needed for making a url link to send for summary
title_w_no_space = ""
for i in title:
if i==" ":
title_w_no_space = title_w_no_space + "_"
else:
title_w_no_space = title_w_no_space + i
# Getting related topics using the result given by Wikipedia API
topics = []
for key in search_data["query"]["search"]:
topics.append (key["title"])
topics = topics [1:len(topics)]
# Summarizing the content:
# setting attributes for to send to Smmry API
link_for_smmry = 'https://en.wikipedia.org/wiki/' + title_w_no_space
smmry_base_url = 'http://api.smmry.com/'
#smmry_atts = {}
#smmry_atts ['SM_URL'] = 'https://en.wikipedia.org/wiki/Guyana'
#smmry_atts ['SM_API_KEY'] = '6F297A53E3' # represents your registered API key.
# Optional, X represents the webpage to summarize.
#smmry_atts ['SM_LENGTH'] = N # Optional, N represents the number of sentences returned, default is 7
#smmry_atts ['SM_KEYWORD_COUNT'] = N # Optional, N represents how many of the top keywords to return
#smmry_atts ['SM_QUOTE_AVOID'] # Optional, summary will not include quotations
#smmry_atts ['SM_WITH_BREAK'] # Optional, summary will contain string [BREAK] between each sentence
api_key_link = '&SM_API_KEY=9B07893CAD&SM_URL='
api_lenght = 'SM_LENGTH=7&SM_WITH_BREAK'
#print api_key_link
api_link = smmry_base_url + api_lenght + api_key_link + link_for_smmry
#smmry_resp = requests.get('http://api.smmry.com/&SM_API_KEY=6F297A53E3&SM_URL=https://en.wikipedia.org/wiki/Guyana')
smmry_resp = requests.get(api_link)
smmry_data = smmry_resp.json()
content= '<p>Try adding another key word.</p><a style="color:white;" id="backbtn" href="#" onclick="myFunction()" >Go back.</a>'
try:
content = smmry_data['sm_api_content']
except:
pass
content_with_non_ascii = ""
for word in content:
if ord(word) < 128:
content_with_non_ascii+=word
else:
content_with_non_ascii+= "?"
if len(content_with_non_ascii) >0:
content = content_with_non_ascii
# replacing "[BREAK]"s with a new line
while "[BREAK]" in content:
length = len (content)
break_position = content.find("[BREAK]")
content = content [0:break_position] + "<br><br>" + content [break_position+7: length]
print '<div id="all-cont-alt"><div class="select-nav"><div id="nav-top-main"><a id="backbtn" href="#" onclick="myFunction()" ><i style="float:left; position: relative;margin-left: 10px;top: 26px; color: #d8d8d8;" class= "fa fa-chevron-left fa-2x"></i></a><h1>Geddit</h1></div></div>'
print '<div id="loaddddd"></div><div id="contentss">'
print '<h1 id="user-title">'
print user_title
print "</h1>"
print content
print "</div></div>"
| azimos/geddit | old/geddit-backend.py | Python | mit | 3,469 |
#!/usr/bin/env python
import os
import sys
from setuptools import setup
os.system('make rst')
try:
readme = open('README.rst').read()
except FileNotFoundError:
readme = ""
setup(
name='leicaautomator',
version='0.0.2',
description='Automate scans on Leica SPX microscopes',
long_description=readme,
author='Arve Seljebu',
author_email='[email protected]',
url='https://github.com/arve0/leicaautomator',
packages=[
'leicaautomator',
],
package_dir={'leicaautomator': 'leicaautomator'},
include_package_data=True,
install_requires=[
'scipy',
'numpy',
'matplotlib',
'scikit-image',
'leicascanningtemplate',
'leicacam',
'leicaexperiment',
'microscopestitching',
'dask[bag]',
'numba',
],
license='MIT',
zip_safe=False,
keywords='leicaautomator',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
| arve0/leicaautomator | setup.py | Python | mit | 1,212 |
"""Forms of the aps_bom app."""
from csv import DictReader
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext as __
from .models import BOM, BOMItem, CBOM, CBOMItem, EPN, IPN, Unit
class BaseUploadForm(forms.ModelForm):
csv_file = forms.FileField()
def append_error(self, error_message, field="__all__"):
if field in self.errors:
self.errors[field].append(error_message)
else:
self.errors[field] = [error_message]
def clean(self):
cleaned_data = super(BaseUploadForm, self).clean()
# don't do anything with the file if there are errors already
if any(self.errors):
return cleaned_data
if self.Meta.model == CBOM:
self.company = self.cleaned_data['customer']
# initiate the csv dictreader with the uploaded file
temp_csv_file = self.files.get('csv_file')
with open(temp_csv_file.temporary_file_path(), 'rU') as csv_file:
self.reader = DictReader(csv_file)
self.clean_lines()
return cleaned_data
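    # Normalise each CSV row: strip whitespace, coerce the consign flag to a
    # boolean and resolve IPN/Unit/EPN codes to model instances, collecting
    # per-field errors along the way.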
def clean_lines(self):
# update the fieldnames to match the ones on the model
new_names = []
for fieldname in self.reader.fieldnames:
new_names.append(fieldname.lower())
self.reader.fieldnames = new_names
# iterate over the lines and clean the values
self.clean_lines = []
for line_dict in self.reader:
for key, value in line_dict.iteritems():
# strip blank spaces
value = value.strip()
line_dict[key] = value
if key == 'consign':
if value == '0':
value = False
else:
value = True
line_dict[key] = value
if key == 'ipn':
try:
ipn = IPN.objects.get(code=value)
except IPN.DoesNotExist:
this_link = (
'<a href="{0}" target="_blank">{0}</a>'.format(
reverse('admin:aps_bom_ipn_add')))
self.append_error(__(
'The ipn "{0}" does not exist.'
' Please create it first. {1}'.format(
value, this_link)))
except IPN.MultipleObjectsReturned:
# TODO temporary workaround
self.append_error(__(
'There are multiple entries for the IPN "{0}".'
' Please resolve this error before'
' uploading.'.format(value)), field='ipn')
else:
line_dict[key] = ipn
if key == 'unit':
try:
unit = Unit.objects.get(code=value)
except Unit.DoesNotExist:
this_link = (
'<a href="{0}" target="_blank">{0}</a>'.format(
reverse('admin:aps_bom_unit_add')))
self.append_error(__(
'The unit "{0}" does not exist.'
' Please create it first. {1}'.format(
value, this_link)))
else:
line_dict[key] = unit
if key == 'epn':
try:
epn = EPN.objects.get(epn=value, company=self.company)
except EPN.DoesNotExist:
epn = EPN.objects.create(
description=line_dict.get('description'),
epn=value, company=self.company)
this_link = (
'<a href="{0}" target="_blank">{0}</a>'.format(
reverse('admin:aps_bom_epn_change',
args=(epn.id, ))))
self.append_error(__(
'The EPN "{0}" does not exist.'
' Please visit {1} to update it and then'
' re-upload the file.'.format(
value, this_link)))
else:
if epn.ipn is None or epn.cpn is None:
this_link = (
'<a href="{0}" target="_blank">{0}</a>'.format(
reverse('admin:aps_bom_epn_change',
args=(epn.id, ))))
self.append_error(__(
'The EPN "{0}" does not have all the'
' required data.'
' Please visit {1} to update it and then'
' re-upload the file.'.format(
value, this_link)))
else:
line_dict[key] = epn
if key == 'shape':
pass
line_dict.pop('description')
if 'shape' in line_dict:
line_dict.pop('shape')
self.clean_lines.append(line_dict)
return self.clean_lines
class BOMUploadForm(BaseUploadForm):
"""Custom ModelForm, that handles the upload for BOM.csv files."""
def __init__(self, *args, **kwargs):
super(BOMUploadForm, self).__init__(*args, **kwargs)
self.fields['ipn'].required = True
def save(self):
instance = super(BOMUploadForm, self).save()
for bomitemdict in self.clean_lines:
bomitemdict.update({'bom': instance})
BOMItem.objects.create(**bomitemdict)
return instance
class Meta:
model = BOM
fields = ['description', 'ipn']
class CBOMUploadForm(BaseUploadForm):
"""Custom ModelForm, that handles the upload for cBOM.csv files."""
def save(self):
instance = super(CBOMUploadForm, self).save()
for cbomitemdict in self.clean_lines:
cbomitemdict.update({'bom': instance})
CBOMItem.objects.create(**cbomitemdict)
return instance
class Meta:
model = CBOM
fields = [
'customer', 'description', 'html_link', 'product', 'version_date']
| bitmazk/django-aps-bom | aps_bom/forms.py | Python | mit | 6,549 |
# coding: utf-8
from django.db import models
from django.utils import timezone
from .cores import OssManager
_oss_manager = OssManager()
class StsToken(models.Model):
arn = models.CharField(max_length=500)
assumed_role_id = models.CharField(max_length=500)
access_key_id = models.CharField(max_length=500)
access_key_secret = models.CharField(max_length=500)
security_token = models.TextField()
purpose = models.CharField(max_length=100)
expiration = models.DateTimeField()
manager = _oss_manager
class Meta:
db_table = 'sts_token'
@property
def is_effective(self):
return timezone.now() <= self.expiration
| zhaowenxiang/chisch | oss/models.py | Python | mit | 677 |
"""Controller for rendering pod content."""
import datetime
import mimetypes
import os
import sys
import time
from grow.common import utils
from grow.documents import static_document
from grow.pods import errors
from grow.rendering import rendered_document
from grow.templates import doc_dependency
from grow.templates import tags
class Error(Exception):
"""Base rendering pool error."""
def __init__(self, message):
super(Error, self).__init__(message)
self.message = message
class UnknownKindError(Error):
"""Unknown kind of information."""
pass
class IgnoredPathError(Error):
"""Document is being served at an ignored path."""
pass
class RenderController(object):
"""Controls how the content is rendered and evaluated."""
def __init__(self, pod, serving_path, route_info, params=None, is_threaded=False):
self.pod = pod
self.serving_path = serving_path
self.route_info = route_info
self.params = params if params is not None else {}
self.render_timer = None
self.use_jinja = False
self.is_threaded = is_threaded
@staticmethod
def clean_source_dir(source_dir):
"""Clean the formatting of the source dir to format correctly."""
source_dir = source_dir.strip()
source_dir = source_dir.rstrip(os.path.sep)
return source_dir
@staticmethod
def from_route_info(pod, serving_path, route_info, params=None):
"""Create the correct controller based on the route info."""
if params is None:
params = {}
if route_info.kind == 'doc':
return RenderDocumentController(
pod, serving_path, route_info, params=params)
elif route_info.kind == 'static':
return RenderStaticDocumentController(
pod, serving_path, route_info, params=params)
elif route_info.kind == 'sitemap':
return RenderSitemapController(
pod, serving_path, route_info, params=params)
elif route_info.kind == 'error':
return RenderErrorController(
pod, serving_path, route_info, params=params)
raise UnknownKindError(
'Do not have a controller for: {}'.format(route_info.kind))
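    # Typical flow: the router maps a serving path to route_info, this factory
    # returns the matching controller, and the caller either render()s it through
    # a Jinja environment or load()s a previously built file from source_dir.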
@property
def locale(self):
"""Locale to use for rendering."""
return None
@property
def mimetype(self):
"""Guess the mimetype of the content."""
return 'text/plain'
def get_http_headers(self):
"""Determine headers to serve for https requests."""
headers = {}
mimetype = self.mimetype
if mimetype:
headers['Content-Type'] = mimetype
return headers
def load(self, source_dir):
"""Load the pod content from file system."""
raise NotImplementedError
def render(self, jinja_env, request=None):
"""Render the pod content."""
raise NotImplementedError
def validate_path(self, *path_filters):
"""Validate that the path is valid against all filters."""
# Default test against the pod filter for deployment specific filtering.
path_filters = list(path_filters) or [self.pod.path_filter]
for path_filter in path_filters:
if not path_filter.is_valid(self.serving_path):
text = '{} is an ignored path.'
raise errors.RouteNotFoundError(text.format(self.serving_path))
class RenderDocumentController(RenderController):
"""Controller for handling rendering for documents."""
def __init__(self, pod, serving_path, route_info, params=None, is_threaded=False):
super(RenderDocumentController, self).__init__(
pod, serving_path, route_info, params=params, is_threaded=is_threaded)
self._doc = None
self.use_jinja = True
def __repr__(self):
return '<RenderDocumentController({})>'.format(self.route_info.meta['pod_path'])
@property
def doc(self):
"""Doc for the controller."""
if not self._doc:
pod_path = self.route_info.meta['pod_path']
locale = self.route_info.meta.get(
'locale', self.params.get('locale'))
self._doc = self.pod.get_doc(pod_path, locale=locale)
return self._doc
@property
def locale(self):
"""Locale to use for rendering."""
if 'locale' in self.route_info.meta:
return self.route_info.meta['locale']
return self.doc.locale if self.doc else None
@property
def mimetype(self):
"""Determine headers to serve for https requests."""
return mimetypes.guess_type(self.doc.view)[0]
@property
def pod_path(self):
"""Locale to use for rendering."""
if 'pod_path' in self.route_info.meta:
return self.route_info.meta['pod_path']
return self.doc.pod_path if self.doc else None
@property
def suffix(self):
"""Determine headers to serve for https requests."""
_, ext = os.path.splitext(self.doc.view)
if ext == '.html':
return 'index.html'
return ''
def load(self, source_dir):
"""Load the pod content from file system."""
timer = self.pod.profile.timer(
'RenderDocumentController.load',
label='{} ({})'.format(self.pod_path, self.locale),
meta={
'path': self.pod_path,
'locale': str(self.locale)}
).start_timer()
source_dir = self.clean_source_dir(source_dir)
# Validate the path with the config filters.
self.validate_path()
try:
doc = self.doc
serving_path = self.serving_path
if serving_path.endswith('/'):
serving_path = '{}{}'.format(serving_path, self.suffix)
rendered_path = '{}{}'.format(source_dir, serving_path)
rendered_content = self.pod.storage.read(rendered_path)
rendered_doc = rendered_document.RenderedDocument(
serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
except Exception as err:
exception = errors.BuildError(str(err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
def render(self, jinja_env=None, request=None):
"""Render the document using the render pool."""
timer = self.pod.profile.timer(
'RenderDocumentController.render',
label='{} ({})'.format(self.doc.pod_path, self.doc.locale),
meta={
'path': self.doc.pod_path,
'locale': str(self.doc.locale)}
).start_timer()
# Validate the path with the config filters.
self.validate_path()
doc = self.doc
template = jinja_env['env'].get_template(doc.view.lstrip('/'))
track_dependency = doc_dependency.DocDependency(doc)
local_tags = tags.create_builtin_tags(
self.pod, doc, track_dependency=track_dependency)
# NOTE: This should be done using get_template(... globals=...)
# or passed as an argument into render but
# it is not available included inside macros???
# See: https://github.com/pallets/jinja/issues/688
template.globals['g'] = local_tags
# Track the message stats, including untranslated strings.
if self.pod.is_enabled(self.pod.FEATURE_TRANSLATION_STATS):
template.globals['_'] = tags.make_doc_gettext(doc)
try:
doc.footnotes.reset()
serving_path = doc.get_serving_path()
if serving_path.endswith('/'):
serving_path = '{}{}'.format(serving_path, self.suffix)
content = self.pod.extensions_controller.trigger('pre_render', doc, doc.body)
if content:
doc.format.update(content=content)
rendered_content = template.render({
'doc': doc,
'request': request,
'env': self.pod.env,
'podspec': self.pod.podspec,
'_track_dependency': track_dependency,
}).lstrip()
rendered_content = self.pod.extensions_controller.trigger(
'post_render', doc, rendered_content)
rendered_doc = rendered_document.RenderedDocument(
serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
except Exception as err:
exception = errors.BuildError(str(err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
class RenderErrorController(RenderController):
"""Controller for handling rendering for errors."""
def __init__(self, pod, serving_path, route_info, params=None, is_threaded=False):
super(RenderErrorController, self).__init__(
pod, serving_path, route_info, params=params, is_threaded=is_threaded)
self.use_jinja = True
def __repr__(self):
return '<RenderErrorController({})>'.format(self.route_info.meta['view'])
def load(self, source_dir):
"""Load the pod content from file system."""
timer = self.pod.profile.timer(
'RenderErrorController.load',
label='{} ({})'.format(
self.route_info.meta['key'], self.route_info.meta['view']),
meta={
'key': self.route_info.meta['key'],
'view': self.route_info.meta['view'],
}
).start_timer()
source_dir = self.clean_source_dir(source_dir)
# Validate the path with the config filters.
self.validate_path()
try:
serving_path = '/{}.html'.format(self.route_info.meta['key'])
rendered_path = '{}{}'.format(source_dir, serving_path)
rendered_content = self.pod.storage.read(rendered_path)
rendered_doc = rendered_document.RenderedDocument(
serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
except Exception as err:
text = 'Error building {}: {}'
if self.pod:
self.pod.logger.exception(text.format(self, err))
exception = errors.BuildError(text.format(self, err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
def render(self, jinja_env=None, request=None):
"""Render the document using the render pool."""
timer = self.pod.profile.timer(
'RenderErrorController.render',
label='{} ({})'.format(
self.route_info.meta['key'], self.route_info.meta['view']),
meta={
'key': self.route_info.meta['key'],
'view': self.route_info.meta['view'],
}
).start_timer()
# Validate the path with the config filters.
self.validate_path()
with jinja_env['lock']:
template = jinja_env['env'].get_template(
self.route_info.meta['view'].lstrip('/'))
local_tags = tags.create_builtin_tags(self.pod, doc=None)
# NOTE: This should be done using get_template(... globals=...)
# or passed as an argument into render but
            # it is not available when included inside macros???
# See: https://github.com/pallets/jinja/issues/688
template.globals['g'] = local_tags
try:
serving_path = '/{}.html'.format(self.route_info.meta['key'])
rendered_doc = rendered_document.RenderedDocument(
serving_path, template.render({
'doc': None,
'env': self.pod.env,
'podspec': self.pod.podspec,
}).lstrip())
timer.stop_timer()
return rendered_doc
except Exception as err:
text = 'Error building {}: {}'
if self.pod:
self.pod.logger.exception(text.format(self, err))
exception = errors.BuildError(text.format(self, err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
class RenderSitemapController(RenderController):
"""Controller for handling rendering for sitemaps."""
@property
def mimetype(self):
"""Determine headers to serve for https requests."""
return mimetypes.guess_type(self.serving_path)[0]
def load(self, source_dir):
"""Load the pod content from file system."""
timer = self.pod.profile.timer(
'RenderSitemapController.load',
label='{}'.format(self.serving_path),
meta=self.route_info.meta,
).start_timer()
source_dir = self.clean_source_dir(source_dir)
# Validate the path with the config filters.
self.validate_path()
try:
rendered_path = '{}{}'.format(source_dir, self.serving_path)
rendered_content = self.pod.storage.read(rendered_path)
rendered_doc = rendered_document.RenderedDocument(
self.serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
except Exception as err:
text = 'Error building {}: {}'
if self.pod:
self.pod.logger.exception(text.format(self, err))
exception = errors.BuildError(text.format(self, err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
def render(self, jinja_env=None, request=None):
"""Render the document using the render pool."""
timer = self.pod.profile.timer(
'RenderSitemapController.render',
label='{}'.format(self.serving_path),
meta=self.route_info.meta,
).start_timer()
# Validate the path with the config filters.
self.validate_path()
# Duplicate the routes to use the filters without messing up routing.
temp_router = self.pod.router.__class__(self.pod)
temp_router.add_all()
# Sitemaps only show documents...?
temp_router.filter('whitelist', kinds=['doc'])
for sitemap_filter in self.route_info.meta.get('filters') or []:
temp_router.filter(
sitemap_filter['type'], collection_paths=sitemap_filter.get('collections'),
paths=sitemap_filter.get('paths'), locales=sitemap_filter.get('locales'))
# Need a custom root for rendering sitemap.
root = os.path.join(utils.get_grow_dir(), 'pods', 'templates')
jinja_env = self.pod.render_pool.custom_jinja_env(root=root)
with jinja_env['lock']:
if self.route_info.meta.get('template'):
content = self.pod.read_file(self.route_info.meta['template'])
template = jinja_env['env'].from_string(content)
else:
template = jinja_env['env'].get_template('sitemap.xml')
try:
docs = []
for _, value, _ in temp_router.routes.nodes:
docs.append(self.pod.get_doc(value.meta['pod_path'], locale=value.meta['locale']))
rendered_doc = rendered_document.RenderedDocument(
self.serving_path, template.render({
'pod': self.pod,
'env': self.pod.env,
'docs': docs,
'podspec': self.pod.podspec,
}).lstrip())
timer.stop_timer()
return rendered_doc
except Exception as err:
text = 'Error building {}: {}'
if self.pod:
self.pod.logger.exception(text.format(self, err))
exception = errors.BuildError(text.format(self, err))
exception.traceback = sys.exc_info()[2]
exception.controller = self
exception.exception = err
raise exception
class RenderStaticDocumentController(RenderController):
"""Controller for handling rendering for static documents."""
def __init__(self, pod, serving_path, route_info, params=None, is_threaded=False):
super(RenderStaticDocumentController, self).__init__(
pod, serving_path, route_info, params=params, is_threaded=is_threaded)
self._static_doc = None
self._pod_path = None
def __repr__(self):
return '<RenderStaticDocumentController({})>'.format(self.route_info.meta['pod_path'])
@property
def pod_path(self):
"""Static doc for the controller."""
if self._pod_path:
return self._pod_path
locale = self.route_info.meta.get(
'locale', self.params.get('locale'))
if 'pod_path' in self.route_info.meta:
self._pod_path = self.route_info.meta['pod_path']
else:
for source_format in self.route_info.meta['source_formats']:
path_format = '{}{}'.format(source_format, self.params['*'])
self._pod_path = self.pod.path_format.format_static(
path_format, locale=locale)
# Strip the fingerprint to get to the raw static file.
self._pod_path = static_document.StaticDocument.strip_fingerprint(
self._pod_path)
try:
# Throws an error when the document doesn't exist.
_ = self.pod.get_static(self._pod_path, locale=locale)
break
except errors.DocumentDoesNotExistError:
self._pod_path = None
return self._pod_path
@property
def static_doc(self):
"""Static doc for the controller."""
if not self._static_doc:
locale = self.route_info.meta.get(
'locale', self.params.get('locale'))
self._static_doc = self.pod.get_static(self.pod_path, locale=locale)
return self._static_doc
@property
def mimetype(self):
"""Determine headers to serve for https requests."""
return mimetypes.guess_type(self.serving_path)[0]
def get_http_headers(self):
"""Determine headers to serve for http requests."""
headers = super(RenderStaticDocumentController, self).get_http_headers()
if self.pod_path is None:
return headers
path = self.pod.abs_path(self.static_doc.pod_path)
self.pod.storage.update_headers(headers, path)
modified = self.pod.storage.modified(path)
time_obj = datetime.datetime.fromtimestamp(modified).timetuple()
time_format = '%a, %d %b %Y %H:%M:%S GMT'
headers['Last-Modified'] = time.strftime(time_format, time_obj)
headers['ETag'] = '"{}"'.format(headers['Last-Modified'])
headers['X-Grow-Pod-Path'] = self.static_doc.pod_path
if self.static_doc.locale:
headers['X-Grow-Locale'] = self.static_doc.locale
return headers
def load(self, source_dir):
"""Load the pod content from file system."""
timer = self.pod.profile.timer(
'RenderStaticDocumentController.load', label=self.serving_path,
meta={'path': self.serving_path}).start_timer()
source_dir = self.clean_source_dir(source_dir)
# Validate the path with the static config specific filter.
self.validate_path(self.route_info.meta['path_filter'])
rendered_path = '{}{}'.format(source_dir, self.serving_path)
rendered_content = self.pod.storage.read(rendered_path)
rendered_doc = rendered_document.RenderedDocument(
self.serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
def render(self, jinja_env=None, request=None):
"""Read the static file."""
timer = self.pod.profile.timer(
'RenderStaticDocumentController.render', label=self.serving_path,
meta={'path': self.serving_path}).start_timer()
if not self.pod_path or not self.pod.file_exists(self.pod_path):
text = '{} was not found in static files.'
raise errors.RouteNotFoundError(text.format(self.serving_path))
# Validate the path with the static config specific filter.
self.validate_path(self.route_info.meta['path_filter'])
rendered_content = self.pod.read_file(self.pod_path)
rendered_content = self.pod.extensions_controller.trigger(
'post_render', self.static_doc, rendered_content)
rendered_doc = rendered_document.RenderedDocument(
self.serving_path, rendered_content)
timer.stop_timer()
return rendered_doc
| grow/grow | grow/rendering/render_controller.py | Python | mit | 21,264 |
# Copyright (C) 2012 Andy Balaam and The Pepper Developers
# Released under the MIT License. See the file COPYING.txt for details.
from nose.tools import *
from libpepper import builtins
from libpepper.environment import PepEnvironment
from libpepper.vals.all_values import *
def PlusEquals_increases_int_value___test():
env = PepEnvironment( None )
builtins.add_builtins( env )
PepInit( PepSymbol('int'), PepSymbol('x'), PepInt('7') ).evaluate( env )
# Sanity
assert_equal( "7", PepSymbol('x').evaluate( env ).value )
PepModification( PepSymbol('x'), PepInt('3') ).evaluate( env )
assert_equal( "10", PepSymbol('x').evaluate( env ).value )
def PlusEquals_increases_float_value___test():
env = PepEnvironment( None )
builtins.add_builtins( env )
PepInit(
PepSymbol('float'),
PepSymbol('x'),
PepFloat('7.2')
).evaluate( env )
# Sanity
assert_equal( "7.2", PepSymbol('x').evaluate( env ).value )
PepModification( PepSymbol('x'), PepFloat('0.3') ).evaluate( env )
assert_equal( "7.5", PepSymbol('x').evaluate( env ).value )
| andybalaam/pepper | old/pepper1/src/test/evaluation/test_plusequals.py | Python | mit | 1,124 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from clint.textui import prompt
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
import kolibri
from ...utils import dbrestore
from ...utils import default_backup_folder
from ...utils import get_dtm_from_backup_name
from ...utils import search_latest
from kolibri.utils import server
logger = logging.getLogger(__name__)
class Command(BaseCommand):
output_transaction = True
# @ReservedAssignment
help = (
"Restores a database backup of Kolibri. This is not intended for "
"replication across different devices, but *only* for restoring a "
"single device from a local backup of the database."
)
def add_arguments(self, parser):
parser_group = parser.add_mutually_exclusive_group(required=True)
parser_group.add_argument(
"dump_file",
nargs="?",
type=str,
help="Specifies the exact dump file to restore from",
)
parser_group.add_argument(
"--latest",
"-l",
action="store_true",
dest="latest",
help=(
"Automatically detect and restore from latest backup matching "
"the major and minor version (X.Y) of current installation."
),
)
parser_group.add_argument(
"--select",
"-s",
action="store_true",
dest="select",
help=(
"Show the list of the last 10 backups Kolibri has done automatically "
"for the user to select which one must be restored."
),
)
def fetch_latest(self, dumps_root):
"""
Returns the latest backup file available in the dumps_root directory
"""
use_backup = None
# Ultimately, we are okay about a backup from a minor release
fallback_version = ".".join(map(str, kolibri.VERSION[:2]))
if os.path.exists(dumps_root):
use_backup = search_latest(dumps_root, fallback_version)
if not use_backup:
raise RuntimeError(
"Could not find a database backup for version: {}".format(
fallback_version
)
)
return use_backup
def select_backup(self, dumps_root):
"""
        Shows the latest 10 dumps available in the dumps_root directory and
        returns the path of the backup selected by the user. Dumps are sorted
        by date, latest first.
"""
backups = []
if os.path.exists(dumps_root):
backups = os.listdir(dumps_root)
backups = filter(lambda f: f.endswith(".dump"), backups)
backups = list(backups)
backups.sort(key=get_dtm_from_backup_name, reverse=True)
backups = backups[:10] # don't show more than 10 backups
if not backups:
raise RuntimeError("Could not find a database backup}")
# Shows a list of options to select from
backup_options = [
{
"selector": str(sel + 1),
"prompt": get_dtm_from_backup_name(backup),
"return": backup,
}
for sel, backup in enumerate(backups)
]
selected_backup = prompt.options(
"Type the number in brackets to select the backup to be restored",
backup_options,
)
return os.path.join(dumps_root, selected_backup)
def handle(self, *args, **options):
try:
server.get_status()
self.stderr.write(
self.style.ERROR(
"Cannot restore while Kolibri is running, please run:\n"
"\n"
" kolibri stop\n"
)
)
raise SystemExit()
except server.NotRunning:
# Great, it's not running!
pass
latest = options["latest"]
select = options["select"]
use_backup = options.get("dump_file", None)
logger.info("Beginning database restore")
search_root = default_backup_folder()
if latest:
use_backup = self.fetch_latest(search_root)
elif select:
use_backup = self.select_backup(search_root)
logger.info("Using backup file: {}".format(use_backup))
if not os.path.isfile(use_backup):
raise CommandError("Couldn't find: {}".format(use_backup))
dbrestore(use_backup)
self.stdout.write(
self.style.SUCCESS("Restored database from: {path}".format(path=use_backup))
)
| lyw07/kolibri | kolibri/core/deviceadmin/management/commands/dbrestore.py | Python | mit | 4,759 |
import pytest
from click.testing import CliRunner
import doitlive
@pytest.fixture(scope="session")
def runner():
doitlive.cli.TESTING = True
return CliRunner()
| sloria/doitlive | tests/conftest.py | Python | mit | 171 |
print ("Hello python world! My first python script!!")
print ("feeling excited!!") | balajithangamani/LearnPy | hello.py | Python | mit | 82 |
import asyncio
import signal
import configargparse
from structlog import get_logger
from alarme import Application
from alarme.scripts.common import init_logging, uncaught_exception, loop_uncaught_exception
def exit_handler(app, logger, sig):
logger.info('application_signal', name=sig.name, value=sig.value)
app.stop()
def run(config_path, log,
core_application_factory=Application,
):
# Logging init
logger = get_logger()
loop = asyncio.get_event_loop()
init_logging(log, 'server')
loop.set_exception_handler(loop_uncaught_exception)
# Core init
logger.info('application_init')
core_app = core_application_factory(exception_handler=uncaught_exception)
loop.run_until_complete(core_app.load_config(config_path))
for signal_code in (signal.SIGINT, signal.SIGTERM):
loop.add_signal_handler(signal_code, exit_handler, core_app, logger, signal_code)
loop.run_until_complete(core_app.run())
loop.close()
def main():
parser = configargparse.ArgParser(description='Alarm system software for Raspberry Pi')
parser.add_argument('-gc', '--generic-config', is_config_file=True, help='Generic config')
parser.add_argument('-c', '--config', help='Config directory')
parser.add_argument('-l', '--log', type=str, default='/var/log/alarme', help='Logs dir')
args = parser.parse_args()
run(args.config, args.log)
if __name__ == '__main__':
main()
| insolite/alarme | alarme/scripts/server.py | Python | mit | 1,452 |
"""
Last: 5069
Script with simple UI for creating gaplines data
Run: python WordClassDM.py --index 0
Controls:
setting gaplines - click and drag
saving gaplines - 's' key
reseting gaplines - 'r' key
skip to next img - 'n' key
delete last line - 'd' key
"""
import cv2
import os
import numpy as np
import glob
import argparse
import simplejson
from ocr.normalization import imageNorm
from ocr.viz import printProgressBar
def loadImages(dataloc, idx=0, num=None):
""" Load images and labels """
print("Loading words...")
    # Load images and sort them from the oldest to the newest
imglist = glob.glob(os.path.join(dataloc, u'*.jpg'))
imglist.sort(key=lambda x: float(x.split("_")[-1][:-4]))
tmpLabels = [name[len(dataloc):] for name in imglist]
labels = np.array(tmpLabels)
images = np.empty(len(imglist), dtype=object)
if num is None:
upper = len(imglist)
else:
upper = min(idx + num, len(imglist))
num += idx
for i, img in enumerate(imglist):
# TODO Speed up loading - Normalization
if i >= idx and i < upper:
images[i] = imageNorm(
cv2.cvtColor(cv2.imread(img), cv2.COLOR_BGR2RGB),
height=60,
border=False,
tilt=True,
hystNorm=True)
printProgressBar(i-idx, upper-idx-1)
print()
return (images[idx:num], labels[idx:num])
def locCheck(loc):
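    # Ensure the directory path ends with a trailing slash.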
return loc + '/' if loc[-1] != '/' else loc
class Cycler:
drawing = False
scaleF = 4
def __init__(self, idx, data_loc, save_loc):
""" Load images and starts from given index """
# self.images, self.labels = loadImages(loc, idx)
# Create save_loc directory if not exists
if not os.path.exists(save_loc):
os.makedirs(save_loc)
self.data_loc = locCheck(data_loc)
self.save_loc = locCheck(save_loc)
self.idx = 0
self.org_idx = idx
self.blockLoad()
self.image_act = self.images[self.idx]
cv2.namedWindow('image')
cv2.setMouseCallback('image', self.mouseHandler)
self.nextImage()
self.run()
def run(self):
while(1):
self.imageShow()
k = cv2.waitKey(1) & 0xFF
if k == ord('d'):
# Delete last line
self.deleteLastLine()
elif k == ord('r'):
# Clear current gaplines
self.nextImage()
elif k == ord('s'):
# Save gaplines with image
if self.saveData():
self.idx += 1
if self.idx >= len(self.images):
if not self.blockLoad():
break
self.nextImage()
elif k == ord('n'):
# Skip to next image
self.idx += 1
if self.idx >= len(self.images):
if not self.blockLoad():
break
self.nextImage()
elif k == 27:
cv2.destroyAllWindows()
break
print("End of labeling at INDEX: " + str(self.org_idx + self.idx))
def blockLoad(self):
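        # Load the next block of (up to) 100 word images and advance the base
        # index so the following block continues where this one ended.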
self.images, self.labels = loadImages(
self.data_loc, self.org_idx + self.idx, 100)
self.org_idx += self.idx
self.idx = 0
        return len(self.images) != 0
def imageShow(self):
cv2.imshow(
'image',
cv2.resize(
self.image_act,
(0,0),
fx=self.scaleF,
fy=self.scaleF,
                interpolation=cv2.INTER_NEAREST))
def nextImage(self):
self.image_act = cv2.cvtColor(self.images[self.idx], cv2.COLOR_GRAY2RGB)
self.label_act = self.labels[self.idx][:-4]
self.gaplines = [0, self.image_act.shape[1]]
self.redrawLines()
print(self.org_idx + self.idx, ":", self.label_act.split("_")[0])
        self.imageShow()
def saveData(self):
self.gaplines.sort()
print("Saving image with gaplines: ", self.gaplines)
try:
assert len(self.gaplines) - 1 == len(self.label_act.split("_")[0])
cv2.imwrite(
self.save_loc + '%s.jpg' % (self.label_act),
self.images[self.idx])
with open(self.save_loc + '%s.txt' % (self.label_act), 'w') as fp:
simplejson.dump(self.gaplines, fp)
return True
except:
print("Wront number of gaplines")
return False
print()
self.nextImage()
def deleteLastLine(self):
if len(self.gaplines) > 0:
del self.gaplines[-1]
self.redrawLines()
def redrawLines(self):
self.image_act = cv2.cvtColor(self.images[self.idx], cv2.COLOR_GRAY2RGB)
for x in self.gaplines:
self.drawLine(x)
def drawLine(self, x):
cv2.line(
self.image_act, (x, 0), (x, self.image_act.shape[0]), (0,255,0), 1)
def mouseHandler(self, event, x, y, flags, param):
# Clip x into image width range
x = max(min(self.image_act.shape[1], x // self.scaleF), 0)
if event == cv2.EVENT_LBUTTONDOWN:
self.drawing = True
self.tmp = self.image_act.copy()
self.drawLine(x)
elif event == cv2.EVENT_MOUSEMOVE:
if self.drawing == True:
self.image_act = self.tmp.copy()
self.drawLine(x)
elif event == cv2.EVENT_LBUTTONUP:
self.drawing = False
if x not in self.gaplines:
self.gaplines.append(x)
self.image_act = self.tmp.copy()
self.drawLine(x)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
"Script creating UI for gaplines classification")
parser.add_argument(
"--index",
type=int,
default=0,
help="Index of starting image")
parser.add_argument(
"--data",
type=str,
default='data/words_raw',
help="Path to folder with images")
parser.add_argument(
"--save",
type=str,
default='data/words2',
help="Path to folder for saving images with gaplines")
args = parser.parse_args()
Cycler(args.index, args.data, args.save)
| Breta01/handwriting-ocr | src/data/data_creation/WordClassDM.py | Python | mit | 6,492 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "treemap.hoverlabel"
_path_str = "treemap.hoverlabel.font"
_valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.treemap.hoverlabel.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud for
family .
size
sizesrc
Sets the source reference on Chart Studio Cloud for
size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("familysrc", None)
_v = familysrc if familysrc is not None else _v
if _v is not None:
self["familysrc"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| plotly/python-api | packages/python/plotly/plotly/graph_objs/treemap/hoverlabel/_font.py | Python | mit | 11,209 |
"""Transformation functions for expressions."""
from tt.expressions import BooleanExpression
from tt.transformations.utils import ensure_bexpr
def apply_de_morgans(expr):
"""Convert an expression to a form with De Morgan's Law applied.
:returns: A new expression object, transformed so that De Morgan's Law has
been applied to negated *ANDs* and *ORs*.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here's a couple of simple examples showing De Morgan's Law being applied
to a negated AND and a negated OR::
>>> from tt import apply_de_morgans
>>> apply_de_morgans('~(A /\\ B)')
<BooleanExpression "~A \\/ ~B">
>>> apply_de_morgans('~(A \\/ B)')
<BooleanExpression "~A /\\ ~B">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.apply_de_morgans())
def apply_identity_law(expr):
"""Convert an expression to a form with the Identity Law applied.
It should be noted that this transformation will also annihilate terms
when possible. One such case where this would be applicable is the
expression ``A and 0``, which would be transformed to the constant value
``0``.
:returns: A new expression object, transformed so that the Identity Law
has been applied to applicable *ANDs* and *ORs*.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here are a few simple examples showing the behavior of this transformation
across all two-operand scenarios::
>>> from tt import apply_identity_law
>>> apply_identity_law('A and 1')
<BooleanExpression "A">
>>> apply_identity_law('A and 0')
<BooleanExpression "0">
>>> apply_identity_law('A or 0')
<BooleanExpression "A">
>>> apply_identity_law('A or 1')
<BooleanExpression "1">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.apply_identity_law())
def apply_idempotent_law(expr):
"""Convert an expression to a form with the Idempotent Law applied.
:returns: A new expression object, transformed so that the Idempotent Law
has been applied to applicable clauses.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid data type.
This transformation will apply the Idempotent Law to clauses of *AND* and
*OR* operators containing redundant operands. Here are a couple of simple
examples::
>>> from tt import apply_idempotent_law
>>> apply_idempotent_law('A and A')
<BooleanExpression "A">
>>> apply_idempotent_law('B or B')
<BooleanExpression "B">
This transformation will consider similarly-negated operands to be
redundant; for example::
>>> from tt import apply_idempotent_law
>>> apply_idempotent_law('~A and ~~~A')
<BooleanExpression "~A">
>>> apply_idempotent_law('B or ~B or ~~B or ~~~B or ~~~~B or ~~~~~B')
<BooleanExpression "B or ~B">
Let's also take a quick look at this transformation's ability to prune
redundant operands from CNF and DNF clauses::
>>> from tt import apply_idempotent_law
>>> apply_idempotent_law('(A and B and C and C and B) or (A and A)')
<BooleanExpression "(A and B and C) or A">
Of important note is that this transformation will not recursively apply
the Idempotent Law to operands that bubble up. Here's an example
illustrating this case::
>>> from tt import apply_idempotent_law
>>> apply_idempotent_law('(A or A) and (A or A)')
<BooleanExpression "A and A">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.apply_idempotent_law())
def apply_inverse_law(expr):
"""Convert an expression to a form with the Inverse Law applied.
:returns: A new expression object, transformed so that the Inverse Law
has been applied to applicable *ANDs* and *ORs*.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
    This transformation will apply the Inverse Law to simple binary
expressions consisting of negated and non-negated forms of the same
operand. Let's take a look::
>>> from tt.transformations import apply_inverse_law
>>> apply_inverse_law('A and ~A')
<BooleanExpression "0">
>>> apply_inverse_law('A or B or ~B or C')
<BooleanExpression "1">
This transformation will also apply the behavior expected of the Inverse
Law when negated and non-negated forms of the same operand appear in the
same CNF or DNF clause in an expression::
>>> from tt.transformations import apply_inverse_law
>>> apply_inverse_law('(A or B or ~A) -> (C and ~C)')
<BooleanExpression "1 -> 0">
>>> apply_inverse_law('(A or !!!A) xor (not C or not not C)')
<BooleanExpression "1 xor 1">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.apply_inverse_law())
def coalesce_negations(expr):
"""Convert an expression to a form with all negations condensed.
:returns: A new expression object, transformed so that all "runs" of
logical *NOTs* are condensed into the minimal equivalent number.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here's a simple example showing the basic premise of this transformation::
>>> from tt import coalesce_negations
>>> coalesce_negations('~~A or ~B or ~~~C or ~~~~D')
<BooleanExpression "A or ~B or ~C or D">
This transformation works on more complex expressions, too::
>>> coalesce_negations('!!(A -> not not B) or ~(~(A xor B))')
<BooleanExpression "(A -> B) or (A xor B)">
It should be noted that this transformation will also apply negations
to constant operands, as well. The behavior for this functionality is as
follows::
>>> coalesce_negations('~0')
<BooleanExpression "1">
>>> coalesce_negations('~1')
<BooleanExpression "0">
>>> coalesce_negations('~~~0 -> ~1 -> not 1')
<BooleanExpression "1 -> 0 -> 0">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.coalesce_negations())
def distribute_ands(expr):
"""Convert an expression to distribute ANDs over ORed clauses.
:param expr: The expression to transform.
:type expr: :class:`str <python:str>` or :class:`BooleanExpression \
<tt.expressions.bexpr.BooleanExpression>`
:returns: A new expression object, transformed to distribute ANDs over ORed
clauses.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here's a couple of simple examples::
>>> from tt import distribute_ands
>>> distribute_ands('A and (B or C or D)')
<BooleanExpression "(A and B) or (A and C) or (A and D)">
>>> distribute_ands('(A or B) and C')
<BooleanExpression "(A and C) or (B and C)">
And an example involving distributing a sub-expression::
>>> distribute_ands('(A and B) and (C or D or E)')
<BooleanExpression "(A and B and C) or (A and B and D) or \
(A and B and E)">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.distribute_ands())
def distribute_ors(expr):
"""Convert an expression to distribute ORs over ANDed clauses.
:param expr: The expression to transform.
:type expr: :class:`str <python:str>` or :class:`BooleanExpression \
<tt.expressions.bexpr.BooleanExpression>`
:returns: A new expression object, transformed to distribute ORs over ANDed
clauses.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here's a couple of simple examples::
>>> from tt import distribute_ors
>>> distribute_ors('A or (B and C and D and E)')
<BooleanExpression "(A or B) and (A or C) and (A or D) and (A or E)">
>>> distribute_ors('(A and B) or C')
<BooleanExpression "(A or C) and (B or C)">
And an example involving distributing a sub-expression::
>>> distribute_ors('(A or B) or (C and D)')
<BooleanExpression "(A or B or C) and (A or B or D)">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.distribute_ors())
def to_cnf(expr):
"""Convert an expression to conjunctive normal form (CNF).
This transformation only guarantees to produce an equivalent form of the
passed expression in conjunctive normal form; the transformed expression
    may be an inefficient representation of the passed expression.
:param expr: The expression to transform.
:type expr: :class:`str <python:str>` or :class:`BooleanExpression \
<tt.expressions.bexpr.BooleanExpression>`
:returns: A new expression object, transformed to be in CNF.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here are a few examples::
>>> from tt import to_cnf
>>> b = to_cnf('(A nor B) impl C')
>>> b
<BooleanExpression "A or B or C">
>>> b.is_cnf
True
>>> b = to_cnf(r'~(~(A /\\ B) /\\ C /\\ D)')
>>> b
<BooleanExpression "(A \\/ ~C \\/ ~D) /\\ (B \\/ ~C \\/ ~D)">
>>> b.is_cnf
True
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.to_cnf())
def to_primitives(expr):
"""Convert an expression to a form with only primitive operators.
    All operators will be transformed to an equivalent form composed only of
    the logical AND, OR, and NOT operators. Symbolic operators in the passed
expression will remain symbolic in the transformed expression and the same
applies for plain English operators.
:param expr: The expression to transform.
:type expr: :class:`str <python:str>` or :class:`BooleanExpression \
<tt.expressions.bexpr.BooleanExpression>`
:returns: A new expression object, transformed to contain only primitive
operators.
:rtype: :class:`BooleanExpression <tt.expressions.bexpr.BooleanExpression>`
:raises InvalidArgumentTypeError: If ``expr`` is not a valid type.
Here's a simple transformation of exclusive-or::
>>> from tt import to_primitives
>>> to_primitives('A xor B')
<BooleanExpression "(A and not B) or (not A and B)">
And another example of if-and-only-if (using symbolic operators)::
>>> to_primitives('A <-> B')
<BooleanExpression "(A /\\ B) \\/ (~A /\\ ~B)">
"""
bexpr = ensure_bexpr(expr)
return BooleanExpression(bexpr.tree.to_primitives())
| welchbj/tt | tt/transformations/bexpr.py | Python | mit | 11,265 |
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
import numpy as np
import gl
def testkf(input1,Q,R):
print gl.X_i_1
print gl.P_i_1
    # range(1, N) does not contain N
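    # Scalar Kalman filter measurement update: K_i is the Kalman gain,
    # X_i the corrected state estimate, and P_i the updated error covariance
    # (the previous estimate and covariance live as globals on the gl module).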
K_i = gl.P_i_1 / (gl.P_i_1 + R)
X_i = gl.X_i_1 + K_i * (input1 - gl.X_i_1)
P_i = gl.P_i_1 - K_i * gl.P_i_1 + Q
#print (X[i])
#Update
gl.P_i_1 = P_i
gl.X_i_1 = X_i
return X_i
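# Minimal usage sketch (hypothetical values; assumes the gl module simply holds
# the previous estimate X_i_1 and covariance P_i_1 as module-level attributes,
# as the function above implies):
#
#   gl.X_i_1, gl.P_i_1 = 0.0, 1.0
#   for z in [0.9, 1.1, 1.05, 0.95]:
#       print testkf(z, 1e-5, 0.1)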
| zharuosi/2017 | pythonNRC/modules/testkf.py | Python | mit | 379 |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/arvind/clover_hack_day/er1_robot/src/er1_motor_driver/msg/Motors.msg"
services_str = "/home/arvind/clover_hack_day/er1_robot/src/er1_motor_driver/srv/AddTwoInts.srv"
pkg_name = "er1_motor_driver"
dependencies_str = "std_msgs"
langs = "gencpp;genlisp;genpy"
dep_include_paths_str = "er1_motor_driver;/home/arvind/clover_hack_day/er1_robot/src/er1_motor_driver/msg;std_msgs;/opt/ros/indigo/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/indigo/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| arvindpereira/clover_hack_day | er1_robot/build/er1_motor_driver/cmake/er1_motor_driver-genmsg-context.py | Python | mit | 677 |
from mimetypes import guess_type
def get_git_info():
"""
    Parses the git info and returns a tuple containing the owner and repo
:deprecated:
:rtype: tuple
:return: (owner name, repo name)
"""
repo = ''
with open('.git/config') as f:
for line in f.readlines():
if 'url' in line:
repo = line.replace('url = ', '').strip()
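    # Assumes the remote URL ends with "<owner>/<repo>"; the last two path
    # components are returned below.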
r = repo.split('/')
# Return a tuple containing the owner and the repo name
return r[-2], r[-1]
def detect_mimetype(file_):
"""
Detects the provided file's mimetype. Used to determine if we should read
the file line-by-line.
:param str file_: The name of the file to guess the mimetype of
:rtype: str
:return: The mimetype of the file provided
"""
return guess_type(file_)
| GrappigPanda/pygemony | pyg/utils.py | Python | mit | 811 |
import logging
logging.basicConfig(level=logging.DEBUG)
import nengo
import nengo_spinnaker
import numpy as np
def test_probe_ensemble_voltages():
with nengo.Network("Test Network") as network:
# Create an Ensemble with 2 neurons that have known gain and bias. The
# result is that we know how the membrane voltage should change over
# time even with no external stimulus.
ens = nengo.Ensemble(2, 1)
ens.bias = [0.5, 1.0]
ens.gain = [0.0, 0.0]
# Add the voltage probe
probe = nengo.Probe(ens.neurons, "voltage")
# Compute the rise time to 95%
max_t = -ens.neuron_type.tau_rc * np.log(0.05)
# Run the simulation for this period of time
sim = nengo_spinnaker.Simulator(network)
with sim:
sim.run(max_t)
# Compute the ideal voltage curves
c = 1.0 - np.exp(-sim.trange() / ens.neuron_type.tau_rc)
ideal = np.dot(ens.bias[:, np.newaxis], c[np.newaxis, :]).T
# Assert that the ideal curves match the retrieved curves well
assert np.allclose(ideal, sim.data[probe], atol=1e-3)
if __name__ == "__main__":
test_probe_ensemble_voltages()
| project-rig/nengo_spinnaker | regression-tests/test_voltage_probing.py | Python | mit | 1,158 |
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from skTMVA import convert_bdt_sklearn_tmva
import cPickle
import numpy as np
from numpy.random import RandomState
RNG = RandomState(21)
# Construct an example dataset for binary classification
n_vars = 2
n_events = 10000
signal = RNG.multivariate_normal(
np.ones(n_vars), np.diag(np.ones(n_vars)), n_events)
background = RNG.multivariate_normal(
np.ones(n_vars) * -1, np.diag(np.ones(n_vars)), n_events)
X = np.concatenate([signal, background])
y = np.ones(X.shape[0])
w = RNG.randint(1, 10, n_events * 2)
y[signal.shape[0]:] *= -1
permute = RNG.permutation(y.shape[0])
X = X[permute]
y = y[permute]
# Use all dataset for training
X_train, y_train, w_train = X, y, w
# Declare BDT - we are going to use AdaBoost Decision Tree
dt = DecisionTreeClassifier(max_depth=3,
min_samples_leaf=int(0.05*len(X_train)))
bdt = AdaBoostClassifier(dt,
algorithm='SAMME',
n_estimators=800,
learning_rate=0.5)
# Train BDT
bdt.fit(X_train, y_train)
# Save BDT to pickle file
with open('bdt_sklearn_to_tmva_example.pkl', 'wb') as fid:
cPickle.dump(bdt, fid)
# Save BDT to TMVA xml file
# Note:
# - declare input variable names and their type
# - variable order is important for TMVA
convert_bdt_sklearn_tmva(bdt, [('var1', 'F'), ('var2', 'F')], 'bdt_sklearn_to_tmva_example.xml')
| yuraic/koza4ok | examples/bdt_sklearn_to_tmva_AdaBoost.py | Python | mit | 1,493 |
import sys
script, encoding, error = sys.argv
def main(language_file, encoding, errors):
line = language_file.readline()
if line:
print_line(line, encoding, errors)
return main(language_file, encoding, errors)
def print_line(line, encoding, errors):
next_lang = line.strip()
raw_bytes = next_lang.encode(encoding, errors=errors)
cooked_string = raw_bytes.decode(encoding, errors=errors)
print(raw_bytes, "<===>", cooked_string)
languages = open("languages.txt", encoding="utf-8")
main(languages, encoding, error)
| Herne/pythonplayground | lp3thw/ex23.py | Python | mit | 563 |
from cereal import car
from opendbc.can.packer import CANPacker
from selfdrive.car.mazda import mazdacan
from selfdrive.car.mazda.values import CarControllerParams, Buttons
from selfdrive.car import apply_std_steer_torque_limits
VisualAlert = car.CarControl.HUDControl.VisualAlert
class CarController():
def __init__(self, dbc_name, CP, VM):
self.apply_steer_last = 0
self.packer = CANPacker(dbc_name)
self.steer_rate_limited = False
self.brake_counter = 0
def update(self, c, CS, frame):
can_sends = []
apply_steer = 0
self.steer_rate_limited = False
if c.active:
# calculate steer and also set limits due to driver torque
new_steer = int(round(c.actuators.steer * CarControllerParams.STEER_MAX))
apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last,
CS.out.steeringTorque, CarControllerParams)
self.steer_rate_limited = new_steer != apply_steer
if CS.out.standstill and frame % 5 == 0:
# Mazda Stop and Go requires a RES button (or gas) press if the car stops more than 3 seconds
# Send Resume button at 20hz if we're engaged at standstill to support full stop and go!
# TODO: improve the resume trigger logic by looking at actual radar data
can_sends.append(mazdacan.create_button_cmd(self.packer, CS.CP.carFingerprint, CS.crz_btns_counter, Buttons.RESUME))
if c.cruiseControl.cancel or (CS.out.cruiseState.enabled and not c.enabled):
# If brake is pressed, let us wait >70ms before trying to disable crz to avoid
# a race condition with the stock system, where the second cancel from openpilot
# will disable the crz 'main on'. crz ctrl msg runs at 50hz. 70ms allows us to
# read 3 messages and most likely sync state before we attempt cancel.
self.brake_counter = self.brake_counter + 1
if frame % 10 == 0 and not (CS.out.brakePressed and self.brake_counter < 7):
# Cancel Stock ACC if it's enabled while OP is disengaged
# Send at a rate of 10hz until we sync with stock ACC state
can_sends.append(mazdacan.create_button_cmd(self.packer, CS.CP.carFingerprint, CS.crz_btns_counter, Buttons.CANCEL))
else:
self.brake_counter = 0
self.apply_steer_last = apply_steer
# send HUD alerts
if frame % 50 == 0:
ldw = c.hudControl.visualAlert == VisualAlert.ldw
steer_required = c.hudControl.visualAlert == VisualAlert.steerRequired
# TODO: find a way to silence audible warnings so we can add more hud alerts
steer_required = steer_required and CS.lkas_allowed_speed
can_sends.append(mazdacan.create_alert_command(self.packer, CS.cam_laneinfo, ldw, steer_required))
# send steering command
can_sends.append(mazdacan.create_steering_control(self.packer, CS.CP.carFingerprint,
frame, apply_steer, CS.cam_lkas))
new_actuators = c.actuators.copy()
new_actuators.steer = apply_steer / CarControllerParams.STEER_MAX
return new_actuators, can_sends
| commaai/openpilot | selfdrive/car/mazda/carcontroller.py | Python | mit | 3,112 |
import pprint
from cytoolz import (
assoc,
concatv,
partial,
pipe,
)
from semantic_version import (
Spec,
)
from eth_utils import (
add_0x_prefix,
to_dict,
to_tuple,
)
from solc import (
get_solc_version,
compile_standard,
)
from solc.exceptions import (
ContractsNotFound,
)
from populus.utils.compile import (
load_json_if_string,
normalize_contract_metadata,
)
from populus.utils.linking import (
normalize_standard_json_link_references,
)
from populus.utils.mappings import (
has_nested_key,
get_nested_key,
set_nested_key,
)
from .base import (
BaseCompilerBackend,
)
@to_dict
def build_standard_input_sources(source_file_paths):
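    # Map each source file path to its contents, matching the "sources" layout
    # of solc's standard-JSON input.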
for file_path in source_file_paths:
with open(file_path) as source_file:
yield file_path, {'content': source_file.read()}
@to_dict
def normalize_standard_json_contract_data(contract_data):
if 'metadata' in contract_data:
yield 'metadata', normalize_contract_metadata(contract_data['metadata'])
if 'evm' in contract_data:
evm_data = contract_data['evm']
if 'bytecode' in evm_data:
yield 'bytecode', add_0x_prefix(evm_data['bytecode'].get('object', ''))
if 'linkReferences' in evm_data['bytecode']:
yield 'linkrefs', normalize_standard_json_link_references(
evm_data['bytecode']['linkReferences'],
)
if 'deployedBytecode' in evm_data:
yield 'bytecode_runtime', add_0x_prefix(evm_data['deployedBytecode'].get('object', ''))
if 'linkReferences' in evm_data['deployedBytecode']:
yield 'linkrefs_runtime', normalize_standard_json_link_references(
evm_data['deployedBytecode']['linkReferences'],
)
if 'abi' in contract_data:
yield 'abi', load_json_if_string(contract_data['abi'])
if 'userdoc' in contract_data:
yield 'userdoc', load_json_if_string(contract_data['userdoc'])
if 'devdoc' in contract_data:
yield 'devdoc', load_json_if_string(contract_data['devdoc'])
@to_tuple
def normalize_compilation_result(compilation_result):
"""
Take the result from the --standard-json compilation and flatten it into an
iterable of contract data dictionaries.
"""
for source_path, file_contracts in compilation_result['contracts'].items():
for contract_name, raw_contract_data in file_contracts.items():
contract_data = normalize_standard_json_contract_data(raw_contract_data)
yield pipe(
contract_data,
partial(assoc, key='source_path', value=source_path),
partial(assoc, key='name', value=contract_name),
)
REQUIRED_OUTPUT_SELECTION = [
'abi',
'metadata',
'evm.bytecode',
'evm.bytecode.object',
'evm.bytecode.linkReferences',
'evm.deployedBytecode',
'evm.deployedBytecode.object',
'evm.deployedBytecode.linkReferences',
]
OUTPUT_SELECTION_KEY = 'settings.outputSelection.*.*'
class SolcStandardJSONBackend(BaseCompilerBackend):
project_source_glob = ('*.sol', )
test_source_glob = ('Test*.sol', )
def __init__(self, *args, **kwargs):
if get_solc_version() not in Spec('>=0.4.11'):
raise OSError(
"The 'SolcStandardJSONBackend can only be used with solc "
"versions >=0.4.11. The SolcCombinedJSONBackend should be used "
"for all versions <=0.4.8"
)
super(SolcStandardJSONBackend, self).__init__(*args, **kwargs)
def get_compiled_contracts(self, source_file_paths, import_remappings):
self.logger.debug("Import remappings: %s", import_remappings)
self.logger.debug("Compiler Settings PRE: %s", pprint.pformat(self.compiler_settings))
# DEBUG
self.compiler_settings['output_values'] = []
self.logger.debug("Compiler Settings POST: %s", pprint.pformat(self.compiler_settings))
if 'remappings' in self.compiler_settings and import_remappings is not None:
self.logger.warn("Import remappings setting will by overridden by backend settings")
sources = build_standard_input_sources(source_file_paths)
std_input = {
'language': 'Solidity',
'sources': sources,
'settings': {
'remappings': import_remappings,
'outputSelection': {
'*': {
'*': REQUIRED_OUTPUT_SELECTION
}
}
}
}
# solc command line options as passed to solc_wrapper()
# https://github.com/ethereum/py-solc/blob/3a6de359dc31375df46418e6ffd7f45ab9567287/solc/wrapper.py#L20
command_line_options = self.compiler_settings.get("command_line_options", {})
# Get Solidity Input Description settings section
# http://solidity.readthedocs.io/en/develop/using-the-compiler.html#input-description
std_input_settings = self.compiler_settings.get("stdin", {})
std_input['settings'].update(std_input_settings)
# Make sure the output selection has all of the required output values.
if has_nested_key(std_input, OUTPUT_SELECTION_KEY):
current_selection = get_nested_key(std_input, OUTPUT_SELECTION_KEY)
output_selection = list(set(concatv(current_selection, REQUIRED_OUTPUT_SELECTION)))
else:
output_selection = REQUIRED_OUTPUT_SELECTION
set_nested_key(std_input, OUTPUT_SELECTION_KEY, output_selection)
self.logger.debug("std_input sections: %s", std_input.keys())
self.logger.debug("Input Description JSON settings are: %s", std_input["settings"])
self.logger.debug("Command line options are: %s", command_line_options)
try:
compilation_result = compile_standard(std_input, **command_line_options)
except ContractsNotFound:
return {}
compiled_contracts = normalize_compilation_result(compilation_result)
return compiled_contracts
| pipermerriam/populus | populus/compilation/backends/solc_standard_json.py | Python | mit | 6,140 |
import datetime
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from ..tasks import trigger_instance
from . import app_settings
from .enums import StateEnum
from .models import Call
@csrf_exempt
@require_POST
def twiml_callback(request, ident):
call = get_object_or_404(Call, ident=ident)
return render(request, 'reminders/calls/twiml_callback.xml', {
'call': call,
}, content_type='text/xml')
@csrf_exempt
@require_POST
def gather_callback(request, ident):
call = get_object_or_404(Call, ident=ident)
# Mark if the user actually pressed a button
if request.POST.get('Digits'):
call.button_pressed = datetime.datetime.utcnow()
call.save(update_fields=('button_pressed',))
return render(
request,
'reminders/calls/gather_callback.xml',
content_type='text/xml'
)
@csrf_exempt
@require_POST
def status_callback(request, ident):
"""
https://www.twilio.com/help/faq/voice/what-do-the-call-statuses-mean
Example POST data:
SipResponseCode: 500
ApiVersion: 2010-04-01
AccountSid: AC7d6b676d2a17527a71a2bb41301b5e6f
Duration: 0
Direction: outbound-api
CallStatus: busy
SequenceNumber: 0
Timestamp: Mon, 16 Nov 2015 16:10:53 +0000
Caller: +441143032046
CallDuration: 0
To: +447753237119
CallbackSource: call-progress-events
Called: +447751231511
From: +441143032046
CallSid: CA19fd373bd82b81b602c75d1ddc7745e7
"""
call = get_object_or_404(Call, ident=ident)
try:
call.state = {
'queued': StateEnum.dialing,
'initiated': StateEnum.dialing,
'ringing': StateEnum.dialing,
'in-progress': StateEnum.answered,
'completed': StateEnum.answered,
'busy': StateEnum.busy,
'no-answer': StateEnum.no_answer,
'cancelled': StateEnum.failed,
'failed': StateEnum.failed,
}[request.POST['CallStatus']]
except KeyError:
call.state = StateEnum.unknown
call.state_updated = datetime.datetime.utcnow()
call.save(update_fields=('state', 'state_updated'))
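    # Retry the call later if the user never pressed a button and we have not
    # yet exhausted the configured number of retries.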
if not call.button_pressed \
and call.instance.calls.count() < app_settings.RETRY_COUNT:
trigger_instance.apply_async(
args=(call.instance_id,),
countdown=app_settings.RETRY_AFTER_SECONDS,
)
return HttpResponse('')
| takeyourmeds/takeyourmeds-web | takeyourmeds/reminders/reminders_calls/views.py | Python | mit | 2,634 |
import imp
import os
tools = []
for name in os.listdir(os.path.dirname(__file__)):
if not name.startswith('_'): # _ in the front indicates that this tool is disabled
directory = os.path.join(os.path.dirname(__file__), name)
if os.path.isdir(directory):
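            # Each tool lives in <name>/<name>.py and is expected to expose a
            # callable (or class) named <name>.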
file = os.path.join(directory, name + '.py')
tool = imp.load_source(name, file)
tools.append(getattr(tool, name)) | nullzero/wpcgi | wpcgi/tools/__init__.py | Python | mit | 424 |
#!/usr/bin/env python
# Copyright (c) 2011, 2013 SEOmoz, Inc
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from setuptools import setup
setup(
name='s3po',
version='0.6.1',
description='An uploading daemon for S3',
long_description='''Boto is a wonderful library. This is just a little
help for dealing with multipart uploads, batch uploading with gevent
and getting some help when mocking''',
author='Moz, Inc.',
author_email="[email protected]",
url='http://github.com/seomoz/s3po',
packages=['s3po', 's3po.backends'],
license='MIT',
platforms='Posix; MacOS X',
install_requires=[
'boto3',
'coverage',
'gevent',
'mock',
'nose',
'python_swiftclient',
'six'
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| seomoz/s3po | setup.py | Python | mit | 2,278 |
from django.core.management import setup_environ
import settings
setup_environ(settings)
from apps.modules.tasks import update_data
update_data.delay()
| udbhav/eurorack-planner | scripts/update_data.py | Python | mit | 167 |
import numpy as np
import cv2
import matplotlib.image as mpimg
import pickle
from line import Line
from warp_transformer import WarpTransformer
from moviepy.editor import VideoFileClip
calibration_mtx_dist_filename = 'dist_pickle.p'
# load mtx, dist
dist_pickle = pickle.load(open(calibration_mtx_dist_filename, "rb" ))
mtx = dist_pickle["mtx"]
dist = dist_pickle["dist"]
def binary_image_via_threshold(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
'''
From Advanced Lane Finding lesson, section 30
'''
img = np.copy(img)
# Convert to HLS color space and separate the V channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# combined them here.
combined = np.zeros_like(s_channel)
combined[(sxbinary == 1) | (s_binary == 1)] = 1
return combined
# Perspective transform
src = np.array([[262, 677], [580, 460], [703, 460], [1040, 677]]).astype(np.float32)
dst = np.array([[262, 720], [262, 0], [1040, 0], [1040, 720]]).astype(np.float32)
# Create transformer object
transformer = WarpTransformer(src, dst)
left_line = Line()
right_line = Line()
def non_sliding(binary_warped, line):
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
left_fit = left_line.current_fit
right_fit = right_line.current_fit
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] - margin))
& (nonzerox < (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] - margin))
& (nonzerox < (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
if line == 'left':
return leftx, lefty
elif line == 'right':
return rightx, righty
def sliding_window(binary_warped, line):
out_img = (np.dstack((binary_warped, binary_warped, binary_warped)) * 255).astype(np.uint8)
histogram = np.sum(binary_warped[int(binary_warped.shape[0]/2):,:], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(binary_warped.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img, (win_xleft_low,win_y_low), (win_xleft_high,win_y_high), color=(0,255,0), thickness=2) # Green
cv2.rectangle(out_img, (win_xright_low,win_y_low), (win_xright_high,win_y_high), color=(0,255,0), thickness=2) # Green
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
if line == 'left':
return leftx, lefty
elif line == 'right':
return rightx, righty
def pipeline(start_img):
'''
Incoming image must be RGB!!
'''
undist = cv2.undistort(start_img, mtx, dist, None, mtx)
combined = binary_image_via_threshold(undist)
binary_warped = transformer.to_birdview(combined)
# Check if line was detected in previous frame:
if left_line.detected == True:
leftx, lefty = non_sliding(binary_warped, 'left')
elif left_line.detected == False:
leftx, lefty = sliding_window(binary_warped, 'left')
left_line.detected = True
if right_line.detected == True:
rightx, righty = non_sliding(binary_warped, 'right')
elif right_line.detected == False:
rightx, righty = sliding_window(binary_warped, 'right')
right_line.detected = True
# Fit a second order polynomial to each
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Stash away polynomials
left_line.current_fit = left_fit
right_line.current_fit = right_fit
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Fit new polynomials to x,y in world space
left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, deg=2)
right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, deg=2)
# Calculate radii of curvature in meters
y_eval = np.max(ploty) # Where radius of curvature is measured
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
midpoint = np.int(start_img.shape[1]/2)
middle_of_lane = (right_fitx[-1] - left_fitx[-1]) / 2.0 + left_fitx[-1]
offset = (midpoint - middle_of_lane) * xm_per_pix
# Create an image to draw the lines on
warped_zero = np.zeros_like(binary_warped).astype(np.uint8)
color_warped = np.dstack((warped_zero, warped_zero, warped_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warped, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
unwarped = transformer.to_normal(color_warped)
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, unwarped, 0.3, 0)
radius = np.mean([left_curverad, right_curverad])
# Add radius and offset calculations to top of video
cv2.putText(result,"Curvature Radius: " + "{:0.2f}".format(radius) + ' m', org=(50,50), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=2, color=(0,0,0), lineType = cv2.LINE_AA, thickness=2)
cv2.putText(result,"Lane center offset: " + "{:0.2f}".format(offset) + ' m', org=(50,100), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=2, color=(0,0,0), lineType = cv2.LINE_AA, thickness=2)
return result
if __name__ == '__main__':
movie_output = 'final_output.mp4'
clip1 = VideoFileClip("project_video.mp4")
driving_clip = clip1.fl_image(pipeline)
driving_clip.write_videofile(movie_output, audio=False)
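    # Optional sanity check (illustrative; the image path is an assumption, not part of the repo):
    # img = mpimg.imread('test_images/test1.jpg')  # mpimg yields RGB, which pipeline() expects
    # annotated = pipeline(img)
    # mpimg.imsave('test_output.jpg', annotated)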
| mez/carnd | P4_advance_lane_finding/main.py | Python | mit | 9,709 |
from setuptools import setup, find_packages
setup(
name="Coinbox-mod-customer",
version="0.2",
packages=find_packages(),
zip_safe=True,
namespace_packages=['cbmod'],
include_package_data=True,
install_requires=[
'sqlalchemy>=0.7, <1.0',
'PyDispatcher>=2.0.3, <3.0',
'ProxyTypes>=0.9, <1.0',
'Babel>=1.3, <2.0',
'PySide>=1.0,<2.0'
],
author='Coinbox POS Team',
author_email='[email protected]',
description='Coinbox POS customer module',
license='MIT',
url='http://coinboxpos.org/'
)
| coinbox/coinbox-mod-customer | setup.py | Python | mit | 654 |
#!/usr/bin/env python
'''
Retrospectively updates older FFV1/DV packages in order to meet our current
packaging requirements. This should allow accession.py and makepbcore.py to run as
expected. This script should work on files created by:
makeffv1.py
dvsip.py
loopline.py
'''
import argparse
import sys
import shutil
import os
import time
import ififuncs
def parse_args(args_):
'''
Parse command line arguments.
'''
parser = argparse.ArgumentParser(
description='Retrospectively updates older FFV1/DV packages in order to'
'meet our current packaging requirements. This should allow'
' accession.py and makepbcore.py to run as expected.'
' Written by Kieran O\'Leary.'
)
parser.add_argument(
'input', help='Input directory'
)
parser.add_argument(
'-start_number',
help='Enter the Object Entry number for the first package. The script will increment by one for each subsequent package.'
)
parser.add_argument(
'-technical',
help='Path to technical/PBCore CSV.'
)
parser.add_argument(
'-filmographic',
help='Path to Filmographic CSV. Must contain reference numbers.'
)
parsed_args = parser.parse_args(args_)
return parsed_args
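# Illustrative invocation (paths and numbers are assumptions, not taken from the repo):
#   python loopline_repackage.py /path/to/old_packages -start_number oe1234 \
#       -technical technical.csv -filmographic filmographic.csv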
def get_numbers(args):
'''
Figure out the first OE number and how to increment per package.
'''
if args.start_number:
if args.start_number[:2] != 'oe':
print 'First two characters must be \'oe\' and last four characters must be four digits'
object_entry = ififuncs.get_object_entry()
elif len(args.start_number[2:]) not in range(4, 6):
print 'First two characters must be \'oe\' and last four characters must be four digits'
object_entry = ififuncs.get_object_entry()
elif not args.start_number[2:].isdigit():
object_entry = ififuncs.get_object_entry()
print 'First two characters must be \'oe\' and last four characters must be four digits'
else:
object_entry = args.start_number
else:
object_entry = ififuncs.get_object_entry()
object_entry_digits = int(object_entry[2:])
new_object_entry = 'oe' + str(object_entry_digits)
return new_object_entry
def update_manifest(manifest, old_oe, uuid):
'''
Updates the existing checksum manifest by replacing OE numbers with
UUIDs where appropriate. Anything logfiles or metadata relating to the
original v210.mov will be left alone.
'''
updated_lines = []
with open(manifest, 'r') as file_object:
checksums = file_object.readlines()
for line in checksums:
if old_oe in line:
if 'source' in line:
# if source (v210) logs or metadata exist, leave filename
# alone, just change the path.
line = line[:40].replace(old_oe, uuid) + line[40:]
elif '.mov_log.log' in line:
line = line.replace(old_oe, uuid).replace('.mov_log', '_sip_log')
else:
line = line.replace(old_oe, uuid)
updated_lines.append(line)
return updated_lines
def rename_files(new_uuid_path, old_oe, uuid, manifest, logname):
'''
Renames files from OE numbers to UUID where appropriate.
'''
for root, _, filenames in os.walk(new_uuid_path):
for filename in filenames:
if old_oe in filename:
if 'source' not in filename:
if '.mov_log.log' in filename:
new_filename = os.path.join(root, filename).replace('.mov_log', '_sip_log').replace(old_oe, uuid)
os.rename(os.path.join(root, filename), new_filename)
logname = new_filename
ififuncs.generate_log(
logname,
'EVENT = eventType=Filename change,'
' eventOutcomeDetailNote=%s changed to %s'
% (os.path.join(root, filename), new_filename)
)
else:
new_filename = os.path.join(root, filename).replace(old_oe, uuid)
os.rename(os.path.join(root, filename), new_filename)
ififuncs.generate_log(
logname,
'EVENT = eventType=Filename change,'
' eventOutcomeDetailNote=%s changed to %s'
% (os.path.join(root, filename), new_filename)
)
return logname
def move_files(root, new_object_entry, old_oe_path, old_uuid_path, uuid):
'''
Moves files into their new folder paths.
'''
new_oe_path = os.path.join(
os.path.dirname(root),
new_object_entry
)
os.makedirs(new_oe_path)
os.rename(old_oe_path, old_uuid_path)
new_uuid_path = os.path.join(new_oe_path, uuid)
shutil.move(old_uuid_path, new_oe_path)
return new_oe_path, new_uuid_path
def make_register():
'''
This sends a placeholder accessions register to the desktop logs directory.
This should get rid of some of the more painful, repetitive identifier matching.
'''
desktop_logs_dir = ififuncs.make_desktop_logs_dir()
register = os.path.join(
desktop_logs_dir,
'oe_register_' + time.strftime("%Y-%m-%dT%H_%M_%S.csv")
)
ififuncs.create_csv(register, (
'OE No.',
'Date Received',
'Quantity',
'Format',
'Description',
'Contact Name',
'Type of Acquisition',
'Accession Number',
'Additional Information',
'Habitat',
'Vinegar No.'
))
return register
def get_date_modified(directory):
'''
Returns the date modified of a file in DD/MM/YYYY, which is
the format used for the Object Entry register. yes, we should be using
ISO8601 but we'll fix this later.
'''
file_list = ififuncs.recursive_file_list(directory)
# This will blindly use the first video file it encounters.
# This is fine for this project as all the objects folders contain single files.
extension = os.path.splitext(file_list[0])[1]
return time.strftime('%m/%d/%Y', time.gmtime(os.path.getmtime(file_list[0]))), extension
def main(args_):
'''
Retrospectively updates older FFV1/DV packages in order to meet our current
packaging requirements. This should allow accession.py and makepbcore.py to run as
expected. This script should work on files created by:
makeffv1.py
dvsip.py
loopline.py
'''
args = parse_args(args_)
user = ififuncs.get_user()
new_object_entry = get_numbers(args)
filmographic_csv = args.filmographic
technical_csv = args.technical
filmographic_oe_list = []
filmo_csv_extraction = ififuncs.extract_metadata(filmographic_csv)
tech_csv_extraction = ififuncs.extract_metadata(technical_csv)
register = make_register()
for line_item in filmo_csv_extraction[0]:
dictionary = {}
oe_number = line_item['Object Entry'].lower()
dictionary['title'] = line_item['Title']
if dictionary['title'] == '':
dictionary['title'] = '%s - %s' % (line_item['TitleSeries'], line_item['EpisodeNo'])
dictionary['uppercase_dashed_oe'] = oe_number.upper()
for tech_record in tech_csv_extraction[0]:
if tech_record['Reference Number'] == dictionary['uppercase_dashed_oe']:
dictionary['source_accession_number'] = tech_record['Accession Number']
dictionary['filmographic_reference_number'] = tech_record['new_ref']
# this transforms OE-#### to oe####
dictionary['old_oe'] = oe_number[:2] + oe_number[3:]
filmographic_oe_list.append(dictionary)
for oe_package in filmographic_oe_list:
for root, _, filenames in os.walk(args.input):
if os.path.basename(root) == oe_package['old_oe']:
old_oe_path = root
old_oe = os.path.basename(root)
log_dir = os.path.join(root, 'logs')
for files in os.listdir(log_dir):
if '.mov_log.log' in files:
log = os.path.join(log_dir, files)
manifest = os.path.join(
os.path.dirname(root),
old_oe + '_manifest.md5'
)
uuid = ififuncs.create_uuid()
uuid_event = (
'EVENT = eventType=Identifier assignement,'
' eventIdentifierType=UUID, value=%s, module=uuid.uuid4'
) % uuid
ififuncs.generate_log(
log,
'EVENT = loopline_repackage.py started'
)
ififuncs.generate_log(
log,
'eventDetail=loopline_repackage.py %s' % ififuncs.get_script_version('loopline_repackage.py')
)
ififuncs.generate_log(
log,
'Command line arguments: %s' % args
)
ififuncs.generate_log(
log,
'EVENT = agentName=%s' % user
)
ififuncs.generate_log(
log,
uuid_event
)
ififuncs.generate_log(
log,
'EVENT = eventType=Identifier assignement,'
' eventIdentifierType=object entry, value=%s'
% new_object_entry
)
ififuncs.generate_log(
log,
'EVENT = eventType=Identifier assignement,'
' eventIdentifierType=Filmographic reference number , value=%s'
% oe_package['filmographic_reference_number']
)
oe_package['new_object_entry'] = new_object_entry
print('Transforming %s into %s' % (oe_package['old_oe'], oe_package['new_object_entry']))
ififuncs.generate_log(
log,
'Relationship, derivation, has source=%s' % oe_package['source_accession_number']
)
old_uuid_path = os.path.join(os.path.dirname(root), uuid)
new_oe_path, new_uuid_path = move_files(
root, new_object_entry, old_oe_path, old_uuid_path, uuid
)
updated_lines = update_manifest(manifest, old_oe, uuid)
new_manifest = os.path.join(new_oe_path, uuid) + '_manifest.md5'
shutil.move(manifest, new_manifest)
with open(new_manifest, 'w') as fo:
for lines in updated_lines:
fo.write(lines)
new_logs_path = os.path.join(new_uuid_path, 'logs')
for files in os.listdir(new_logs_path):
if '.mov_log.log' in files:
log = os.path.join(new_logs_path, files)
logname = rename_files(new_uuid_path, old_oe, uuid, new_manifest, log)
date_modified, extension = get_date_modified(new_uuid_path)
# This normally would be bad practise, but this project only has two formats. MOV/DV and FFv1/MKV
if extension == '.mkv':
av_format = 'FFV1/PCM/Matroska'
elif extension == '.mov':
av_format = 'DV/PCM/QuickTime'
provenance_string = 'Reproduction of %s' % oe_package['source_accession_number']
ififuncs.append_csv(register, (oe_package['new_object_entry'].upper()[:2] + '-' + oe_package['new_object_entry'][2:],date_modified, '1',av_format,oe_package['title'],'contact_name','Reproduction','', provenance_string, '', ''))
ififuncs.generate_log(
logname,
'EVENT = loopline_repackage.py finished'
)
ififuncs.checksum_replace(new_manifest, logname, 'md5')
oe_digits = int(os.path.basename(new_oe_path)[2:]) + 1
new_object_entry = 'oe' + str(oe_digits)
if __name__ == '__main__':
main(sys.argv[1:])
| kieranjol/IFIscripts | loopline_repackage.py | Python | mit | 12,437 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-05 14:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('story', '0037_auto_20170405_1401'),
]
operations = [
migrations.AlterField(
model_name='story',
name='language',
field=models.CharField(choices=[('English', 'English')], max_length=10, verbose_name='Language'),
),
]
| OrhanOdabasi/weirdbutreal | story/migrations/0038_auto_20170405_1404.py | Python | mit | 509 |
import string
import random
def random_secret(n=45):
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(n))
| Wiredcraft/pipelines | pipelines/pipeline/utils.py | Python | mit | 162 |
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Jayin Ton'
from flask import Flask, request, session, redirect, url_for
app = Flask(__name__)
host = '127.0.0.1'
port = 8000
# To use session, the secret_key must be set
# set the secret key. keep this really secret:
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
@app.route('/')
def index():
print session.__dict__
print session
if 'username' in session:
return 'Logged in as %s' % session['username']
return 'You are not logged in'
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session['username'] = request.form['username']
return redirect(url_for('index'))
return '''
<form action="" method="post">
<p><input type=text name=username>
<p><input type=submit value=Login>
</form>
'''
@app.route('/see',methods=['GET'])
def see():
print session.__dict__
print session
return "see"
@app.route('/logout')
def logout():
if 'username' in session:
# remove the username from the session if it's there
session.pop('username', None)
return redirect(url_for('index'))
else:
return '''
<p> Not logint yet</p>
<p> Click</p><a href="./login">Here</a> to login!
'''
if __name__ == '__main__':
app.run(host=host, port=port, debug=True)
| jayinton/FlaskDemo | simple/sessionTest.py | Python | mit | 1,407 |
from flask_bcrypt import check_password_hash
from project.user.services.create_user_service import CreateUserService
from project.user.services.login_user_service import LoginUserService
from project.user.services.logout_user_service import LogoutUserService
from project.user.finders.user_finder import UserFinder
from project.utils import flash
class UserHandler:
@classmethod
def login(cls, form):
"""
Login user by giving "LoginForm" form and validate if username+password pair is valid
"""
if not form.validate_on_submit():
flash.danger(u'Form is not valid.')
return False
user = UserFinder.by_username(form.username.data)
if not user:
flash.danger(u'User {} does not exists.'.format(form.username.data))
return False
if not check_password_hash(user.password, form.password.data):
flash.warning(u'Invalid Credentials. Please try again.')
return False
LoginUserService(user).call()
return user
@classmethod
def logout(cls):
"""
Delete user session
"""
return LogoutUserService().call()
@classmethod
def register(cls, form):
"""
Create new user by given "RegisterForm"
"""
if not form.validate_on_submit():
flash.danger(u'Form is not valid.')
return False
user = CreateUserService(form.username.data, form.password.data).call()
return LoginUserService(user).call()
| andreffs18/flask-template-project | project/user/handlers/user_handler.py | Python | mit | 1,551 |
# coding: utf-8
import copy
import json
import gzip
from cStringIO import StringIO
from datetime import datetime
import arrow
import iso8601
from dateutil import tz
import ML
from ML import operation
__author__ = 'czhou <[email protected]>'
def get_dumpable_types():
return (
operation.BaseOp,
)
def encode(value, disallow_objects=False):
if isinstance(value, datetime):
tzinfo = value.tzinfo
if tzinfo is None:
tzinfo = tz.tzlocal()
return {
'__type': 'Date',
'iso': arrow.get(value, tzinfo).to('utc').format('YYYY-MM-DDTHH:mm:ss.SSS') + 'Z',
}
if isinstance(value, ML.Object):
if disallow_objects:
raise ValueError('ML.Object not allowed')
return value._to_pointer()
if isinstance(value, get_dumpable_types()):
return value.dump()
if isinstance(value, (tuple, list)):
return [encode(x, disallow_objects) for x in value]
if isinstance(value, dict):
return dict([(k, encode(v, disallow_objects)) for k, v in value.iteritems()])
return value
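# Illustrative example (value made up; the UTC offset applied depends on the local timezone):
#   encode(datetime(2015, 4, 17, 12, 0, 0))
#   -> {'__type': 'Date', 'iso': '2015-04-17T...Z'} with the time converted to UTC,
# while an ML.Object is reduced to its pointer form via _to_pointer().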
def decode(key, value):
if isinstance(value, get_dumpable_types()):
return value
if isinstance(value, (tuple, list)):
return [decode(key, x) for x in value]
if not isinstance(value, dict):
return value
if '__type' not in value:
return dict([(k, decode(k, v)) for k, v in value.iteritems()])
_type = value['__type']
if _type == 'Pointer':
value = copy.deepcopy(value)
class_name = value['className']
pointer = ML.Object.create(class_name)
if 'createdAt' in value:
value.pop('__type')
value.pop('className')
pointer._finish_fetch(value, True)
else:
pointer._finish_fetch({'objectId': value['objectId']}, False)
return pointer
if _type == 'Object':
value = copy.deepcopy(value)
class_name = value['className']
value.pop('__type')
        value.pop('className')
obj = ML.Object.create(class_name)
obj._finish_fetch(value, True)
return obj
if _type == 'Date':
return arrow.get(iso8601.parse_date(value['iso'])).to('local').datetime
if _type == 'Relation':
relation = ML.Relation(None, key)
relation.target_class_name = value['className']
return relation
def traverse_object(obj, callback, seen=None):
seen = seen or set()
if isinstance(obj, ML.Object):
if obj in seen:
return
seen.add(obj)
traverse_object(obj.attributes, callback, seen)
return callback(obj)
if isinstance(obj, (ML.Relation )):
return callback(obj)
if isinstance(obj, (list, tuple)):
for idx, child in enumerate(obj):
new_child = traverse_object(child, callback, seen)
if new_child:
obj[idx] = new_child
return callback(obj)
if isinstance(obj, dict):
for key, child in obj.iteritems():
new_child = traverse_object(child, callback, seen)
if new_child:
obj[key] = new_child
return callback(obj)
return callback(obj)
def response_to_json(response):
"""
hack for requests in python 2.6
"""
if isinstance(response, ML.Response):
return json.loads(response.data)
content = response.content
# hack for requests in python 2.6
if 'application/json' in response.headers.get('Content-Type',''):
if content[:2] == '\x1f\x8b':
f = StringIO(content)
g = gzip.GzipFile(fileobj=f)
content = g.read()
g.close()
f.close()
return json.loads(content)
| MaxLeap/SDK-CloudCode-Python | ML/utils.py | Python | cc0-1.0 | 3,771 |
#!/usr/bin/python
import os
import time
import moveServo
import os.path
from multiprocessing import Process
my_dir = os.path.dirname(__file__)
def main():
moveServo.init_candy()
moveServo.move_servo_ext(0, 180, 25)
print "sleep 5"
# time.sleep(1)
moveServo.move_servo_ext(180, 90, 25)
print "sleep 5"
# time.sleep(1)
moveServo.move_servo_ext(90, 0, 25)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print "Keyboard interrupt received. Cleaning up..."
moveServo.del_all_servos()
print "Keyboard interrupt Clean up done"
| intelmakers/candy_machine | Python/test_servo.py | Python | cc0-1.0 | 625 |
from RebotConfig import RebotConfig
from Log import Log
from exchange.huobi.HuobiUtil import *
from exchange.huobi.HuobiService import *
import json
import time
import math
'''{1min, 5min, 15min, 30min, 60min, 1day, 1mon, 1week, 1year }'''
PERIOD2TYPE = {
1 : '1min',
5 : '5min',
15 : '15min',
30 : '30min',
60 : '60min',
}
def PERIOD(period):
if period >= 60:
return '60min';
return PERIOD2TYPE.get(period);
def TIMEHOUR(timestamp):
return float(time.strftime("%H", time.localtime(timestamp)));
def CreateDefaultKline():
return {
'id' : 0,
'open' : 0,
'high' : 0,
'low' : 999999999,
'close' : 0,
'amount': 0,
'vol' : 0, }
def SortCompare(a, b):
return a['id'] < b['id'];
def Cut(num, c):
s = '{:.9f}'.format(num);
pos = s.find('.');
if pos > 0:
return float(s[0:pos+c+1]);
else:
return num;
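# Illustrative behaviour of Cut(): truncate (not round) to c decimal places,
# e.g. Cut(1.23999, 2) -> 1.23 and Cut(5, 2) -> 5.0.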
def ConvertData(period1, data, period2):
    ndata = [];
    kcount = period2 / period1;
datalenght = len(data);
nk = None;
for key in range(1, datalenght):
k = data[datalenght - 1 - key];
prek = data[datalenght - key];
h = TIMEHOUR(k['id']);
idx = h % kcount;
if idx == 0:
if nk != None:
nk['close'] = prek['close'];
nk['high'] = max(nk['high'], prek['high']);
nk['low'] = min(nk['low'], prek['low']);
nk['amount']= nk['amount'] + nk['preamount'];
nk['vol'] = nk['vol'] + nk['prevol'];
            nk = CreateDefaultKline();
ndata.append(nk);
nk['id'] = k['id'];
nk['open'] = k['open'];
nk['preamount'] = 0;
nk['prevol'] = 0;
nk['idx'] = idx;
if nk != None:
nk['close'] = k['close'];
nk['high'] = max(nk['high'], k['high']);
nk['low'] = min(nk['low'], k['low']);
if nk['idx'] != idx:
nk['preamount'] += prek['amount'];
nk['prevol'] += prek['vol'];
nk['idx'] = idx;
nk['amount'] = nk['preamount'] + k['amount'];
nk['vol'] = nk['prevol'] + k['vol'];
ndata.reverse();
#for k,v in enumerate(ndata):
# print v, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(v['id']));
return ndata;
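# Illustrative call (period values are examples): ConvertData(60, hourly_klines, 240)
# folds 60-minute candles into 4-hour buckets, grouping at hours where
# hour % (period2 / period1) == 0 and aggregating high/low/amount/vol;
# getK() below uses it this way whenever the requested period exceeds 60 minutes.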
def ConvertKlineData(data):
ndata = [];
lendatadata = len(data);
for k in range(0, lendatadata):
v = data[lendatadata - 1 - k];
d = [0,1,2,3,4,5,6];
d[0] = v['id'];
d[1] = v['open'];
d[2] = v['high'];
d[3] = v['low'];
d[4] = v['close'];
d[5] = v['amount'];
d[6] = v['vol'];
ndata.append(d);
return ndata;
class huobiEX():
def set(self, access, secret):
set_user_key(access, secret);
self.orders = {};
self.marketOrders = {};
self.precisions = {};
def createOrder(self, id, market, side, time, price, volume, ext):
o = {
'id':id,
'side':side, # sell, buy
'price':price,
'avg_price':price,
'state':'wait', # wait, done, cancel
'market':market,
'created_at':time,
'volume':volume,
'remaining_volume':volume,
'executed_volume':0,
'ext':ext,
}
self.orders[id] = o;
d = self.marketOrders.get(market);
if d==None:
self.marketOrders[market] = [];
d = self.marketOrders.get(market);
d.append(o);
return id;
def getPrice(self, market, price):
d = self.precisions[market];
if d != None:
return round(price, int(d['price-precision']));
else :
return round(price, 0);
def getVolume(self, market, vol):
d = self.precisions[market];
if d != None:
return Cut(vol, int(d['amount-precision']));
else :
return Cut(vol, 0);
# function
def loadData(self, period, timestamp):
return None;
def prepare(self, period, timestamp):
d = get_symbols();
markets = [];
for k,v in enumerate(d['data']):
if v['quote-currency'] == 'usdt':
key = v['base-currency'] + 'usdt';
if key != 'venusdt':
self.precisions[key] = v;
markets.append({'id':key});
self.markets = markets;
def getServerTimestamp(self):
return time.time();
def getUser(self):
data = get_balance();
nndata = [];
if data['status'] != 'ok':
return {'accounts':nndata};
if data['data']['state'] != 'working':
return {'accounts':nndata};
listdata = data['data']['list'];
ndata = {};
for k,v in enumerate(listdata):
currency = v['currency'];
c = ndata.get(currency);
if c == None:
c = {'currency': currency};
ndata[currency] = c;
if v['type'] == 'trade':
c['balance'] = float(v['balance']);
if v['type'] == 'frozen':
c['locked'] = float(v['balance']);
for k,v in enumerate(ndata):
d = ndata.get(v);
if d['balance'] > 0 or d['locked'] > 0:
nndata.append(d);
return {'accounts':nndata};
def getMarkets(self):
if len(RebotConfig.rebot_trade_markets) > 0:
return RebotConfig.rebot_trade_markets;
return self.markets;
def getK(self, market, limit, period, timestamp=None):
data = None;
if period > 60:
data = get_kline(market, PERIOD(period), limit * period / 60);
else:
data = get_kline(market, PERIOD(period), limit);
if data['status'] != 'ok':
return [];
datadata = data['data'];
if period > 60 :
datadata = ConvertData(60, datadata, period);
return ConvertKlineData(datadata);
def getOrder(self, market):
'''
ret = self.marketOrders.get(market);
if ret == None:
return [];
'remaining_volume':volume,
'executed_volume':0,
for k, o in enumerate(ret):
data = order_info(o['id']);
if data['status'] == 'ok':
data = data['data'];
o['remaining_volume'] = float(data['amount']) - float(data['field-amount']);
o['executed_volume'] = float(data['field-amount']);
o['averageprice'] = float(data['field-cash-amount']) / float(data['field-amount']);
'''
data = orders_list(market, "pre-submitted,submitted,partial-filled,partial-canceled,filled,canceled");
if data['status'] != 'ok':
return [];
ret = data['data'];
for k, o in enumerate(data['data']):
o['id'] = int(o['id']);
o['created_at'] = float(o['created-at'])/1000;
o['side'] = o['type'][0:3];
if o['side'] != 'buy':
o['side'] = 'sell';
o['price'] = float(o['price']);
o['market'] = o['symbol'];
o['volume'] = float(o['amount']);
o['remaining_volume'] = float(o['amount']) - float(o['field-amount']);
o['executed_volume'] = float(o['field-amount']);
if o['executed_volume'] > 0:
o['avg_price'] = float(o['field-cash-amount']) / float(o['field-amount']);
else:
o['avg_price'] = 0;
# pre-submitted,submitted,partial-filled,partial-canceled,filled,canceled
if o['state'] == "canceled" or o['state'] == 'partial-canceled':
o['state'] = "cancel";
if o['state'] == 'filled':
o['state'] = 'compelete';
return ret;
def doOrder(self, market, side, price, volume, time=None, ext=None):
volume = self.getVolume(market, volume);
price = self.getPrice(market, price);
if volume <= 0:
Log.d("\t\tvolume in precision is nil");
return True, price, volume;
nside = 'buy-limit';
if side == 'sell':
nside = 'sell-limit';
result = send_order(volume, 'api', market, nside, price);
if result['status'] != 'ok':
Log.d('\t\tdo order result {0}'.format(result));
return False, price, volume;
# self.createOrder(result['data'], market, side, price, volume, time, ext);
return True, price, volume;
def doOrderCancel(self, orderID, market):
data = cancel_order(orderID);
if data['status'] != "ok":
return False;
return True;
class huobiEXLocal():
def set(self, access, secret):
set_user_key(access, secret)
self.accounts = {
'usdt' : {'currency':'usdt', 'balance':'%d' % RebotConfig.user_initamount, 'locked':'0.0'},
};
self.orders = {};
self.marketOrders = {};
self.ORDERID = 0;
self.kss = {};
self.allMarkets = None;
self.currentMarkets = None;
self.poundage = 0;#0.0001;
self.precisions = {};
def getPrice(self, market, price):
d = self.precisions[market];
if d != None:
return round(price, int(d['price-precision']));
else :
return round(price, 0);
def getVolume(self, market, vol):
d = self.precisions[market];
if d != None:
return Cut(vol, int(d['amount-precision']));
else :
return Cut(vol, 0);
def createOrder(self, market, side, time, price, volume, ext):
if volume<=0:
return None;
self.ORDERID += 1;
id = self.ORDERID;
o = {
'id':id,
'side':side, # sell, buy
'price':price,
'avg_price':price,
'state':'wait', # wait, done, cancel
'market':market,
'created_at':time,
'volume':volume,
'remaining_volume':volume,
'executed_volume':0,
'ext':ext
}
self.orders[id] = o;
d = self.marketOrders.get(market);
if d==None:
self.marketOrders[market] = [];
d = self.marketOrders.get(market);
d.append(o);
return id;
def compeleteOrder(self, id):
o = self.orders.get(id);
if o==None:
return;
market = o['market'];
currency = market[0:len(market) - len(RebotConfig.base_currency)];
o['remaining_volume']=0;
o['executed_volume']=o['volume'];
o['state']='done';
if o['side'] == 'sell':
c = self.accounts.get(currency);
balance = float(c['balance']);
c['balance'] = str(balance - o['executed_volume']);
ccny = self.accounts.get(RebotConfig.base_currency);
ccny['balance'] = str(float(ccny['balance']) + o['executed_volume'] * o['avg_price'] * (1 - self.poundage) );
print '\t\tsell', market, balance, c['balance']
if o['side'] == 'buy':
c = self.accounts.get(currency);
if c==None:
self.accounts[currency] = {'currency':currency, 'balance':'0.0', 'locked':'0.0', 'price':0.0};
c = self.accounts.get(currency);
balance = float(c['balance']);
price = c['price'];
addbalance = o['executed_volume'] * (1 - self.poundage);
addprice = o['avg_price'];
print '\t\tbuy',market, balance, addbalance
c['balance'] = str(balance + addbalance);
c['price'] = (balance)/(balance+addbalance)*price + addbalance/(balance+addbalance)*addprice;
ccny = self.accounts.get(RebotConfig.base_currency);
ccny['balance'] = str(float(ccny['balance']) - addbalance*addprice);
# function
def loadData(self, period, timestamp):
return None;
def prepare(self, period, timestamp):
d = get_symbols();
markets = []
for k,v in enumerate(d['data']):
if v['quote-currency'] == 'usdt':
key = v['base-currency'] + 'usdt';
self.precisions[v['base-currency'] + 'usdt'] = v;
markets.append({'id':key});
self.markets = markets;
def getServerTimestamp(self):
return time.time();
def getUser(self):
d = {}
accounts = [];
for k,v in self.accounts.items():
accounts.append(v);
d['accounts']=accounts;
return d;
def getMarkets(self):
if len(RebotConfig.rebot_trade_markets) > 0:
return RebotConfig.rebot_trade_markets;
return self.markets;
#return [{'id':'anscny'},{'id':'btccny'}, {'id':'ethcny'}, {'id':'zeccny'}, {'id':'qtumcny'}, {'id':'gxscny'}, {'id':'eoscny'}, {'id':'sccny'}, {'id':'dgdcny'}, {'id':'1stcny'}, {'id':'btscny'}, {'id':'gntcny'}, {'id':'repcny'}, {'id':'etccny'}];
#return [{'id':'anscny'}];
def getK(self, market, limit, period, timestamp=None):
if RebotConfig.rebot_is_test == False:
data = None;
if period > 60:
data = get_kline(market, PERIOD(period), limit * period / 60);
else:
data = get_kline(market, PERIOD(period), limit);
if data['status'] != 'ok':
return [];
datadata = data['data'];
if period > 60 :
datadata = ConvertData(60, datadata, period);
return ConvertKlineData(datadata);
ks = self.kss.get(market);
if ks==None:
data = None;
if period > 60:
data = get_kline(market, PERIOD(period), RebotConfig.rebot_test_k_count * period / 60);
else:
data = get_kline(market, PERIOD(period), RebotConfig.rebot_test_k_count);
datadata = data['data'];
if period > 60 :
datadata = ConvertData(60, datadata, period);
# print "kline length", len(datadata), RebotConfig.rebot_test_k_count * period / 60, 'xxxxxxxxxxxxxxxxx';
self.kss[market] = ConvertKlineData(datadata);
ks = self.kss.get(market);
# time.sleep(0.01);
# print timestamp, len(ks), ks[-1][0], limit
if ks == None or len(ks) == 0:
print '%s do not find kline' % market
if timestamp > ks[-1][0]:
print '{0} k line is over'.format(market);
return [];
ret = [];
for k,v in enumerate(ks):
if v[0] >= timestamp:
ret.append(v);
if len(ret) >= limit:
return ret;
return ret;
def getOrder(self, market):
ret = self.marketOrders.get(market);
if ret==None:
return [];
return ret;
def doOrder(self, market, side, price, volume, time=None, ext=None):
price = self.getPrice(market, price);
volume = self.getVolume(market, volume);
id = self.createOrder(market, side, time, price, volume, ext)
if id:
self.compeleteOrder(id);
return True, price, volume;
def doOrderCancel(self, orderID, market):
return None;
| WaitGodot/peatio-client-python | exchange/huobiEX.py | Python | cc0-1.0 | 15,480 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.scripts.tap2rpm}.
"""
import os
from twisted.trial.unittest import TestCase, SkipTest
from twisted.python import procutils
from twisted.python.failure import Failure
from twisted.internet import utils
from twisted.scripts import tap2rpm
# When we query the RPM metadata, we get back a string we'll have to parse, so
# we'll use suitably rare delimiter characters to split on. Luckily, ASCII
# defines some for us!
RECORD_SEPARATOR = "\x1E"
UNIT_SEPARATOR = "\x1F"
def _makeRPMs(tapfile=None, maintainer=None, protocol=None, description=None,
longDescription=None, setVersion=None, rpmfile=None, type_=None):
"""
Helper function to invoke tap2rpm with the given parameters.
"""
args = []
if not tapfile:
tapfile = "dummy-tap-file"
handle = open(tapfile, "w")
handle.write("# Dummy TAP file\n")
handle.close()
args.extend(["--quiet", "--tapfile", tapfile])
if maintainer:
args.extend(["--maintainer", maintainer])
if protocol:
args.extend(["--protocol", protocol])
if description:
args.extend(["--description", description])
if longDescription:
args.extend(["--long_description", longDescription])
if setVersion:
args.extend(["--set-version", setVersion])
if rpmfile:
args.extend(["--rpmfile", rpmfile])
if type_:
args.extend(["--type", type_])
return tap2rpm.run(args)
def _queryRPMTags(rpmfile, taglist):
"""
Helper function to read the given header tags from the given RPM file.
Returns a Deferred that fires with dictionary mapping a tag name to a list
of the associated values in the RPM header. If a tag has only a single
value in the header (like NAME or VERSION), it will be returned as a 1-item
list.
Run "rpm --querytags" to see what tags can be queried.
"""
# Build a query format string that will return appropriately delimited
# results. Every field is treated as an array field, so single-value tags
# like VERSION will be returned as 1-item lists.
queryFormat = RECORD_SEPARATOR.join([
"[%%{%s}%s]" % (tag, UNIT_SEPARATOR) for tag in taglist
])
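    # For example, taglist=("NAME", "VERSION") yields the query format
    # "[%{NAME}\x1F]\x1E[%{VERSION}\x1F]", so the returned values are delimited
    # by the separator characters defined above.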
def parseTagValues(output):
res = {}
for tag, values in zip(taglist, output.split(RECORD_SEPARATOR)):
values = values.strip(UNIT_SEPARATOR).split(UNIT_SEPARATOR)
res[tag] = values
return res
def checkErrorResult(failure):
# The current rpm packages on Debian and Ubuntu don't properly set up
# the RPM database, which causes rpm to print a harmless warning to
# stderr. Unfortunately, .getProcessOutput() assumes all warnings are
# catastrophic and panics whenever it sees one.
#
# See also:
# http://twistedmatrix.com/trac/ticket/3292#comment:42
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=551669
# http://rpm.org/ticket/106
failure.trap(IOError)
# Depending on kernel scheduling, we might read the whole error
# message, or only the first few bytes.
if str(failure.value).startswith("got stderr: 'error: "):
newFailure = Failure(SkipTest("rpm is missing its package "
"database. Run 'sudo rpm -qa > /dev/null' to create one."))
else:
# Not the exception we were looking for; we should report the
# original failure.
newFailure = failure
# We don't want to raise the exception right away; we want to wait for
# the process to exit, otherwise we'll get extra useless errors
# reported.
d = failure.value.processEnded
d.addBoth(lambda _: newFailure)
return d
d = utils.getProcessOutput("rpm",
("-q", "--queryformat", queryFormat, "-p", rpmfile))
d.addCallbacks(parseTagValues, checkErrorResult)
return d
class TestTap2RPM(TestCase):
def setUp(self):
return self._checkForRpmbuild()
def _checkForRpmbuild(self):
"""
tap2rpm requires rpmbuild; skip tests if rpmbuild is not present.
"""
if not procutils.which("rpmbuild"):
raise SkipTest("rpmbuild must be present to test tap2rpm")
def _makeTapFile(self, basename="dummy"):
"""
Make a temporary .tap file and returns the absolute path.
"""
path = basename + ".tap"
handle = open(path, "w")
handle.write("# Dummy .tap file")
handle.close()
return path
def _verifyRPMTags(self, rpmfile, **tags):
"""
Check the given file has the given tags set to the given values.
"""
d = _queryRPMTags(rpmfile, tags.keys())
d.addCallback(self.assertEqual, tags)
return d
def test_optionDefaults(self):
"""
Commandline options should default to sensible values.
"sensible" here is defined as "the same values that previous versions
defaulted to".
"""
config = tap2rpm.MyOptions()
config.parseOptions([])
self.assertEqual(config['tapfile'], 'twistd.tap')
self.assertEqual(config['maintainer'], 'tap2rpm')
self.assertEqual(config['protocol'], 'twistd')
self.assertEqual(config['description'], 'A TCP server for twistd')
self.assertEqual(config['long_description'],
'Automatically created by tap2rpm')
self.assertEqual(config['set-version'], '1.0')
self.assertEqual(config['rpmfile'], 'twisted-twistd')
self.assertEqual(config['type'], 'tap')
self.assertEqual(config['quiet'], False)
self.assertEqual(config['twistd_option'], 'file')
self.assertEqual(config['release-name'], 'twisted-twistd-1.0')
def test_protocolCalculatedFromTapFile(self):
"""
The protocol name defaults to a value based on the tapfile value.
"""
config = tap2rpm.MyOptions()
config.parseOptions(['--tapfile', 'pancakes.tap'])
self.assertEqual(config['tapfile'], 'pancakes.tap')
self.assertEqual(config['protocol'], 'pancakes')
def test_optionsDefaultToProtocolValue(self):
"""
Many options default to a value calculated from the protocol name.
"""
config = tap2rpm.MyOptions()
config.parseOptions([
'--tapfile', 'sausages.tap',
'--protocol', 'eggs',
])
self.assertEqual(config['tapfile'], 'sausages.tap')
self.assertEqual(config['maintainer'], 'tap2rpm')
self.assertEqual(config['protocol'], 'eggs')
self.assertEqual(config['description'], 'A TCP server for eggs')
self.assertEqual(config['long_description'],
'Automatically created by tap2rpm')
self.assertEqual(config['set-version'], '1.0')
self.assertEqual(config['rpmfile'], 'twisted-eggs')
self.assertEqual(config['type'], 'tap')
self.assertEqual(config['quiet'], False)
self.assertEqual(config['twistd_option'], 'file')
self.assertEqual(config['release-name'], 'twisted-eggs-1.0')
def test_releaseNameDefaultsToRpmfileValue(self):
"""
The release-name option is calculated from rpmfile and set-version.
"""
config = tap2rpm.MyOptions()
config.parseOptions([
"--rpmfile", "beans",
"--set-version", "1.2.3",
])
self.assertEqual(config['release-name'], 'beans-1.2.3')
def test_basicOperation(self):
"""
Calling tap2rpm should produce an RPM and SRPM with default metadata.
"""
basename = "frenchtoast"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename))
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=["twisted-%s" % (basename,)],
VERSION=["1.0"],
RELEASE=["1"],
SUMMARY=["A TCP server for %s" % (basename,)],
DESCRIPTION=["Automatically created by tap2rpm"],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=["twisted-%s" % (basename,)],
VERSION=["1.0"],
RELEASE=["1"],
SUMMARY=["A TCP server for %s" % (basename,)],
DESCRIPTION=["Automatically created by tap2rpm"],
))
return d
def test_protocolOverride(self):
"""
Setting 'protocol' should change the name of the resulting package.
"""
basename = "acorn"
protocol = "banana"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
protocol=protocol)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=["twisted-%s" % (protocol,)],
SUMMARY=["A TCP server for %s" % (protocol,)],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=["twisted-%s" % (protocol,)],
SUMMARY=["A TCP server for %s" % (protocol,)],
))
return d
def test_rpmfileOverride(self):
"""
Setting 'rpmfile' should change the name of the resulting package.
"""
basename = "cherry"
rpmfile = "donut"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(basename),
rpmfile=rpmfile)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
NAME=[rpmfile],
SUMMARY=["A TCP server for %s" % (basename,)],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
NAME=[rpmfile],
SUMMARY=["A TCP server for %s" % (basename,)],
))
return d
def test_descriptionOverride(self):
"""
Setting 'description' should change the SUMMARY tag.
"""
description = "eggplant"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
description=description)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
SUMMARY=[description],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
SUMMARY=[description],
))
return d
def test_longDescriptionOverride(self):
"""
Setting 'longDescription' should change the DESCRIPTION tag.
"""
longDescription = "fig"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
longDescription=longDescription)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
DESCRIPTION=[longDescription],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
DESCRIPTION=[longDescription],
))
return d
def test_setVersionOverride(self):
"""
Setting 'setVersion' should change the RPM's version info.
"""
version = "123.456"
# Create RPMs based on a TAP file with this name.
rpm, srpm = _makeRPMs(tapfile=self._makeTapFile(),
setVersion=version)
# Verify the resulting RPMs have the correct tags.
d = self._verifyRPMTags(rpm,
VERSION=["123.456"],
RELEASE=["1"],
)
d.addCallback(lambda _: self._verifyRPMTags(srpm,
VERSION=["123.456"],
RELEASE=["1"],
))
return d
def test_tapInOtherDirectory(self):
"""
tap2rpm handles tapfiles outside the current directory.
"""
# Make a tapfile outside the current directory.
tempdir = self.mktemp()
os.mkdir(tempdir)
tapfile = self._makeTapFile(os.path.join(tempdir, "bacon"))
# Try and make an RPM from that tapfile.
_makeRPMs(tapfile=tapfile)
| Kagami/kisa | lib/twisted/scripts/test/test_tap2rpm.py | Python | cc0-1.0 | 12,453 |
import FWCore.ParameterSet.Config as cms
from HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_mc_cff import *
#PU jets with 15 GeV threshold for subtraction
akPu4PFmatch15 = akPu4PFmatch.clone(src = cms.InputTag("akPu4PFJets15"))
akPu4PFparton15 = akPu4PFparton.clone(src = cms.InputTag("akPu4PFJets15"))
akPu4PFcorr15 = akPu4PFcorr.clone(src = cms.InputTag("akPu4PFJets15"))
akPu4PFpatJets15 = akPu4PFpatJets.clone(jetSource = cms.InputTag("akPu4PFJets15"),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag("akPu4PFcorr15")),
genJetMatch = cms.InputTag("akPu4PFmatch15"),
genPartonMatch = cms.InputTag("akPu4PFparton15"),
)
akPu4PFJetAnalyzer15 = akPu4PFJetAnalyzer.clone(jetTag = cms.InputTag("akPu4PFpatJets15"), doSubEvent = cms.untracked.bool(True) )
akPu4PFJetSequence15 = cms.Sequence(akPu4PFmatch15
*
akPu4PFparton15
*
akPu4PFcorr15
*
akPu4PFpatJets15
*
akPu4PFJetAnalyzer15
)
| mverwe/JetRecoValidation | PuThresholdTuning/python/akPu4PFJetSequence15_cff.py | Python | cc0-1.0 | 1,371 |
import OOMP
newPart = OOMP.oompItem(9185)
newPart.addTag("oompType", "POTE")
newPart.addTag("oompSize", "07")
newPart.addTag("oompColor", "X")
newPart.addTag("oompDesc", "O102")
newPart.addTag("oompIndex", "01")
OOMP.parts.append(newPart)
| oomlout/oomlout-OOMP | old/OOMPpart_POTE_07_X_O102_01.py | Python | cc0-1.0 | 241 |
'''
This is an example how to use PyScanClient library to connect a scan server.
It assumes the server running on localhost at port 4810.
The scan server is a RESTful based web service, which was developed at SNS.
Its binary nightly build could be found at:
https://ics-web.sns.ornl.gov/css/nightly/
and source code is managed at github:
https://github.com/ControlSystemStudio/cs-studio/tree/master/applications/plugins/org.csstudio.scan
The PyScanClient source code is managed at github:
https://github.com/PythonScanClient/PyScanClient
Created on Apr 17, 2015
@author: shen
'''
from scan import ScanClient
if __name__ == '__main__':
client = ScanClient('localhost')
print client
# show server information, which is in XML format.
print client.serverInfo()
| PythonScanClient/PyScanClient | tutorial/1_start.py | Python | epl-1.0 | 787 |
#!/usr/bin/python
import spidev
import time
import datetime
import sys
import math
import struct
"""
================================================
ABElectronics IO Pi V2 32-Channel Port Expander
Version 1.0 Created 20/05/2014
Version 1.1 16/11/2014 updated code and functions to PEP8 format
Requires python smbus to be installed
================================================
"""
class ADC:
"""
Based on the Microchip MCP3208
"""
# variables
__adcrefvoltage = 4.096 # reference voltage for the ADC chip.
# Define SPI bus and init
__spiADC = spidev.SpiDev()
__spiADC.open(0, 0)
__spiADC.max_speed_hz = (50000)
# public methods
def read_adc_voltage(self, channel):
"""
Read the voltage from the selected channel on the ADC
Channel = 1 to 8
"""
if ((channel > 8) or (channel < 1)):
print 'ADC channel needs to be 1 to 8'
raw = self.readADCraw(channel)
voltage = (self.__adcrefvoltage / 4096) * raw
return voltage
def readADCraw(self, channel):
"""
Read the raw value from the selected channel on the ADC
Channel = 1 to 8
"""
if ((channel > 8) or (channel < 1)):
print 'ADC channel needs to be 1 to 8'
return 0.0
channel = channel - 1
r = self.__spiADC.xfer2([4 + (channel >> 2), (channel & 3) << 6, 0])
ret = ((r[1] & 0x0F) << 8) + (r[2])
return ret
def set_adc_refvoltage(self, voltage):
"""
set the reference voltage for the analogue to digital converter.
By default the ADC uses an onboard 4.096V voltage reference. If you
choose to use an external voltage reference you will need to
use this method to set the ADC reference voltage to match the
supplied reference voltage.
The reference voltage must be less than or equal to the voltage on
the Raspberry Pi 5V rail.
"""
if (voltage >= 0.0) and (voltage <= 5.5):
self.__adcrefvoltage = voltage
else:
print 'reference voltage out of range'
return
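    # Illustrative usage of this ADC class (channel number is an example):
    #   adc = ADC()
    #   volts = adc.read_adc_voltage(1)  # channel 1, scaled against the 4.096 V reference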
class DAC:
"""
Based on the Microchip MCP4822
Define SPI bus and init
"""
__spiDAC = spidev.SpiDev()
__spiDAC.open(0, 1)
__spiDAC.max_speed_hz = (4000000)
def set_dac_voltage(self, channel, voltage):
"""
set the voltage for the selected channel on the DAC
voltage can be between 0 and 2.047 volts
"""
if ((channel > 2) or (channel < 1)):
print 'DAC channel needs to be 1 or 2'
if (voltage >= 0.0) and (voltage < 2.048):
rawval = (voltage / 2.048) * 4096
self.set_dac_raw(channel, int(rawval))
return
def set_dac_raw(self, channel, value):
"""
Set the raw value from the selected channel on the DAC
Channel = 1 or 2
Value between 0 and 4095
"""
lowByte = value & 0xff
        highByte = ((value >> 8) & 0xff) | (channel - 1) << 7 | 0x1 << 5 | 1 << 4
self.__spiDAC.xfer2([highByte, lowByte])
return
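    # Illustrative usage of this DAC class (values are examples):
    #   dac = DAC()
    #   dac.set_dac_voltage(1, 1.25)  # drive channel 1 to roughly 1.25 V (valid range 0 to 2.047 V)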
class IO:
"""
The MCP23017 chip is split into two 8-bit ports. port 0 controls pins
1 to 8 while port 1 controls pins 9 to 16.
When writing to or reading from a port the least significant bit
represents the lowest numbered pin on the selected port.
#
"""
# Define registers values from datasheet
IODIRA = 0x00 # IO direction A - 1= input 0 = output
IODIRB = 0x01 # IO direction B - 1= input 0 = output
# Input polarity A - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLA = 0x02
# Input polarity B - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLB = 0x03
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port A.
GPINTENA = 0x04
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port B.
GPINTENB = 0x05
# Default value for port A - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALA = 0x06
# Default value for port B - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALB = 0x07
# Interrupt control register for port A. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONA = 0x08
# Interrupt control register for port B. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONB = 0x09
IOCON = 0x0A # see datasheet for configuration register
GPPUA = 0x0C # pull-up resistors for port A
GPPUB = 0x0D # pull-up resistors for port B
# The INTF register reflects the interrupt condition on the port A pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFA = 0x0E
# The INTF register reflects the interrupt condition on the port B pins of
# any pin that is enabled for interrupts. A set bit indicates that the
# associated pin caused the interrupt.
INTFB = 0x0F
# The INTCAP register captures the GPIO port A value at the time the
# interrupt occurred.
INTCAPA = 0x10
# The INTCAP register captures the GPIO port B value at the time the
# interrupt occurred.
INTCAPB = 0x11
GPIOA = 0x12 # data port A
GPIOB = 0x13 # data port B
OLATA = 0x14 # output latches A
OLATB = 0x15 # output latches B
# variables
__ioaddress = 0x20 # I2C address
__portA_dir = 0x00 # port a direction
__portB_dir = 0x00 # port b direction
__portA_val = 0x00 # port a value
__portB_val = 0x00 # port b value
__portA_pullup = 0x00 # port a pull-up resistors
__portB_pullup = 0x00 # port a pull-up resistors
__portA_polarity = 0x00 # input polarity for port a
__portB_polarity = 0x00 # input polarity for port b
__intA = 0x00 # interrupt control for port a
__intB = 0x00 # interrupt control for port a
# initial configuration - see IOCON page in the MCP23017 datasheet for
# more information.
__ioconfig = 0x22
global _bus
def __init__(self, bus):
"""
init object with i2c address, default is 0x20, 0x21 for IOPi board,
load default configuration
"""
self._bus = bus
self._bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
self.__portA_val = self._bus.read_byte_data(self.__ioaddress, self.GPIOA)
self.__portB_val = self._bus.read_byte_data(self.__ioaddress, self.GPIOB)
self._bus.write_byte_data(self.__ioaddress, self.IODIRA, 0xFF)
self._bus.write_byte_data(self.__ioaddress, self.IODIRB, 0xFF)
return
# local methods
def __updatebyte(self, byte, bit, value):
""" internal method for setting the value of a single bit
within a byte """
if value == 0:
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
def __checkbit(self, byte, bit):
""" internal method for reading the value of a single bit
within a byte """
if byte & (1 << bit):
return 1
else:
return 0
# public methods
def set_pin_direction(self, pin, direction):
"""
set IO direction for an individual pin
pins 1 to 16
direction 1 = input, 0 = output
"""
pin = pin - 1
if pin < 8:
self.__portA_dir = self.__updatebyte(self.__portA_dir, pin, direction)
            self._bus.write_byte_data(self.__ioaddress, self.IODIRA, self.__portA_dir)
else:
self.__portB_dir = self.__updatebyte(self.__portB_dir, pin - 8, direction)
            self._bus.write_byte_data(self.__ioaddress, self.IODIRB, self.__portB_dir)
return
def set_port_direction(self, port, direction):
"""
set direction for an IO port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
1 = input, 0 = output
"""
if port == 1:
self._bus.write_byte_data(self.__ioaddress, self.IODIRB, direction)
self.__portB_dir = direction
else:
self._bus.write_byte_data(self.__ioaddress, self.IODIRA, direction)
self.__portA_dir = direction
return
def set_pin_pullup(self, pin, value):
"""
set the internal 100K pull-up resistors for an individual pin
pins 1 to 16
value 1 = enabled, 0 = disabled
"""
pin = pin - 1
if pin < 8:
self.__portA_pullup = self.__updatebyte(self.__portA_pullup, pin, value)
            self._bus.write_byte_data(self.__ioaddress, self.GPPUA, self.__portA_pullup)
else:
self.__portB_pullup = self.__updatebyte(self.__portB_pullup,pin - 8,value)
            self._bus.write_byte_data(self.__ioaddress, self.GPPUB, self.__portB_pullup)
return
def set_port_pullups(self, port, value):
"""
set the internal 100K pull-up resistors for the selected IO port
"""
if port == 1:
            self.__portB_pullup = value
self._bus.write_byte_data(self.__ioaddress, self.GPPUB, value)
else:
            self.__portA_pullup = value
self._bus.write_byte_data(self.__ioaddress, self.GPPUA, value)
return
def write_pin(self, pin, value):
"""
write to an individual pin 1 - 16
"""
pin = pin - 1
if pin < 8:
self.__portA_val = self.__updatebyte(self.__portA_val, pin, value)
self._bus.write_byte_data(
self.__ioaddress,
self.GPIOA,
self.__portA_val)
else:
self.__portB_val = self.__updatebyte(
self.__portB_val,
pin -
8,
value)
self._bus.write_byte_data(
self.__ioaddress,
self.GPIOB,
self.__portB_val)
return
def write_port(self, port, value):
"""
write to all pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self._bus.write_byte_data(self.__ioaddress, self.GPIOB, value)
self.__portB_val = value
else:
self._bus.write_byte_data(self.__ioaddress, self.GPIOA, value)
self.__portA_val = value
return
def read_pin(self, pin):
"""
read the value of an individual pin 1 - 16
returns 0 = logic level low, 1 = logic level high
"""
pin = pin - 1
if pin < 8:
self.__portA_val =self._bus.read_byte_data(
self.__ioaddress,
self.GPIOA)
return self.__checkbit(self.__portA_val, pin)
else:
pin = pin - 8
self.__portB_val =self._bus.read_byte_data(
self.__ioaddress,
self.GPIOB)
return self.__checkbit(self.__portB_val, pin)
def read_port(self, port):
"""
read all pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
returns number between 0 and 255 or 0x00 and 0xFF
"""
if port == 1:
self.__portB_val =self._bus.read_byte_data(
self.__ioaddress,
self.GPIOB)
return self.__portB_val
else:
self.__portA_val =self._bus.read_byte_data(
self.__ioaddress,
self.GPIOA)
return self.__portA_val
def invert_port(self, port, polarity):
"""
invert the polarity of the pins on a selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
if port == 1:
self._bus.write_byte_data(self.__ioaddress, self.IPOLB, polarity)
self.__portB_polarity = polarity
else:
self._bus.write_byte_data(self.__ioaddress, self.IPOLA, polarity)
self.__portA_polarity = polarity
return
def invert_pin(self, pin, polarity):
"""
invert the polarity of the selected pin
pins 1 to 16
polarity 0 = same logic state of the input pin, 1 = inverted logic
state of the input pin
"""
pin = pin - 1
if pin < 8:
self.__portA_polarity = self.__updatebyte(
                self.__portA_polarity,
pin,
polarity)
self._bus.write_byte_data(
self.__ioaddress,
self.IPOLA,
self.__portA_polarity)
else:
self.__portB_polarity = self.__updatebyte(
                self.__portB_polarity,
pin -
8,
polarity)
self._bus.write_byte_data(
self.__ioaddress,
self.IPOLB,
self.__portB_polarity)
return
def mirror_interrupts(self, value):
"""
1 = The INT pins are internally connected, 0 = The INT pins are not
connected. __intA is associated with PortA and __intB is associated
with PortB
"""
if value == 0:
            self.__ioconfig = self.__updatebyte(self.__ioconfig, 6, 0)
self._bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
if value == 1:
            self.__ioconfig = self.__updatebyte(self.__ioconfig, 6, 1)
self._bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
return
def set_interrupt_polarity(self, value):
"""
This sets the polarity of the INT output pins - 1 = Active-high. 0 =
Active-low.
"""
if value == 0:
            self.__ioconfig = self.__updatebyte(self.__ioconfig, 1, 0)
self._bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
if value == 1:
            self.__ioconfig = self.__updatebyte(self.__ioconfig, 1, 1)
self._bus.write_byte_data(self.__ioaddress, self.IOCON, self.__ioconfig)
return
def set_interrupt_type(self, port, value):
"""
Sets the type of interrupt for each pin on the selected port
1 = interrupt is fired when the pin matches the default value, 0 =
the interrupt is fired on state change
"""
if port == 0:
self._bus.write_byte_data(self.__ioaddress, self.INTCONA, value)
else:
self._bus.write_byte_data(self.__ioaddress, self.INTCONB, value)
return
def set_interrupt_defaults(self, port, value):
"""
These bits set the compare value for pins configured for
interrupt-on-change on the selected port.
If the associated pin level is the opposite from the register bit, an
interrupt occurs.
"""
if port == 0:
self._bus.write_byte_data(self.__ioaddress, self.DEFVALA, value)
else:
self._bus.write_byte_data(self.__ioaddress, self.DEFVALB, value)
return
def set_interrupt_on_port(self, port, value):
"""
Enable interrupts for the pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
value = number between 0 and 255 or 0x00 and 0xFF
"""
if port == 0:
self._bus.write_byte_data(self.__ioaddress, self.GPINTENA, value)
self.__intA = value
else:
self._bus.write_byte_data(self.__ioaddress, self.GPINTENB, value)
self.__intB = value
return
def set_interrupt_on_pin(self, pin, value):
"""
Enable interrupts for the selected pin
Pin = 1 to 16
Value 0 = interrupt disabled, 1 = interrupt enabled
"""
pin = pin - 1
if pin < 8:
self.__intA = self.__updatebyte(self.__intA, pin, value)
self._bus.write_byte_data(self.__ioaddress, self.GPINTENA, self.__intA)
else:
self.__intB = self.__updatebyte(self.__intB, pin - 8, value)
self._bus.write_byte_data(self.__ioaddress, self.GPINTENB, self.__intB)
return
def read_interrupt_status(self, port):
"""
read the interrupt status for the pins on the selected port
port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self._bus.read_byte_data(self.__ioaddress, self.INTFA)
else:
return self._bus.read_byte_data(self.__ioaddress, self.INTFB)
def read_interrupt_capture(self, port):
"""
read the value from the selected port at the time of the last
interrupt trigger
port 0 = pins 1 to 8, port 1 = pins 9 to 16
"""
if port == 0:
return self._bus.read_byte_data(self.__ioaddress, self.INTCAPA)
else:
return self._bus.read_byte_data(self.__ioaddress, self.INTCAPB)
def reset_interrupts(self):
"""
Reset the interrupts A and B to 0
"""
self.read_interrupt_capture(0)
self.read_interrupt_capture(1)
return
class RTC:
"""
Based on the Maxim DS1307
Define registers values from datasheet
"""
SECONDS = 0x00
MINUTES = 0x01
HOURS = 0x02
DAYOFWEEK = 0x03
DAY = 0x04
MONTH = 0x05
YEAR = 0x06
CONTROL = 0x07
# variables
__rtcaddress = 0x68 # I2C address
# initial configuration - square wave and output disabled, frequency set
# to 32.768KHz.
__rtcconfig = 0x03
# the DS1307 does not store the current century so that has to be added on
# manually.
__century = 2000
# local methods
def __init__(self, bus):
self._bus = bus
self.__config = self.__rtcconfig
self._bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__config)
return
def __bcd_to_dec(self, x):
return x - 6 * (x >> 4)
def __dec_to_bcd(self, val):
return ((val / 10 * 16) + (val % 10))
def __get_century(self, val):
if len(val) > 2:
y = val[0] + val[1]
self.__century = int(y) * 100
return
def __updatebyte(self, byte, bit, value):
"""
internal method for setting the value of a single bit within a byte
"""
if value == 0:
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
# public methods
def set_date(self, date):
"""
set the date and time on the RTC
date must be in ISO 8601 format - YYYY-MM-DDTHH:MM:SS
"""
d = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
self.__get_century(date)
self._bus.write_byte_data(
self.__rtcaddress,
self.SECONDS,
self.__dec_to_bcd(
d.second))
self._bus.write_byte_data(
self.__rtcaddress,
self.MINUTES,
self.__dec_to_bcd(
d.minute))
self._bus.write_byte_data(
self.__rtcaddress,
self.HOURS,
self.__dec_to_bcd(
d.hour))
self._bus.write_byte_data(
self.__rtcaddress,
self.DAYOFWEEK,
self.__dec_to_bcd(
d.weekday()))
self._bus.write_byte_data(
self.__rtcaddress,
self.DAY,
self.__dec_to_bcd(
d.day))
self._bus.write_byte_data(
self.__rtcaddress,
self.MONTH,
self.__dec_to_bcd(
d.month))
self._bus.write_byte_data(
self.__rtcaddress,
self.YEAR,
self.__dec_to_bcd(
d.year -
self.__century))
return
def read_date(self):
"""
read the date and time from the RTC in ISO 8601 format -
YYYY-MM-DDTHH:MM:SS
"""
seconds, minutes, hours, dayofweek, day, month, year \
=self._bus.read_i2c_block_data(self.__rtcaddress, 0, 7)
date = (
"%02d-%02d-%02dT%02d:%02d:%02d " %
(self.__bcd_to_dec(year) +
self.__century,
self.__bcd_to_dec(month),
self.__bcd_to_dec(day),
self.__bcd_to_dec(hours),
self.__bcd_to_dec(minutes),
self.__bcd_to_dec(seconds)))
return date
def enable_output(self):
"""
Enable the output pin
"""
self.__config = self.__updatebyte(self.__config, 7, 1)
self.__config = self.__updatebyte(self.__config, 4, 1)
self._bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__config)
return
def disable_output(self):
"""
Disable the output pin
"""
self.__config = self.__updatebyte(self.__config, 7, 0)
self.__config = self.__updatebyte(self.__config, 4, 0)
self._bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__config)
return
def set_frequency(self, frequency):
"""
set the frequency of the output pin square-wave
options are: 1 = 1Hz, 2 = 4.096KHz, 3 = 8.192KHz, 4 = 32.768KHz
"""
if frequency == 1:
self.__config = self.__updatebyte(self.__config, 0, 0)
self.__config = self.__updatebyte(self.__config, 1, 0)
if frequency == 2:
self.__config = self.__updatebyte(self.__config, 0, 1)
self.__config = self.__updatebyte(self.__config, 1, 0)
if frequency == 3:
self.__config = self.__updatebyte(self.__config, 0, 0)
self.__config = self.__updatebyte(self.__config, 1, 1)
if frequency == 4:
self.__config = self.__updatebyte(self.__config, 0, 1)
self.__config = self.__updatebyte(self.__config, 1, 1)
self._bus.write_byte_data(self.__rtcaddress, self.CONTROL, self.__config)
return
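# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original library).
# It assumes the board sits on I2C bus 1 and that the python 'smbus' package
# is available; the bus number and the date string are placeholders. The IO
# expander class defined above can be driven the same way by passing it the
# same bus object.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import smbus
    demo_bus = smbus.SMBus(1)  # assumed bus number for a Raspberry Pi rev 2+
    rtc = RTC(demo_bus)
    rtc.set_date("2015-01-01T12:00:00")  # placeholder date in ISO 8601 format
    print(rtc.read_date())
    rtc.set_frequency(4)  # 4 = 32.768KHz square wave
    rtc.enable_output()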
| moeskerv/ABElectronics_Python_Libraries | ExpanderPi/ABE_ExpanderPi.py | Python | gpl-2.0 | 22,720 |
import os
import csv
import sys
import re
import importlib
import networkx as nx
# settings
#curDir = 'E:/Copy/Coursera/Bioinformatics Algorithms (part-I)/MyPrograms/week6-python'
curDir = 'D:/Copy/Coursera/Bioinformatics Algorithms (part-I)/MyPrograms/week6-python'
inputFile = './data/6.local_alignment-2.txt'
#inputFile = 'C:/Users/Ashis/Downloads/dataset_247_9.txt'
outputFile = './results/6.local_alignment.txt'
scoreFile = './data/PAM250_1.txt'
# set current directory
os.chdir(curDir)
## read input
with open(inputFile) as f:
inputs = f.readlines()
protein1 = inputs[0].strip()
protein2 = inputs[1].strip()
## read pam score matrix
## colIndex is map from amino acid to its index
## pam is a 2D matrix
with open(scoreFile) as f:
lines = f.readlines()
    cols = re.split("\\s+", lines[0].strip())
colIndex = {cols[i]:i for i in range(0,len(cols))}
    pam = [[int(i) for i in re.split("\\s+", row.strip())[1:]] for row in lines[1:]]
#print(pam)
## indel penalty
sigma = 5
## create scores and backtrack arrays
len1 = len(protein1)
len2 = len(protein2)
scores = [[0]*(len2+1) for x in range(len1+1)]
backtrack = [[3]*(len2+1) for x in range(len1+1)] # 0: top, 1:left, 2:diag, 3:start
## put first row and first column of scores and backtrack arrays
#for i in range(1,len1+1):
# scores[i][0] = scores[i-1][0] - sigma
# backtrack[i][0] = 0
#for j in range(1,len2+1):
# scores[0][j] = scores[0][j-1] - sigma
# backtrack[0][j] = 1
## fill in the dynamic-programming score table
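## Local-alignment recurrence for every cell (i, j):
##   top   = score above minus the indel penalty sigma
##   left  = score to the left minus sigma
##   diag  = diagonal score plus the PAM250 value for the two residues
##   start = 0, which lets the alignment restart here (this is what makes it local)
## backtrack records which of the four choices produced the maximum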
for i in range(1,len1+1):
for j in range(1,len2+1):
topScore = scores[i-1][j] - sigma
leftScore = scores[i][j-1] - sigma
diagScore = scores[i-1][j-1] + pam[colIndex[protein1[i-1]]][colIndex[protein2[j-1]]]
startScore = 0
candidateScores = [topScore, leftScore, diagScore, startScore]
scores[i][j] = max(candidateScores)
backtrack[i][j] = candidateScores.index(scores[i][j])
## the local alignment ends at the cell holding the maximum score
maxScore = float("-inf")
maxi = -1
maxj = -1
for i in range(1,len1+1):
for j in range(1,len2+1):
if scores[i][j] > maxScore:
maxScore = scores[i][j]
maxi = i
maxj = j
## max score
#maxScore = scores[len1][len2]
## backtrack and find alignment
alignment1 = []
alignment2 = []
i = maxi
j = maxj
while i!=0 or j!=0:
if backtrack[i][j] == 0: # top
alignment1.append(protein1[i-1])
alignment2.append('-')
i = i-1
elif backtrack[i][j] == 1: # left
alignment1.append('-')
alignment2.append(protein2[j-1])
j = j-1
elif backtrack[i][j] == 2: # diag
alignment1.append(protein1[i-1])
alignment2.append(protein2[j-1])
i = i-1
j = j-1
else: # start
i = 0
j = 0
alignment1.reverse()
alignment2.reverse()
print(maxScore)
#print(alignment1)
#print(alignment2)
# output
with open(outputFile, "w") as f:
f.writelines(str(maxScore) + "\n")
f.writelines("".join(alignment1) + "\n")
f.writelines("".join(alignment2) + "\n")
| alorchhota/bioinformatics-algorithms-1 | week6-Python/code/6.LocalAlignment.py | Python | gpl-2.0 | 3,114 |
# -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.pymongo import PyMongo
import config
from views import diary
def create_app(cfg):
app = Flask(__name__)
app.config.from_object(cfg)
return app
# Application creation.
app = create_app(config)
# Database configuration.
db = PyMongo(app)
# Blueprints registration.
app.register_blueprint(diary)
if __name__ == '__main__':
app.run()
| GruPy-RN/agenda_flask | diary/__init__.py | Python | gpl-2.0 | 416 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import sys
def main():
args = parseArgs(sys.argv)
# get analogy files in the analogy directory
# http://stackoverflow.com/questions/3207219/how-to-list-all-files-of-a-directory-in-python
resultFns = [os.path.join(args.results, fn) for fn in
next(os.walk(args.results))[2]]
glbCorrCnt = 0
glbLineCnt = 0
semCorrCnt = 0
semLineCnt = 0
synCorrCnt = 0
synLineCnt = 0
for resultFn in resultFns:
lineCnt = 0
corrCnt = 0
ansInQuCnt = 0
with open(resultFn, 'rb') as resultF:
# print(resultFn)
for result in resultF:
words = result.split()
# print(words)
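                # each result line is assumed to hold at least five whitespace-separated
                # tokens: the three query words, the expected answer (words[3]) and the
                # predicted answer (words[4])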
                if len(words) < 5:
                    break
                if words[4] == '**NO_EMBEDDING_FOR_A_WORD**':
                    continue
if words[3] == words[4]:
corrCnt += 1
if words[4] in words[:3]:
ansInQuCnt += 1
lineCnt += 1
if lineCnt < 1:
continue
acc = float(corrCnt) / lineCnt
ansInQu = float(ansInQuCnt) / lineCnt
print('{:s} {:0.3f} {:0.3f}'.format(
os.path.basename(resultFn),
acc, ansInQu))
glbLineCnt += lineCnt
glbCorrCnt += corrCnt
if os.path.basename(resultFn).startswith('gram'):
synCorrCnt += corrCnt
synLineCnt += lineCnt
else:
semCorrCnt += corrCnt
semLineCnt += lineCnt
if semLineCnt > 0:
print('Sem Av {:0.3f}'.format(float(semCorrCnt) / semLineCnt))
if synLineCnt > 0:
print('Syn Av {:0.3f}'.format(float(synCorrCnt) / synLineCnt))
print('Average {:0.3f}'.format(float(glbCorrCnt) / glbLineCnt))
def parseArgs(args):
parser = argparse.ArgumentParser(
description='Evaluate analogy results. Written in Python 2.7.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('results',
type=validDirectory,
help='The directory containing result files.')
return parser.parse_args()
def validDirectory(dirName):
if os.path.isdir(dirName):
return dirName
msg = 'Directory "{:s}" does not exist.'.format(dirName)
raise argparse.ArgumentTypeError(msg)
if __name__ == '__main__':
main()
| ppegusii/cs689-mini1 | src/evaluate.py | Python | gpl-2.0 | 2,517 |
import os
import sys
import unittest
from unittest.mock import patch, MagicMock
sys.path.append(os.path.join(os.path.dirname(__file__), '../../src/'))
from katello.agent.pulp import libdnf
@unittest.skipIf('dnf' not in sys.modules, "Dnf not present")
class TestLibDnf(unittest.TestCase):
def test_package_update_on_advisories(self):
advisories = set(["RHSA-1000"])
packages = set([("foo","1.0"), ("bar","2.0")])
def applicable_advisories(items):
self.assertEqual(advisories, items.ids)
return [(MagicMock(), packages)]
mock_libdnf = MagicMock()
mock_libdnf.__enter__ = lambda instance: instance
mock_libdnf.applicable_advisories = applicable_advisories
with patch('katello.agent.pulp.libdnf.LibDnf', return_value= mock_libdnf):
package = libdnf.Package()
package.update([],advisories)
#strip the evrs out of the package
packages = set([pack[0] for pack in packages])
mock_libdnf.upgrade.assert_called_with(packages)
def test_package_update_all(self):
advisories = set()
packages = set()
mock_libdnf = MagicMock()
mock_libdnf.__enter__ = lambda instance: instance
with patch('katello.agent.pulp.libdnf.LibDnf', return_value= mock_libdnf):
package = libdnf.Package()
package.update(packages, advisories)
#strip the evrs out of the package
mock_libdnf.upgrade.assert_called_with(packages)
| Katello/katello-agent | test/test_katello/test_agent/test_pulp/test_libdnf.py | Python | gpl-2.0 | 1,456 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import urllib, urllib2
import xbmcplugin, xbmcaddon, xbmcgui, xbmc
import sys, os, re, json, base64, operator, datetime, time
from resources.local import *
pluginhandle = int(sys.argv[1])
addon = xbmcaddon.Addon()
settings = xbmcaddon.Addon( id = "plugin.video.tele.ml" )
domain = base64.b64decode(settings.getSetting("domain"))
icon_path = os.path.join(xbmc.translatePath(os.path.join(xbmc.translatePath(os.path.join(xbmc.translatePath(os.path.join("special://","home")),"addons")),"plugin.video.tele.ml")),"icon.png")
def parameters_string_to_dict(parameters):
paramDict = {}
if parameters:
paramPairs = parameters[1:].split("&")
for paramsPair in paramPairs:
paramSplits = paramsPair.split('=')
if (len(paramSplits)) == 2:
paramDict[paramSplits[0]] = paramSplits[1]
return paramDict
def translation(id):
return addon.getLocalizedString(id).encode('utf-8','ignore')
def get_list(query,data):
req = urllib2.Request(domain+'api-json-33/'+query+'.php')
f = urllib2.urlopen(req,data)
html = f.read()
f.close()
return html
def info_display(channel_info):
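	# channel_info is either a plain description or an EPG string of ';;;'-separated
	# programmes, each encoded as "start datetime;;title"; upcoming entries are rendered
	# below in local time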
if ";;;" in channel_info:
channel_display = ""
nb_line = 0
nb_content = int(float(settings.getSetting("nb_epg_program")))
get_timezone = datetime.datetime.utcnow() - datetime.datetime.now()
prog_list = channel_info.split(';;;')
prog_list.pop()
for prog in prog_list:
prog_data = prog.split(';;')
progtime = datetime.datetime.fromtimestamp(time.mktime(time.strptime(prog_data[0],"%Y-%m-%d %H:%M:%S")))
displaytime = progtime - get_timezone
if ( displaytime > datetime.datetime.now() ) :
if ( nb_line < nb_content-1 ):
channel_display += displaytime.strftime("%H:%M")+" - "+prog_data[1]+"[CR]"
nb_line += 1
else:
channel_display = "[B]"+displaytime.strftime("%H:%M")+" - "+prog_data[1]+"[/B][CR]"
return channel_display.replace("\\", "")
else:
return channel_info.replace("\\", "")
def addDir(name, url, mode, iconimage, fanart, desc=""):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
xbmcplugin.addSortMethod(pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_LABEL)
ok = True
liz = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
if ( fanart != '' ):
liz.setProperty("Fanart_Image", fanart)
liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": desc})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def categories(mode,url,path,img):
hide_adult = ( settings.getSetting( "hide_adult" ) == "true" )
empty_category = ( settings.getSetting( "empty_category" ) == "true" )
data_list = json.loads(get_list(mode,urllib.urlencode({'hide_adult': hide_adult,'empty_category': empty_category})))
if (len(data_list) > 0):
for (category_id, category_info) in data_list.items():
add_category(category_info[0], category_id, url, domain+'images/'+path+'/'+category_id+'.'+img, domain+'images/channels/'+category_info[3]+'.jpg', category_info[4], category_info[2], category_info[1])
xbmcplugin.endOfDirectory(pluginhandle)
def add_category(name, url, mode, iconimage, fanart, working, nb_channels, desc=""):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
xbmcplugin.addSortMethod(pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_LABEL)
ok = True
if (working == 0):
name = name+" | "+translation(30023)
liz = xbmcgui.ListItem(name, iconImage=iconimage, thumbnailImage=iconimage)
if ( fanart != '' ):
liz.setProperty("Fanart_Image", fanart)
liz.setInfo(type="Video", infoLabels={"Title": name, "Plot": desc, "Genre": str(nb_channels)+" "+translation(30003)})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=True)
return ok
def channels(mode,url):
hide_adult = ( settings.getSetting( "hide_adult" ) == "true" )
broken_channel = ( settings.getSetting( "broken_channel" ) == "true" )
epg_display = ( settings.getSetting( "epg_display" ) == "true" )
data_list = json.loads(get_list(mode,urllib.urlencode({'content_id': url, 'hide_adult': hide_adult, 'broken_channel': broken_channel, 'epg_display': epg_display})))
if (len(data_list) > 0):
sorted_data_list = sorted(data_list.items(), key=lambda k: int(k[0]))
for (channel_info) in (sorted_data_list):
add_channel(channel_info[1][0], channel_info[1][2], 'sources', domain+'images/channels/'+channel_info[1][2]+'.png', domain+'images/channels/'+channel_info[1][2]+'.jpg', "", channel_info[1][3], channel_info[1][4], channel_info[1][1])
xbmcplugin.endOfDirectory(pluginhandle)
def add_channel(name, url, mode, iconimage, fanart, channel_nb, genre, working, desc=""):
u = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)
xbmcplugin.addSortMethod(pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_UNSORTED)
xbmcplugin.addSortMethod(pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_LABEL)
ok = True
if (working == 0):
name = name+" | "+translation(30023)
liz = xbmcgui.ListItem(name.replace("\\", ""), iconImage=iconimage, thumbnailImage=iconimage)
if ( fanart != '' ):
liz.setProperty("Fanart_Image", fanart)
liz.setInfo(type="Video", infoLabels={"Title": name.replace("\\", ""), "Plot": info_display(desc), "Genre": genre})
ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=u, listitem=liz, isFolder=False)
return ok
def sources(mode,url):
source_list = []
display_list = []
broken_stream = ( settings.getSetting( "broken_stream" ) == "true" )
geo_block = ( settings.getSetting( "geo_block" ) == "true" )
ext_player = ( settings.getSetting( "ext_player" ) == "true" )
decay_stream = ( settings.getSetting( "decay_stream" ) == "true" )
email = settings.getSetting( "email" )
password = settings.getSetting( "password" )
data_list = json.loads(get_list(mode,urllib.urlencode({'channel_id': url, 'broken_stream': broken_stream, 'geo_block': geo_block, 'ext_player': ext_player, 'decay_stream': decay_stream, 'email': email, 'password': password})))
if (len(data_list) > 0):
dialog = xbmcgui.Dialog()
try:
for (source_id, source_info) in sorted(data_list.items(), key=operator.itemgetter(1), reverse=True):
source_list.append(source_id)
stream_txt = str(source_info[0])+" px | "+source_info[1].upper()
if ( source_info[3] != 'no' ):
stream_txt = stream_txt+" | "+source_info[3].upper()+" "+translation(30019)
if ( int(source_info[4]) != 0 ):
if (int (source_info[4]) > 24):
stream_txt = stream_txt+" | +"+str(int(source_info[4])/24)+" "+translation(30024)
else:
stream_txt = stream_txt+" | +"+source_info[4]+" "+translation(30020)
if ( source_info[5] != 'no' ):
stream_txt = stream_txt+" | "+translation(30021)
if ( source_info[2] == 'no' ):
stream_txt = stream_txt+" | "+translation(30022)
display_list.append(stream_txt)
stream_url = source_info[6]
video_source = dialog.select(translation(30007), display_list)
if (not video_source == -1 ):
play_video("play_video", source_list[video_source],stream_url)
except:
xbmcgui.Dialog().ok(translation(30017), '[B]'+translation(30030)+'...[/B]', translation(30029))
def play_video(mode,stream_id,stream_url):
xbmc.executebuiltin("XBMC.Notification(%s,%s,%s,%s)" % (translation(30017),translation(30018)+'...',3000,icon_path))
data_list = json.loads(get_list(mode,urllib.urlencode({'stream_id': stream_id, 'stream_url': stream_url})))
if (len(data_list) > 0):
for (stream_id, stream_info) in data_list.items():
liz = xbmcgui.ListItem(stream_info[1], iconImage=domain+'images/channels/'+stream_info[2]+'.png', thumbnailImage=domain+'images/channels/'+stream_info[2]+'.png')
xbmc.Player( xbmc.PLAYER_CORE_MPLAYER ).play(url_convert(stream_info[0]),listitem=liz)
params = parameters_string_to_dict(sys.argv[2])
mode = urllib.unquote_plus(params.get('mode', ''))
url = urllib.unquote_plus(params.get('url', ''))
name = urllib.unquote_plus(params.get('name', ''))
if mode == 'play_video':
play_video(mode,url)
elif mode == 'sources':
sources(mode,url)
elif mode == 'channel_cont':
channels(mode,url)
elif mode == 'channel_coun':
channels(mode,url)
elif mode == 'channel_count':
channels(mode,url)
elif mode == 'category':
categories('category','channel_cont','contents','jpg')
elif mode == 'country':
categories('country','channel_coun','countries','png')
elif mode == 'langage':
categories('language','channel_count','languages','png')
elif mode == 'channel_all':
channels('channel_all',"")
else:
channel_display = ( int(settings.getSetting("category")) )
if channel_display == 0:
categories('category','channel_cont','contents','jpg')
elif channel_display == 1:
categories('country','channel_coun','countries','png')
elif channel_display == 2:
categories('language','channel_count','languages','png')
else:
channels('channel_all',"")
| idi2019/plugin.video.tele.ml | default.py | Python | gpl-2.0 | 9,683 |
from sdssgaussfitter import gaussfit
import numpy as np
import os,sys
from util import utils
from util.readDict import readDict
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def aperture(startpx,startpy,radius=7):
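    # returns a 46x44 mask that is 0 inside a circle of the given radius centred
    # on (startpx, startpy) and 1 everywhere else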
r = radius
length = 2*r
height = length
allx = xrange(startpx-int(np.ceil(length/2.0)),startpx+int(np.floor(length/2.0))+1)
ally = xrange(startpy-int(np.ceil(height/2.0)),startpy+int(np.floor(height/2.0))+1)
pixx = []
pixy = []
mask=np.ones((46,44))
for x in allx:
for y in ally:
if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
mask[y,x]=0.
return mask
#def gaussian(height, center_x, center_y, width_x, width_y,offset):
# """Returns a gaussian function with the given parameters"""
# width_x = float(width_x)
# width_y = float(width_y)
# return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)+offset
#testy = np.array([[gaussian(2,10,10,3,3,5)(x,y) for y in range(46)] for x in range(44)])
#utils.plotArray(testy,cbar=True)
param = readDict()
#param.read_from_file('G158-100params.dict')
#param.read_from_file('pg0220params.dict')
#param.read_from_file('landolt9542params.dict')
#param.read_from_file('corot18params.dict')
if len(sys.argv)<2:
print "Provide file name to fit. Syntax >>python fitPsf.py objectparams.dict [filenumber]"
sys.exit(1)
#read in parameter file as command line argument
param.read_from_file(sys.argv[1])
#provide optional file number if the object in the param file has alternate .npz files to be specified individually
fileNum = None
if len(sys.argv)>2:
fileNum = "_"+str(sys.argv[2])
npzLoadFile = param['npzLoadFile']
npzfitpsf = param['npzfitpsf']
giffitpsf = param['giffitpsf']
if fileNum != None:
npzLoadFile = npzLoadFile.split('.')[0]+fileNum+'.'+npzLoadFile.split('.')[1]
npzfitpsf = npzfitpsf.split('.')[0]+fileNum+'.'+npzfitpsf.split('.')[1]
giffitpsf = giffitpsf.split('.')[0]+fileNum+'.'+giffitpsf.split('.')[1]
FramesPerFile = param['FramesPerFile']
#NumFiles = param['NumFiles']
#for filenum in range(len(NumFiles)):
# if NumFiles[filenum] > 0:
# NumFiles[filenum] = NumFiles[filenum]*FramesPerFile
#NumFrames = NumFiles
NumFrames = 31
print "There should be this many frames: ", NumFrames
guessX = param['guessX'][0]
guessY = param['guessY'][0]
stackDict = np.load(npzLoadFile)
stack = stackDict['stack']
wvls = stackDict['wvls']
print "The file actually has this many: ", len(wvls)
paramsList = []
errorsList = []
fitImgList = []
chisqList = []
plt.ion()
for iFrame in range(0,np.shape(stack)[0]):
frame = stack[iFrame,:,:]
#print "Frame max= ", np.nanmax(frame,axis=None)
#frame *= CorrFactors
#print "Corrected Frame max= ", np.nanmax(frame,axis=None)
nanMask = np.isnan(frame)
#for interval in xrange(len(NumFrames)-1):
# if NumFrames[interval] != NumFrames[interval+1]:
# if NumFrames[interval] < iFrame <= NumFrames[interval+1]:
# guessX = guessX[interval]
# guessY = guessY[interval]
# print guessX, guessY
'''
apertureMask = aperture(guessX,guessY,radius=4)
err = np.sqrt(frame) #divide by 2 to constrain PSF fit even tighter to avoid fitting to wrong peak if PSF is divided by dead pixels
err[frame > 100]=np.inf
#err[frame<10] = 100
frame[nanMask] = 0 #set to finite value that will be ignored
err[nanMask] = np.inf #ignore these data points
err[frame==0] = np.inf
err[apertureMask==1] = 1.0 #np.sqrt(frame[apertureMask==1])/2.0 #weight points closer to the expected psf higher
nearDeadCutoff = 1 #100/15 cps for 4000-6000 angstroms
err[frame<nearDeadCutoff] = np.inf
entireMask = (err==np.inf)
maFrame = np.ma.masked_array(frame,entireMask)
'''
apertureMask = aperture(guessX,guessY,radius=7)
#if iFrame < 19:
# err = np.ones(np.shape(frame))*10.0
#else:
# err = np.zeros(np.shape(frame))
err = np.ones(np.shape(frame))*10.0
#err = (frame)**(0.5)
    err[apertureMask==1] = np.inf #ignore pixels outside the aperture so only the region around the expected PSF constrains the fit
#err[frame>100] = np.inf
frame[nanMask]=0#set to finite value that will be ignored
err[nanMask] = np.inf#ignore these data points
nearDeadCutoff=1#100/15 cps for 4000-6000 angstroms
err[frame<nearDeadCutoff] = np.inf
entireMask = (err==np.inf)
maFrame = np.ma.masked_array(frame,entireMask)
#try to make smart guesses about initial parameters
#guessAmp = np.max(frame[frame!=np.nan])
#guessHeight = np.median(frame)
#guessWidth = 1.5
guessAmp = 30.
guessHeight = 5.
guessWidth = 1.3
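    # gaussfit parameter order used throughout: [background height, amplitude,
    # centre x, centre y, width]; the limited/min/max lists below follow the same order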
guessParams = [guessHeight,guessAmp,guessX,guessY,guessWidth]
limitedmin = 5*[True]
limitedmax = 5*[True]
minpars = [0,0,0,0,0.1] #default min pars, usually work fine
#minpars = [0,0,27,27,1] #tighter constraint on PSF width to avoid fitting wrong peak if PSF is divided by dead pixels
maxpars = [40,200,43,43,10]
#maxpars = [40,200,33,33,10]
''' #forced parameters for Landolt standard
if iFrame == 27:
minpars = [8,5,0,0,0.5]
maxpars = [30,25,43,45,1.1]
if iFrame == 28:
minpars = [8,5,0,0,0.5]
maxpars = [30,25,43,45,1.1]
if iFrame == 29:
minpars = [8,5,0,0,0.5]
maxpars = [30,25,43,45,1.1]
if iFrame == 30:
minpars = [8,5,0,0,0.5]
maxpars = [30,25,43,45,1.10]
'''
usemoments=[True,True,True,True,True] #doesn't use our guess values, default
#usemoments=[False,False,False,False,False]
print "=========================="
print wvls[iFrame]
print "frame ",iFrame
out = gaussfit(data=maFrame,err=err,params=guessParams,returnfitimage=True,quiet=True,limitedmin=limitedmin,limitedmax=limitedmax,minpars=minpars,maxpars=maxpars,circle=1,usemoments=usemoments,returnmp=True)
mp = out[0]
outparams = mp.params
paramErrors = mp.perror
chisq = mp.fnorm
dof = mp.dof
reducedChisq = chisq/dof
print "reducedChisq =", reducedChisq
fitimg = out[1]
chisqList.append([chisq,dof])
paramsList.append(outparams)
errorsList.append(paramErrors)
print "outparams = ", outparams
print "paramErrors = ", paramErrors
# expectedResiduals = np.ma.masked_array(np.sqrt(frame),mask=entireMask)
# residuals = np.ma.masked_array(np.abs(frame-fitimg),mask=entireMask)
# utils.plotArray(expectedResiduals,cbar=True)
# utils.plotArray(residuals,cbar=True)
# fig = plt.figure()
# ax = fig.add_subplot(111,projection='3d')
# x = np.arange(0,44)
# y = np.arange(0,46)
# X,Y = np.meshgrid(x,y)
# linearMask = np.ravel(entireMask==0)
# ax.plot_wireframe(X,Y,fitimg)
# ax.scatter(outparams[2],outparams[3],outparams[0]+outparams[1],c='black')
# ax.scatter(np.ravel(X)[linearMask],np.ravel(Y)[linearMask],np.ravel(frame)[linearMask],c='red')
#
fitimg[nanMask]=0
# print fitimg[np.isnan(fitimg)]
fitImgList.append(fitimg)
# utils.plotArray(frame,cbar=True)
# utils.plotArray(maFrame,cbar=True)
# utils.plotArray(fitimg,cbar=True)
# plt.show()
# utils.confirm('Enter to continue.')
# plt.close()
# plt.close()
# plt.close()
frame[nanMask]=np.nan
# fig = plt.figure()
# ax1=fig.add_subplot(211)
# ax2 = fig.add_subplot(212)
# for iRow in range(len(frame)):
# ax1.scatter(range(44),frame[iRow,:],c='red',marker='o',alpha=.5,label='data')
# ax1.scatter(range(44),fitimg[iRow,:],c='blue',marker='^',alpha=.5,label='fit')
# ax1.set_title('Fit seen along Cols')
# for iCol in range(np.shape(frame)[1]):
# ax2.scatter(range(46),frame[:,iCol],c='red',marker='o',alpha=.5,label='data')
# ax2.scatter(range(46),fitimg[:,iCol],c='blue',marker='^',alpha=.5,label='fit')
# ax2.set_title('Fit seen along Rows')
# plt.show()
plt.close()
print 'closed'
cube = np.array(fitImgList)
chisqs = np.array(chisqList)
params = np.array(paramsList)
errors = np.array(errorsList)
np.savez(npzfitpsf,fitImg=cube,params=params,errors=errors,chisqs=chisqs,wvls=wvls)
print 'saved'
utils.makeMovie(fitImgList,frameTitles=wvls, cbar=True, outName=giffitpsf, normMin=0, normMax=50)
| bmazin/ARCONS-pipeline | examples/Pal2012_landoltPhot/fitPsf.py | Python | gpl-2.0 | 8,372 |
#!/usr/bin/env python
#encoding: utf-8
# Martin Kersner, [email protected]
# 2016/03/17
from __future__ import print_function
import os
import sys
import glob,cv2
from PIL import Image as PILImage
import numpy as np
from utils import mat2png_hariharan,pascal_palette_invert
def main():
input_path, output_path = process_arguments(sys.argv)
if os.path.isdir(input_path) and os.path.isdir(output_path):
    # glob.glob returns a list of all file paths matching the pattern
mat_files = glob.glob(os.path.join(input_path, '*.mat'))
convert_mat2png(mat_files, output_path)
else:
help('Input or output path does not exist!\n')
def process_arguments(argv):
num_args = len(argv)
input_path = None
output_path = None
if num_args == 3:
input_path = argv[1]
output_path = argv[2]
else:
help()
if not os.path.exists(output_path):
os.makedirs(output_path)
return input_path, output_path
def convert_mat2png(mat_files, output_path):
if not mat_files:
help('Input directory does not contain any Matlab files!\n')
l2c = pascal_palette_invert()
for ind,mat in enumerate(mat_files):
print(ind,mat)
numpy_img = mat2png_hariharan(mat)
color = np.zeros( numpy_img.shape + (3,))
for l in l2c.keys():
color[numpy_img == l,:] = l2c[l]
pil_img = PILImage.fromarray(color.astype('uint8'))
#pil_img = PILImage.fromarray(numpy_img).convert("RGB")
#for y in range(numpy_img.shape[0]):
# for x in range(numpy_img.shape[1]):
# c = l2c[numpy_img[y,x]]
# pil_img.putpixel((x,y),c)
#pil_img = PILImage.fromarray(numpy_img)
pil_img.save(os.path.join(output_path, modify_image_name(mat, 'png')))
# Extract name of image from given path, replace its extension with specified one
# and return new name only, not path.
def modify_image_name(path, ext):
return os.path.basename(path).split('.')[0] + '.' + ext
def help(msg=''):
print(msg +
'Usage: python mat2png.py INPUT_PATH OUTPUT_PATH\n'
'INPUT_PATH denotes path containing Matlab files for conversion.\n'
        'OUTPUT_PATH denotes path where converted PNG files are going to be saved.'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
| z01nl1o02/tests | voc/sbd_dataset/mat2png.py | Python | gpl-2.0 | 2,232 |
# -*- coding: utf-8 -*-
# Copyright(c) 2016-2020 Jonas Sjöberg <[email protected]>
# Source repository: https://github.com/jonasjberg/autonameow
#
# This file is part of autonameow.
#
# autonameow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# autonameow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with autonameow. If not, see <http://www.gnu.org/licenses/>.
import logging
from collections import defaultdict
from core import event
from core import logs
from core.datastore import repository
from core.datastore.query import QueryResponseFailure
from core.exceptions import AutonameowException
from core.model import genericfields
from util import sanity
log = logging.getLogger(__name__)
def _map_generic_sources(meowuri_class_map):
"""
Returns a dict keyed by provider classes storing sets of "generic"
fields as Unicode strings.
"""
klass_generic_meowuris_map = defaultdict(set)
for _, klass in meowuri_class_map.items():
# TODO: [TD0151] Fix inconsistent use of classes/instances.
# TODO: [TD0157] Look into analyzers 'FIELD_LOOKUP' attributes.
for _, field_metainfo in klass.metainfo().items():
generic_field_string = field_metainfo.get('generic_field')
if not generic_field_string:
continue
assert isinstance(generic_field_string, str)
generic_field_klass = genericfields.get_field_for_uri_leaf(generic_field_string)
if not generic_field_klass:
continue
assert issubclass(generic_field_klass, genericfields.GenericField)
generic_meowuri = generic_field_klass.uri()
if not generic_meowuri:
continue
klass_generic_meowuris_map[klass].add(generic_meowuri)
return klass_generic_meowuris_map
def _get_meowuri_source_map():
"""
Returns a dict mapping "MeowURIs" to provider classes.
Example return value: {
'extractor.filesystem.xplat': CrossPlatformFilesystemExtractor,
'extractor.metadata.exiftool': ExiftoolMetadataExtractor,
}
Returns: Dictionary keyed by instances of the 'MeowURI' class,
storing provider classes.
"""
import analyzers
import extractors
mapping = dict()
for module_name in (analyzers, extractors):
module_registry = getattr(module_name, 'registry')
klass_list = module_registry.all_providers
for klass in klass_list:
uri = klass.meowuri_prefix()
assert uri, 'Got empty "meowuri_prefix" from {!s}'.format(klass)
assert uri not in mapping, 'URI "{!s}" already mapped'.format(uri)
mapping[uri] = klass
return mapping
def _get_excluded_sources():
"""
Returns a set of provider classes excluded due to unmet dependencies.
"""
import extractors
import analyzers
all_excluded = set()
for module_name in (analyzers, extractors):
module_registry = getattr(module_name, 'registry')
all_excluded.update(module_registry.excluded_providers)
return all_excluded
class ProviderRegistry(object):
def __init__(self, meowuri_source_map, excluded_providers):
self.log = logging.getLogger(
'{!s}.{!s}'.format(__name__, self.__module__)
)
self.meowuri_sources = dict(meowuri_source_map)
self._debug_log_mapped_meowuri_sources()
self._excluded_providers = excluded_providers
# Set of all MeowURIs "registered" by extractors or analyzers.
self.mapped_meowuris = self.unique_map_meowuris(self.meowuri_sources)
# Providers declaring generic MeowURIs through 'metainfo()'.
self.generic_meowuri_sources = _map_generic_sources(self.meowuri_sources)
self._debug_log_mapped_generic_meowuri_sources()
@property
def excluded_providers(self):
# Sort here so that callers won't have to work around the possibility
# of excluded providers not having a common base class and thus being
# unorderable.
return sorted(self._excluded_providers, key=lambda x: x.__name__)
def _debug_log_mapped_meowuri_sources(self):
if not logs.DEBUG:
return
for uri, klass in sorted(self.meowuri_sources.items()):
self.log.debug('Mapped MeowURI "%s" to %s', uri, klass.name())
def _debug_log_mapped_generic_meowuri_sources(self):
if not logs.DEBUG:
return
for klass, uris in self.generic_meowuri_sources.items():
klass_name = klass.name()
for uri in sorted(uris):
self.log.debug('Mapped generic MeowURI "%s" to %s', uri, klass_name)
def might_be_resolvable(self, uri):
if not uri:
return False
sanity.check_isinstance_meowuri(uri)
resolvable = list(self.mapped_meowuris)
uri_without_leaf = uri.stripleaf()
return any(m.matches_start(uri_without_leaf) for m in resolvable)
def providers_for_meowuri(self, requested_meowuri):
"""
Returns a set of classes that might store data under a given "MeowURI".
Note that the provider "MeowURI" is matched as a substring of the
requested "MeowURI".
Args:
requested_meowuri: The "MeowURI" of interest.
Returns:
A set of classes that "could" produce and store data under a
"MeowURI" that is a substring of the given "MeowURI".
"""
found = set()
if not requested_meowuri:
self.log.error('"providers_for_meowuri()" got empty MeowURI!')
return found
if requested_meowuri.is_generic:
found = self._providers_for_generic_meowuri(requested_meowuri)
else:
found = self._source_providers_for_meowuri(requested_meowuri)
self.log.debug('%s returning %d providers for MeowURI %s',
self.__class__.__name__, len(found), requested_meowuri)
return found
def _providers_for_generic_meowuri(self, requested_meowuri):
found = set()
for klass, meowuris in self.generic_meowuri_sources.items():
if requested_meowuri in meowuris:
found.add(klass)
return found
def _source_providers_for_meowuri(self, requested_meowuri):
# Argument 'requested_meowuri' is a full "source-specific" MeowURI,
# like 'extractor.metadata.exiftool.EXIF:CreateDate'
requested_meowuri_without_leaf = requested_meowuri.stripleaf()
found = set()
for uri in self.meowuri_sources.keys():
# 'uri' is a "MeowURI root" ('extractor.metadata.epub')
if uri.matches_start(requested_meowuri_without_leaf):
found.add(self.meowuri_sources[uri])
return found
@staticmethod
def unique_map_meowuris(meowuri_class_map):
unique_meowuris = set()
for uri in meowuri_class_map.keys():
unique_meowuris.add(uri)
return unique_meowuris
class ProviderRunner(object):
def __init__(self, config, extractor_runner, run_analysis_func):
self.config = config
self._extractor_runner = extractor_runner
assert callable(run_analysis_func), (
'Expected dependency-injected "run_analysis" to be callable'
)
self._run_analysis = run_analysis_func
self._provider_delegation_history = defaultdict(set)
self._delegate_every_possible_meowuri_history = set()
def delegate_to_providers(self, fileobject, uri):
possible_providers = set(Registry.providers_for_meowuri(uri))
if not possible_providers:
log.debug('Got no possible providers for delegation %s', uri)
return
# TODO: [TD0161] Translate from specific to "generic" MeowURI?
# Might be useful to be able to translate a specific MeowURI like
# 'analyzer.ebook.title' to a "generic" like 'generic.metadata.title'.
# Otherwise, user is almost never prompted with any possible candidates.
prepared_analyzers = set()
prepared_extractors = set()
num_possible_providers = len(possible_providers)
for n, provider in enumerate(possible_providers, start=1):
log.debug('Looking at possible provider (%d/%d): %s',
n, num_possible_providers, provider)
if self._previously_delegated_provider(fileobject, provider):
log.debug('Skipping previously delegated provider %s', provider)
continue
self._remember_provider_delegation(fileobject, provider)
if _provider_is_extractor(provider):
prepared_extractors.add(provider)
elif _provider_is_analyzer(provider):
prepared_analyzers.add(provider)
if prepared_extractors:
log.debug('Delegating %s to extractors: %s', uri, prepared_extractors)
self._delegate_to_extractors(fileobject, prepared_extractors)
if prepared_analyzers:
log.debug('Delegating %s to analyzers: %s', uri, prepared_analyzers)
self._delegate_to_analyzers(fileobject, prepared_analyzers)
def _previously_delegated_provider(self, fileobject, provider):
if fileobject in self._delegate_every_possible_meowuri_history:
return True
return bool(
fileobject in self._provider_delegation_history
and provider in self._provider_delegation_history[fileobject]
)
def _remember_provider_delegation(self, fileobject, provider):
self._provider_delegation_history[fileobject].add(provider)
def _delegate_to_extractors(self, fileobject, extractors_to_run):
try:
self._extractor_runner.start(fileobject, extractors_to_run)
except AutonameowException as e:
# TODO: [TD0164] Tidy up throwing/catching of exceptions.
log.critical('Extraction FAILED: %s', e)
raise
def _delegate_to_analyzers(self, fileobject, analyzers_to_run):
self._run_analysis(
fileobject,
self.config,
analyzers_to_run=analyzers_to_run
)
def delegate_every_possible_meowuri(self, fileobject):
self._delegate_every_possible_meowuri_history.add(fileobject)
# Run all extractors
try:
self._extractor_runner.start(fileobject, request_all=True)
except AutonameowException as e:
# TODO: [TD0164] Tidy up throwing/catching of exceptions.
log.critical('Extraction FAILED: %s', e)
raise
# Run all analyzers
self._run_analysis(fileobject, self.config)
def _provider_is_extractor(provider):
# TODO: [hack] Fix circular import problems when running new unit test runner.
# $ PYTHONPATH=autonameow:tests python3 -m unit --skip-slow
from extractors.metadata.base import BaseMetadataExtractor
return issubclass(provider, BaseMetadataExtractor)
def _provider_is_analyzer(provider):
# TODO: [hack] Fix circular import problems when running new unit test runner.
# $ PYTHONPATH=autonameow:tests python3 -m unit --skip-slow
from analyzers import BaseAnalyzer
return issubclass(provider, BaseAnalyzer)
class MasterDataProvider(object):
"""
Handles top-level _DYNAMIC_ data retrieval and data extraction delegation.
This is one of two main means of querying for data related to a file.
Compared to the repository, which is a static storage that either contain
the requested data or not, this is a "reactive" interface to the repository.
If the requested data is in the repository, is it retrieved and returned.
Otherwise, data providers (extractors/analyzers) that might be able
to provide the requested data is executed. If the execution turns up the
requested data, it is returned.
This is intended to be a "dynamic" or "reactive" data retrieval interface
for use by any part of the application.
"""
def __init__(self, config, run_analysis_func):
self.config = config
assert repository.SessionRepository is not None, (
'Expected Repository to be initialized at this point'
)
from core.extraction import ExtractorRunner
extractor_runner = ExtractorRunner(
add_results_callback=repository.SessionRepository.store
)
assert hasattr(extractor_runner, 'start'), (
'Expected "ExtractorRunner" to have an attribute "start"'
)
self.provider_runner = ProviderRunner(
self.config,
extractor_runner,
run_analysis_func
)
def delegate_every_possible_meowuri(self, fileobject):
log.debug('Running all available providers for %r', fileobject)
self.provider_runner.delegate_every_possible_meowuri(fileobject)
def request(self, fileobject, uri):
"""
Highest-level retrieval mechanism for data related to a file.
First the repository is queried with the MeowURI and if the query
returns data, it is returned. If the data is not in the repository,
the task of gathering the data is delegated to the "relevant" providers.
Then the repository is queried again.
If the delegation "succeeded" and the sought after data could be
gathered, it would now be stored in the repository and passed back
as the return value.
None is returned if nothing turns up.
"""
log.debug('Got request %r->[%s]', fileobject, uri)
# First try the repository for previously gathered data
response = self._query_repository(fileobject, uri)
if response:
return response
# Have relevant providers gather the data
self._delegate_to_providers(fileobject, uri)
# Try the repository again
response = self._query_repository(fileobject, uri)
if response:
return response
log.debug('Failed query, then delegation, then another query and returning None')
return QueryResponseFailure(
fileobject=fileobject, uri=uri,
msg='Repository query -> Delegation -> Repository query'
)
def request_one(self, fileobject, uri):
# TODO: [TD0175] Handle requesting exactly one or multiple alternatives.
response = self.request(fileobject, uri)
if isinstance(response, list):
if len(response) == 1:
return response[0]
return QueryResponseFailure(
fileobject=fileobject, uri=uri,
msg='Requested one but response contains {}'.format(len(response))
)
return response
def _delegate_to_providers(self, fileobject, uri):
log.debug('Delegating request to providers: %r->[%s]', fileobject, uri)
self.provider_runner.delegate_to_providers(fileobject, uri)
def _query_repository(self, fileobject, uri):
return repository.SessionRepository.query(fileobject, uri)
_MASTER_DATA_PROVIDER = None
def _initialize_master_data_provider(*_, **kwargs):
active_config = kwargs.get('config')
from core import analysis
run_analysis_func = analysis.run_analysis
# Keep one global 'MasterDataProvider' singleton per 'Autonameow' instance.
global _MASTER_DATA_PROVIDER
_MASTER_DATA_PROVIDER = MasterDataProvider(active_config, run_analysis_func)
def _shutdown_master_data_provider(*_, **__):
# TODO: [TD0202] Handle signals and graceful shutdown properly!
global _MASTER_DATA_PROVIDER
_MASTER_DATA_PROVIDER = None
Registry = None
def _initialize_provider_registry(*_, **__):
# Keep one global 'ProviderRegistry' singleton per 'Autonameow' instance.
global Registry
if not Registry:
Registry = ProviderRegistry(
meowuri_source_map=_get_meowuri_source_map(),
excluded_providers=_get_excluded_sources()
)
def _shutdown_provider_registry(*_, **__):
# TODO: [TD0202] Handle signals and graceful shutdown properly!
global Registry
Registry = None
event.dispatcher.on_config_changed.add(_initialize_master_data_provider)
event.dispatcher.on_startup.add(_initialize_provider_registry)
event.dispatcher.on_shutdown.add(_shutdown_provider_registry)
event.dispatcher.on_shutdown.add(_shutdown_master_data_provider)
def request(fileobject, uri):
sanity.check_isinstance_meowuri(uri)
return _MASTER_DATA_PROVIDER.request(fileobject, uri)
def request_one(fileobject, uri):
sanity.check_isinstance_meowuri(uri)
return _MASTER_DATA_PROVIDER.request_one(fileobject, uri)
def delegate_every_possible_meowuri(fileobject):
_MASTER_DATA_PROVIDER.delegate_every_possible_meowuri(fileobject)
| jonasjberg/autonameow | autonameow/core/master_provider.py | Python | gpl-2.0 | 17,265 |
"""grace
Revision ID: 2bce3f42832
Revises: 100d29f9f7e
Create Date: 2015-08-19 13:48:08.511040
"""
# revision identifiers, used by Alembic.
revision = '2bce3f42832'
down_revision = '100d29f9f7e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('cs_jiao_yi_ma', sa.Column('xzbz', sa.CHAR(length=12), nullable=False))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('cs_jiao_yi_ma', 'xzbz')
### end Alembic commands ###
| huangtao-sh/grace | grace/alembic/versions/2bce3f42832_grace.py | Python | gpl-2.0 | 649 |
# -*- coding: UTF-8 -*-
import eqq
import argparse
from eqq import EqqClient
class EqqMachine(object):
def __init__(self,uin,pwd):
self.eqq=EqqClient()
self.eqq.set_output()
self.uin=uin
self.pwd=pwd
self.eqq.set_account(uin,pwd)
self.eqq.login()
self.eqq.get_friend_info2(uin)
self.eqq.get_user_friends2()
self.eqq.get_group_name_list_mask2()
self.eqq.get_online_buddies2()
self.eqq.get_recent_list2()
def init(self):
self.set_message_process()
self.eqq.start()
def run(self):
while True:
cmd=raw_input("eqq#:")
self.pase_command(cmd)
def pase_command(self,command):
pass
def set_message_process(self):
self.eqq.set_poll_type_action('shake_message',self.process_shake_message)
self.eqq.set_poll_type_action('group_message',self.process_group_message)
def process_shake_message(self,message):
print '#shake_message:',message
pass
def process_group_message(self,message):
print '#group_message:',message
print 'content:',message['content']
for c in message['content']:
print 'c:',c
pass
| evilbinary/eqq-python | eqq_machine.py | Python | gpl-2.0 | 1,306 |
'''
Created on Nov 13, 2013
@author: samriggs
CODE CHALLENGE: Solve the Minimum Skew Problem.
https://beta.stepic.org/Bioinformatics-Algorithms-2/Peculiar-Statistics-of-the-Forward-and-Reverse-Half-Strands-7/#step-6
'''
from bi_utils.helpers import sane_open
from cStringIO import StringIO
def min_skew(dataset=''):
if (not dataset):
dataset = "stepic_dataset.txt"
# O(n)
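    # skew after reading i characters = (count of G) - (count of C) in that prefix;
    # the positions where the skew is minimal (printed below) mark candidate
    # replication-origin locations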
with sane_open(dataset) as f:
skew = 0
skew_list = []
for c in f.readline():
# calculate skew for each char
skew_list.append(skew)
if (c == "C"):
skew -= 1
elif (c == "G"):
skew += 1
# get min value, O(n)
min_skew = min(skew_list)
# O(n)
position = 0
file_str = StringIO()
for num in skew_list:
if (num == min_skew):
file_str.write( str(position) )
file_str.write(" ")
position += 1
print file_str.getvalue().strip()
if (__name__ == "__main__"):
min_skew() | samriggs/bioinf | Homeworks/bi-Python/chapter1/quiz6_solution.py | Python | gpl-2.0 | 1,112 |
#!/usr/bin/python
#GraphML-Topo-to-Mininet-Network-Generator
#
# This file parses Network Topologies in GraphML format from the Internet Topology Zoo.
# A python file for creating Mininet Topologies will be created as Output.
# Files have to be in the same directory.
#
# Arguments:
# -f [filename of GraphML input file]
# --file [filename of GraphML input file]
# -o [filename of GraphML output file]
# --output [filename of GraphML output file]
# -b [number as integer for bandwidth in mbit]
# --bw [number as integer for bandwidth in mbit]
# --bandwidth [number as integer for bandwidth in mbit]
# -c [controller ip as string]
# --controller [controller ip as string]
#
# Without any input, program will terminate.
# Without specified output, outputfile will have the same name as the input file.
# This means, the argument for the outputfile can be omitted.
# Parameters for bandwith and controller ip have default values, if they are omitted, too.
#
#
# sjas
# Wed Jul 17 02:59:06 PDT 2013
#
#
# TODO's:
# - fix double name error of some topologies
# - fix topoparsing (choose by name, not element <d..>)
# = topos with duplicate labels
# - use 'argparse' for script parameters, eases help creation
#
#################################################################################
import xml.etree.ElementTree as ET
import sys
import math
import re
from sys import argv
input_file_name = ''
output_file_name = ''
bandwidth_argument = ''
controller_ip = ''
# first check commandline arguments
for i in range(len(argv)):
if argv[i] == '-f':
input_file_name = argv[i+1]
if argv[i] == '--file':
input_file_name = argv[i+1]
if argv[i] == '-o':
output_file_name = argv[i+1]
if argv[i] == '--output':
output_file_name = argv[i+1]
if argv[i] == '-b':
bandwidth_argument = argv[i+1]
if argv[i] == '--bw':
bandwidth_argument = argv[i+1]
if argv[i] == '--bandwidth':
bandwidth_argument = argv[i+1]
if argv[i] == '-c':
controller_ip = argv[i+1]
if argv[i] == '--controller':
controller_ip = argv[i+1]
# terminate when inputfile is missing
if input_file_name == '':
sys.exit('\n\tNo input file was specified as argument....!')
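# The TODO list at the top mentions moving to 'argparse'. The helper below is an
# illustrative, unused sketch of what that could look like; it is never called,
# so it does not change the behaviour of this script, and the defaults shown
# here are assumptions rather than values taken from the manual parser above.
def _argparse_equivalent_sketch():
    import argparse
    parser = argparse.ArgumentParser(
        description='Parse a GraphML topology and generate a Mininet script.')
    parser.add_argument('-f', '--file', required=True,
                        help='filename of the GraphML input file')
    parser.add_argument('-o', '--output', default='',
                        help='filename of the generated Mininet script')
    parser.add_argument('-b', '--bw', '--bandwidth', dest='bandwidth', default='',
                        help='bandwidth in mbit for the generated links')
    parser.add_argument('-c', '--controller', default='',
                        help='IP address of the remote controller')
    return parser.parse_args()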
# define string fragments for output later on
outputstring_1 = '''#!/usr/bin/python
"""
Custom topology for Mininet, generated by GraphML-Topo-to-Mininet-Network-Generator.
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class GeneratedTopo( Topo ):
"Internet Topology Zoo Specimen."
def __init__( self, **opts ):
"Create a topology."
# Initialize Topology
Topo.__init__( self, **opts )
'''
outputstring_2a='''
# add nodes, switches first...
'''
outputstring_2b='''
# ... and now hosts
'''
outputstring_3a='''
# add edges between switch and corresponding host
'''
outputstring_3b='''
# add edges between switches
'''
outputstring_4a='''
topos = { 'generated': ( lambda: GeneratedTopo() ) }
# HERE THE CODE DEFINITION OF THE TOPOLOGY ENDS
# the following code produces an executable script working with a remote controller
# and providing ssh access to the the mininet hosts from within the ubuntu vm
'''
outputstring_4b = '''
def setupNetwork(controller_ip):
"Create network and run simple performance test"
# check if remote controller's ip was set
# else set it to localhost
topo = GeneratedTopo()
if controller_ip == '':
#controller_ip = '10.0.2.2';
controller_ip = '127.0.0.1';
net = Mininet(topo=topo, controller=lambda a: RemoteController( a, ip=controller_ip, port=6633 ), host=CPULimitedHost, link=TCLink)
return net
def connectToRootNS( network, switch, ip, prefixLen, routes ):
"Connect hosts to root namespace via switch. Starts network."
"network: Mininet() network object"
"switch: switch to connect to root namespace"
"ip: IP address for root namespace node"
"prefixLen: IP address prefix length (e.g. 8, 16, 24)"
"routes: host networks to route to"
# Create a node in root namespace and link to switch 0
root = Node( 'root', inNamespace=False )
intf = TCLink( root, switch ).intf1
root.setIP( ip, prefixLen, intf )
# Start network that now includes link to root namespace
network.start()
# Add routes from root ns to hosts
for route in routes:
root.cmd( 'route add -net ' + route + ' dev ' + str( intf ) )
# Run D-ITG logger on root
root.cmd('ITGLog &')
def sshd( network, cmd='/usr/sbin/sshd', opts='-D' ):
"Start a network, connect it to root ns, and run sshd on all hosts."
switch = network.switches[ 0 ] # switch to use
ip = '10.123.123.1' # our IP address on host network
routes = [ '10.0.0.0/8' ] # host networks to route to
connectToRootNS( network, switch, ip, 8, routes )
for host in network.hosts:
host.cmd( cmd + ' ' + opts + '&' )
host.cmd( 'ITGRecv -l /tmp/ITGRecv-Logs/ITGRecv-' + host.IP() + '.log > /dev/null &' )
# DEBUGGING INFO
print
print "Dumping host connections"
dumpNodeConnections(network.hosts)
print
print "*** Hosts are running sshd at the following addresses:"
print
for host in network.hosts:
print host.name, host.IP()
print
print "*** Type 'exit' or control-D to shut down network"
print
print "*** For testing network connectivity among the hosts, wait a bit for the controller to create all the routes, then do 'pingall' on the mininet console."
print
CLI( network )
for host in network.hosts:
host.cmd( 'kill %' + cmd )
network.stop()
if __name__ == '__main__':
setLogLevel('info')
#setLogLevel('debug')
sshd( setupNetwork(controller_ip) )
'''
#WHERE TO PUT RESULTS
outputstring_to_be_exported = ''
outputstring_to_be_exported += outputstring_1
#READ FILE AND DO ALL THE ACTUAL PARSING IN THE NEXT PARTS
xml_tree = ET.parse(input_file_name)
namespace = "{http://graphml.graphdrawing.org/xmlns}"
ns = namespace # just a shortcut, since the namespace is needed often.
#GET ALL ELEMENTS THAT ARE PARENTS OF ELEMENTS NEEDED LATER ON
root_element = xml_tree.getroot()
graph_element = root_element.find(ns + 'graph')
# GET ALL ELEMENT SETS NEEDED LATER ON
index_values_set = root_element.findall(ns + 'key')
node_set = graph_element.findall(ns + 'node')
edge_set = graph_element.findall(ns + 'edge')
# SET SOME VARIABLES TO SAVE FOUND DATA FIRST
# memorize the values' ids to search for in the current topology
node_label_name_in_graphml = ''
node_latitude_name_in_graphml = ''
node_longitude_name_in_graphml = ''
# for saving the current values
node_index_value = ''
node_name_value = ''
node_longitude_value = ''
node_latitude_value = ''
# id:value dictionaries
id_node_name_dict = {} # to hold all 'id: node_name_value' pairs
id_longitude_dict = {} # to hold all 'id: node_longitude_value' pairs
id_latitude_dict = {} # to hold all 'id: node_latitude_value' pairs
# FIND OUT WHAT KEYS ARE TO BE USED, SINCE THIS DIFFERS IN DIFFERENT GRAPHML TOPOLOGIES
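# Typical Topology Zoo key definitions look roughly like the following (the ids
# differ from file to file, which is exactly why they are looked up here):
#   <key attr.name="Longitude" attr.type="double" for="node" id="d32" />
#   <key attr.name="Latitude" attr.type="double" for="node" id="d29" />
#   <key attr.name="label" attr.type="string" for="node" id="d33" />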
for i in index_values_set:
if i.attrib['attr.name'] == 'label' and i.attrib['for'] == 'node':
node_label_name_in_graphml = i.attrib['id']
if i.attrib['attr.name'] == 'Longitude':
node_longitude_name_in_graphml = i.attrib['id']
if i.attrib['attr.name'] == 'Latitude':
node_latitude_name_in_graphml = i.attrib['id']
# NOW PARSE ELEMENT SETS TO GET THE DATA FOR THE TOPO
# GET NODE_NAME DATA
# GET LONGITUDE DATA
# GET LATITUDE DATA
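# Each node element carries those keys as <data> children, e.g. (illustrative values):
#   <node id="3">
#     <data key="d33">Vienna</data>
#     <data key="d32">16.37208</data>
#     <data key="d29">48.20849</data>
#   </node>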
for n in node_set:
node_index_value = n.attrib['id']
#get all data elements residing under all node elements
data_set = n.findall(ns + 'data')
#finally get all needed values
for d in data_set:
#node name
if d.attrib['key'] == node_label_name_in_graphml:
#strip all whitespace from names so they can be used as id's
node_name_value = re.sub(r'\s+', '', d.text)
#longitude data
if d.attrib['key'] == node_longitude_name_in_graphml:
node_longitude_value = d.text
#latitude data
if d.attrib['key'] == node_latitude_name_in_graphml:
node_latitude_value = d.text
#save id:data couple
id_node_name_dict[node_index_value] = node_name_value
id_longitude_dict[node_index_value] = node_longitude_value
id_latitude_dict[node_index_value] = node_latitude_value
# STRING CREATION
# FIRST CREATE THE SWITCHES AND HOSTS
tempstring1 = ''
tempstring2 = ''
tempstring3 = ''
for i in range(0, len(id_node_name_dict)):
#create switch
temp1 = ' '
temp1 += id_node_name_dict[str(i)]
temp1 += " = self.addSwitch( 's"
temp1 += str(i)
temp1 += "' )\n"
#create corresponding host
temp2 = ' '
temp2 += id_node_name_dict[str(i)]
temp2 += "_host = self.addHost( 'h"
temp2 += str(i)
temp2 += "' )\n"
tempstring1 += temp1
tempstring2 += temp2
# link each switch and its host...
temp3 = ' self.addLink( '
temp3 += id_node_name_dict[str(i)]
temp3 += ' , '
temp3 += id_node_name_dict[str(i)]
temp3 += "_host )"
temp3 += '\n'
tempstring3 += temp3
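# For a node labelled e.g. 'Vienna' with id 0, the loop above produces (illustrative):
#     Vienna = self.addSwitch( 's0' )
#     Vienna_host = self.addHost( 'h0' )
#     self.addLink( Vienna , Vienna_host )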
outputstring_to_be_exported += outputstring_2a
outputstring_to_be_exported += tempstring1
outputstring_to_be_exported += outputstring_2b
outputstring_to_be_exported += tempstring2
outputstring_to_be_exported += outputstring_3a
outputstring_to_be_exported += tempstring3
outputstring_to_be_exported += outputstring_3b
# SECOND CALCULATE DISTANCES BETWEEN SWITCHES,
# set global bandwidth and create the edges between switches,
# and link each single host to its corresponding switch
tempstring4 = ''
tempstring5 = ''
distance = 0.0
latency = 0.0
for e in edge_set:
# GET IDS FOR EASIER HANDLING
src_id = e.attrib['source']
dst_id = e.attrib['target']
# CALCULATE DELAYS
# CALCULATION EXPLANATION
#
# formula: (for distance)
# dist(SP,EP) = arccos{ sin(La[EP]) * sin(La[SP]) + cos(La[EP]) * cos(La[SP]) * cos(Lo[EP] - Lo[SP])} * r
# r = 6378.137 km
#
    # formula: (signal propagation speed in fibre, i.e. below the vacuum speed of light)
# v = 1.97 * 10**8 m/s
#
# formula: (latency being calculated from distance and light speed)
# t = distance / speed of light
# t (in ms) = ( distance in km * 1000 (for meters) ) / ( speed of light / 1000 (for ms))
# ACTUAL CALCULATION: implementing this was no fun.
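    # Worked example (illustrative numbers): two nodes roughly 1000 km apart give
    # latency = (1000 * 1000) / 197000 ~= 5.08 ms, which ends up in the delay
    # parameter of the generated addLink() call below.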
latitude_src = math.radians(float(id_latitude_dict[src_id]))
latitude_dst = math.radians(float(id_latitude_dict[dst_id]))
longitude_src = math.radians(float(id_longitude_dict[src_id]))
longitude_dst = math.radians(float(id_longitude_dict[dst_id]))
first_product = math.sin(latitude_dst) * math.sin(latitude_src)
second_product_first_part = math.cos(latitude_dst) * math.cos(latitude_src)
second_product_second_part = math.cos(longitude_dst - longitude_src)
distance = math.acos(first_product + (second_product_first_part * second_product_second_part)) * 6378.137
# t (in ms) = ( distance in km * 1000 (for meters) ) / ( speed of light / 1000 (for ms))
# t = ( distance * 1000 ) / ( 1.97 * 10**8 / 1000 )
latency = ( distance * 1000 ) / ( 197000 )
# BANDWIDTH LIMITING
#set bw to 10mbit if nothing was specified otherwise on startup
if bandwidth_argument == '':
bandwidth_argument = '10';
# ... and link all corresponding switches with each other
temp4 = ' self.addLink( '
temp4 += id_node_name_dict[src_id]
temp4 += ' , '
temp4 += id_node_name_dict[dst_id]
temp4 += ", bw="
temp4 += bandwidth_argument
temp4 += ", delay='"
temp4 += str(latency)
temp4 += "ms')"
temp4 += '\n'
        # next line kept so I don't have to look up other possible settings
#temp += "ms', loss=0, max_queue_size=1000, use_htb=True)"
tempstring4 += temp4
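# With the default bandwidth this yields generated lines like (illustrative names/values):
#     self.addLink( Vienna , Prague, bw=10, delay='5.076142131979695ms')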
outputstring_to_be_exported += tempstring4
outputstring_to_be_exported += outputstring_4a
# this is kind of dirty, due to having to use mixed '' ""
temp5 = "controller_ip = '"
temp5 += controller_ip
temp5 += "'\n"
tempstring5 += temp5
outputstring_to_be_exported += tempstring5
outputstring_to_be_exported += outputstring_4b
# GENERATION FINISHED, WRITE STRING TO FILE
outputfile = ''
if output_file_name == '':
output_file_name = input_file_name + '-generated-Mininet-Topo.py'
outputfile = open(output_file_name, 'w')
outputfile.write(outputstring_to_be_exported)
outputfile.close()
print "Topology generation SUCCESSFUL!"
| yossisolomon/assessing-mininet | parser/GraphML-Topo-to-Mininet-Network-Generator.py | Python | gpl-2.0 | 13,124 |
from django.conf.urls import url
from kraut_accounts import views
urlpatterns = [
url(r'^logout/$', views.accounts_logout, name='logout'),
url(r'^login/$', views.accounts_login, name='login'),
url(r'^changepw/$', views.accounts_change_password, name='changepw'),
]
| zeroq/kraut_salad | kraut_accounts/urls.py | Python | gpl-2.0 | 278 |
from PyQt4 import QtCore, uic
from PyQt4.QtGui import *
from subprocess import call
from grafo import Grafo
from uis.uiMainwindow import Ui_MainWindow
from sobre import SobreUi
import pdb
from resultado import Resultado
def debug_trace():
'''Set a tracepoint in the Python debugger that works with Qt'''
from PyQt4.QtCore import pyqtRemoveInputHook
from pdb import set_trace
pyqtRemoveInputHook()
set_trace()
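# Usage sketch (not wired into this file): call debug_trace() inside a Qt slot to get
# a usable pdb prompt; pyqtRemoveInputHook stops Qt from competing for stdin while
# the debugger is active.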
class MainWindow(QMainWindow, Ui_MainWindow):
matrizAdjacencia = {}
grafo = Grafo()
def closeEvent(self, event):
if False: event = QCloseEvent
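        # NOTE: the 'if False:' line above is only an IDE type-hint trick; it never
        # executes, so this override effectively leaves the close event untouched.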
def alert(self):
QMessageBox.about(self, "TESTE", "TESTE")
def addVertice(self, vertice):
if not vertice: return
self.modelVertice.appendRow(QStandardItem(vertice))
self.listVertices.setModel(self.modelVertice)
self.addToComboVertice(vertice)
self.grafo.addVertice(vertice)
def buttonAddVertice(self):
self.addVertice(self.lineNomeVertice.text())
def addAresta(self, aresta):
if not aresta: return
self.modelAresta.appendRow(QStandardItem(aresta))
self.listArestas.setModel(self.modelAresta)
self.comboAresta.addItem(aresta)
self.grafo.addAresta(aresta)
def buttonAddAresta(self):
self.addAresta(self.lineNomeAresta.text())
def addToComboVertice(self, text):
self.comboVertice1.addItem(text)
self.comboVertice2.addItem(text)
self.comboCaminhoInicio.addItem(text)
self.comboCaminhoFim.addItem(text)
def addConexao(self, v1, aresta, v2, peso = 1):
if not v1 or not aresta or not v2: return
conexao = v1 + '|' + aresta + '|' + v2 + '|' + str(peso)
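        # e.g. addConexao('A', 'e1', 'B', 3) stores "A|e1|B|3" (illustrative values);
        # removeVertice() and removeAresta() later split on '|' to find affected rows.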
self.modelConexao.appendRow(QStandardItem(conexao))
self.grafo.addConexao(v1, aresta, v2, peso)
self.listConexoes.setModel(self.modelConexao)
def buttonAddConexao(self):
try:
peso = int(self.linePesoNo.text())
except:
peso = 1
v1, v2 = self.comboVertice1.currentText(), self.comboVertice2.currentText()
aresta = self.comboAresta.currentText()
self.addConexao(v1, aresta, v2, peso)
def gerar(self):
# ['1|2|1|1']
# v1 | a | v2 | peso
resList = []
if self.checkDirecionado.isChecked():
self.grafo.setDirecionado(True)
else:
self.grafo.setDirecionado(False)
if self.checkExisteLaco.isChecked():
if self.grafo.existeLaco():
resList.append("- Existem os seguintes laços:\n" + "\n".join(self.grafo.getLaco()))
else:
resList.append("- Nao existem lacos no grafo.")
if self.checkExisteParalela.isChecked():
if self.grafo.existeArestaParalela():
resList.append("- Existem arestas paralelas")
else:
resList.append("- Nao existem arestas paralelas")
if self.checkExisteIsolado.isChecked():
if self.grafo.existeVerticeIsolado():
resList.append("- Existem vertices isolados")
else:
resList.append("- Nao existem vertices isolados")
if self.checkOrdem.isChecked():
resList.append("- Ordem do grafo: " + str(self.grafo.getOrdem()))
if self.checkExisteCiclo.isChecked():
ciclos = self.grafo.getCiclos()
if ciclos:
resList.append("- Existe(m) ciclo(s) para o(s) vértice(s): " + ", ".join(ciclos))
else:
resList.append("- Nao existem ciclos no grafo")
if self.checkConexo.isChecked():
if self.grafo.isConexo():
resList.append("- Grafo é conexo")
else:
resList.append("- Grafo não é conexo")
if self.checkCaminhoCurto.isChecked():
v1 = self.comboCaminhoInicio.currentText()
v2 = self.comboCaminhoFim.currentText()
if self.grafo.existeCaminho(v1, v2, []):
resList.append("- Existe caminho entre o vértice '" + v1 + "' e '" + v2 +"'")
else:
resList.append("- Nao existe caminho entre o vértice '" + v1 + "' e '" + v2 +"'")
if self.checkGrau.isChecked():
graus = self.grafo.getTodosGraus()
if self.grafo.isDirecionado:
resList.append("- Grau de cada vértice (emissão, recepção):")
else:
resList.append("- Grau de cada vértice:")
for v in graus.keys():
if self.grafo.isDirecionado:
resList.append(" '" + v + "': " + str(graus[v][0]) + ", " + str(graus[v][1]))
else:
resList.append(" '" + v + "': " + str(graus[v]))
resList.append("")
if self.checkAdjacencia.isChecked():
adjacencias = self.grafo.getTodasAdjacencias()
resList.append("- Adjacências de cada vértice:")
for v in adjacencias.keys():
strAdj = "" + v + ": "
verticesAdj = []
for arestaAdj, vertAdj in adjacencias[v]:
verticesAdj.append(vertAdj)
if verticesAdj:
resList.append(strAdj + ", ".join(verticesAdj))
else:
resList.append(strAdj + "Nenhum")
resultado = Resultado("\n".join(resList), self)
resultado.centerToMainWindow()
self.resultados.append(resultado)
def buttonRemoveVertice(self):
index = self.listVertices.currentIndex()
text = self.modelVertice.itemFromIndex(index).text()
self.removeVertice({'index': index.row(), 'value': text})
def removeVertice(self, v):
self.grafo.removeVertice(v['value'])
self.modelVertice.removeRow(v['index'])
eraseFrom = [self.comboVertice1,self.comboVertice2,self.comboCaminhoInicio,self.comboCaminhoFim]
for combo in eraseFrom:
combo.removeItem(combo.findText(v['value']))
# for i in self.comboVertice1.count():
# pass
toErase = []
for i in range(self.modelConexao.rowCount()):
item = self.modelConexao.item(i)
values = item.text().split('|')
if values[0] == str(v['value']) or values[2] == str(v['value']):
toErase.append(item)
for item in toErase:
index = self.modelConexao.indexFromItem(item)
if False: index = QStandardItem
self.modelConexao.removeRow(index.row())
def removeConexao(self, a):
self.grafo.removeConexao(a['value'].split('|')[1])
self.modelConexao.removeRow(a['index'])
def removeAresta(self, a):
self.grafo.removeAresta(a['value'])
self.modelAresta.removeRow(a['index'])
self.comboAresta.removeItem(self.comboAresta.findText(a['value']))
toErase = []
for i in range(self.modelConexao.rowCount()):
item = self.modelConexao.item(i)
values = item.text().split('|')
print(values)
if values[1] == str(a['value']):
toErase.append(item)
for item in toErase:
index = self.modelConexao.indexFromItem(item)
if False: index = QStandardItem
self.modelConexao.removeRow(index.row())
def listVerticesClicked(self, model):
if False: model = QStandardItem
self.modelVertice.removeRow(model.row())
def __init__(self, parent=None, name=None, fl=0):
self.resultados = []
QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
Ui_MainWindow.setupUi(self, self)
self.menuArquivo.setTitle("&Arquivo")
# uic.loadUi('mainwindow.ui', self)
# self.listVertices.setEditTriggers(QApplication.NoEditTriggers)
self.modelVertice = QStandardItemModel(self.listVertices)
self.modelAresta = QStandardItemModel(self.listArestas)
self.modelConexao = QStandardItemModel(self.listConexoes)
self.sobreUi = SobreUi(self)
self.sobreUi.tbAbout.setText(open('sobre.txt').read())
self.events()
def buttonRemoveAresta(self):
index = self.listArestas.currentIndex()
text = self.modelAresta.itemFromIndex(index).text()
self.removeAresta({'index': index.row(), 'value': text})
def buttonRemoveConexao(self):
index = self.listConexoes.currentIndex()
text = self.modelConexao.itemFromIndex(index).text()
self.removeConexao({'index': index.row(), 'value': text})
def events(self):
QtCore.QObject.connect(self.pushAddVertice,
QtCore.SIGNAL("clicked()"),
self.buttonAddVertice)
QtCore.QObject.connect(self.pushAddAresta,
QtCore.SIGNAL("clicked()"),
self.buttonAddAresta)
QtCore.QObject.connect(self.pushAddConexao,
QtCore.SIGNAL("clicked()"),
self.buttonAddConexao)
QtCore.QObject.connect(self.pushRemoverVertice,
QtCore.SIGNAL("clicked()"),
self.buttonRemoveVertice)
QtCore.QObject.connect(self.pushRemoverAresta,
QtCore.SIGNAL("clicked()"),
self.buttonRemoveAresta)
QtCore.QObject.connect(self.pushRemoveConexao,
QtCore.SIGNAL("clicked()"),
self.buttonRemoveConexao)
QtCore.QObject.connect(self.pushGerar, QtCore.SIGNAL("clicked()"), self.gerar)
self.actionSobre.triggered.connect(self.showSobreUi)
self.actionSair.triggered.connect(self.sair)
def showSobreUi(self):
self.sobreUi.show()
self.sobreUi.centerToMainWindow()
def sair(self):
self.close()
def show(self):
super().show()
| sollidsnake/grafo | mainwindow.py | Python | gpl-2.0 | 10,057 |