code | repo_name | path | language | license | size |
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 | stringclasses 15 | int32 2-1.05M |
---|---|---|---|---|---|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Management System for Multiple Records',
'version': '0.1',
'category': 'Knowledge Management',
'summary': 'Document Management System for Multiple Records',
'description': """
Document Management System for Multiple Records
===============================================
Contributors
------------
* El Hadji Dem ([email protected])
""",
'author': 'Savoir-faire Linux',
'website': 'www.savoirfairelinux.com',
'license': 'AGPL-3',
'depends': [
'document',
],
'data': [
'document_view.xml',
'security/ir.model.access.csv',
'wizard/document_wizard_view.xml',
],
'js': [
'static/src/js/document.js'
],
'qweb': [
'static/src/xml/document.xml'
],
'test': [],
'demo': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| rschnapka/knowledge | document_multiple_records/__openerp__.py | Python | agpl-3.0 | 1,965 |
#
# Copyright (c) 2013-2014, Scott J Maddox
#
# This file is part of SimplePL.
#
# SimplePL is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# SimplePL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with SimplePL. If not, see
# <http://www.gnu.org/licenses/>.
#
#######################################################################
'''
Defines the ExpandingBuffer class--a numpy array based expanding buffer
designed for efficient real-time plotting.
'''
import numpy as np
class ExpandingBuffer(object):
'''
A numpy array based expanding buffer that allows efficient appending
and accessing of ordered values. This implementation is designed
specifically for real-time plotting.
'''
# np.float64 needed to hold time.time()
def __init__(self, initial_size=1024, dtype=np.float64):
'''
Creates an ExpandingBuffer with the given initial size and dtype.
:param integer initial_size: the initial size of the ExpandingBuffer
:param numpy.dtype dtype: the data type of the contained values
:returns ExpandingBuffer:
'''
assert initial_size > 0
self._size = initial_size
self.dtype = dtype
self._buffer = np.empty(initial_size, dtype=dtype)
self._index = 0
@classmethod
def from_array(cls, array):
'''
Creates an ExpandingBuffer from the given numpy array. The dtype will
be the same as the array, and the initial values are copied in from
the array.
:param numpy.array array: the numpy array to create the
ExpandingBuffer from
:returns ExpandingBuffer:
'''
rb = cls(array.size, dtype=array.dtype)
rb.extend(array)
return rb
def append(self, value):
'''
Append a value to the end of the ExpandingBuffer.
:param number value: a value to append to the ExpandingBuffer
:returns None:
'''
if self._index >= self._size:
# get a new buffer that's 2x longer
old_buffer = self._buffer
old_size = self._size
self._size = self._size * 2
self._buffer = np.empty(self._size, dtype=self.dtype)
self._buffer[:old_size] = old_buffer
i = self._index
self._buffer[i] = value
self._index += 1
def extend(self, iterable):
'''
Extend the ExpandingBuffer with the values in iterable.
:param sequence iterable: a sequence of values to append
:returns None:
'''
for v in iterable:
self.append(v)
def get(self):
'''
Get the array.
:param None:
:returns numpy.array: the array of values
'''
return self._buffer[0:self._index]
def clear(self):
'''
Clears the contents of the ExpandingBuffer.
:param None:
:returns None:
'''
self._index = 0
def __len__(self):
return self._index
if __name__ == "__main__":
x = ExpandingBuffer(10, dtype=np.int32)
for i in xrange(100):
x.append(i)
print x.get()
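# A minimal extra usage sketch for the class above: from_array() copies an
# existing array in, and clear() only resets the write index (the underlying
# buffer memory is kept for reuse).
if __name__ == "__main__":
    y = ExpandingBuffer.from_array(np.arange(5, dtype=np.float64))
    y.append(5.0)
    print y.get()   # values 0.0 through 5.0
    y.clear()
    print len(y)    # 0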
| scott-maddox/simplepl | src/simplepl/expanding_buffer.py | Python | agpl-3.0 | 3,653 |
"""
Your module documentation here
"""
class PrimeClass(object):
"""This is a class that contains a is_prime method which checks primes"""
def is_prime(self, num_int):
""" This is a method called is_prime which checks primes number"""
# your primes code here
number = int(num_int)
count = 0
if number == 1:
return False
else:
not_prime = False
for item in range(1, number + 1):
if (number % item) == 0:
count = count + 1
if count >= 3:
not_prime = True
break
return not not_prime or count == 2
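# A minimal usage sketch of the class above when run as a script.
if __name__ == "__main__":
    checker = PrimeClass()
    for candidate in (1, 2, 9, 13):
        # expected output: 1 -> False, 2 -> True, 9 -> False, 13 -> True
        print("%d -> %s" % (candidate, checker.is_prime(candidate)))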
| IsabelEsparza/jarsa-applicant | primes.py | Python | agpl-3.0 | 694 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration entities.
"""
from __future__ import unicode_literals
import copy
import logging
import spreads.vendor.confit as confit
from pathlib import Path
import spreads.util as util
class OptionTemplate(object):
""" Definition of a configuration option.
:attr value: The default value for the option or a list of available
options if :py:attr:`selectable` is True
:type value: object (or list/tuple when :py:attr:`selectable` is True)
:attr docstring: A string explaining the configuration option
:type docstring: unicode
:attr selectable: Make the `OptionTemplate` a selectable, i.e. value
contains a list or tuple of acceptable values for this
option, with the first member being the default
selection.
:type selectable: bool
:attr advanced: Whether the option is an advanced option
:type advanced: bool
:attr depends: Make the option dependent on some other setting (if passed a
dict) or another plugin (if passed a string)
:type depends: dict/str
"""
def __init__(self, value, docstring=None, selectable=False,
advanced=False, depends=None):
self.value = value
self.docstring = docstring
self.selectable = selectable
self.advanced = advanced
self.depends = depends
def __repr__(self):
return ("OptionTemplate(value={0}, docstring={1}, selectable={2}"
" advanced={3}, depends={4})"
.format(repr(self.value), repr(self.docstring),
repr(self.selectable), repr(self.advanced),
repr(self.depends)))
# Configuration templates for the core
CORE_OPTIONS = {
'verbose': OptionTemplate(value=False,
docstring="Enable verbose output"),
'logfile': OptionTemplate(
value=unicode(Path(util.get_data_dir())/'spreads.log'),
docstring="Path to logfile"),
'loglevel': OptionTemplate(value=['info', 'critical', 'error',
'warning', 'debug'],
docstring="Logging level for logfile",
selectable=True),
'capture_keys': OptionTemplate(value=[" ", "b"],
docstring="Keys to trigger capture",
selectable=False),
'convert_old': OptionTemplate(
value=False,
docstring=("Convert workflows from older spreads version to the new "
"directory layout."),
advanced=True)
}
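# A hypothetical plugin-side template, shown only to illustrate the shape that
# plugins return from configuration_template(); the option names below are
# made up:
#
#   {'dpi': OptionTemplate(value=300, docstring="Capture resolution"),
#    'mode': OptionTemplate(value=['color', 'gray'], selectable=True,
#                           docstring="Capture mode, first entry is default")}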
class Configuration(object):
""" Entity managing configuration state.
Uses :py:class:`confit.Configuration` underneath the hood and follows
its 'overlay'-principle.
Proxies :py:meth:`__getitem__` and :py:meth:`__setitem__` from it, so
it can be used as a dict-like type.
"""
def __init__(self, appname='spreads'):
""" Create new instance and load default and current configuration.
:param appname: Application name, configuration will be loaded from
this name's default configuration directory
"""
self._config = confit.Configuration(appname, __name__)
self._config.read()
if 'plugins' not in self._config.keys():
self['plugins'] = []
self.load_templates()
self.load_defaults(overwrite=False)
# ----------------------------------------- #
# Proxied methods from confit.Configuration #
def __getitem__(self, key):
""" See :py:meth:`confit.ConfigView.__getitem__` """
return self._config[key]
def __setitem__(self, key, value):
""" See :py:meth:`confit.ConfigView.__setitem__` """
self._config[key] = value
def keys(self):
""" See :py:meth:`confit.ConfigView.keys` """
return self._config.keys()
def dump(self, filename=None, full=True, sections=None):
""" See :py:meth:`confit.Configuration.dump` """
return self._config.dump(unicode(filename), full, sections)
def flatten(self):
""" See :py:meth:`confit.Configuration.flatten` """
return self._config.flatten()
# ----------------------------------------- #
def load_templates(self):
""" Get all available configuration templates from the activated
plugins.
:returns: Mapping from plugin name to template mappings.
:rtype: dict unicode -> (dict unicode ->
:py:class:`OptionTemplate`)
"""
import spreads.plugin
self.templates = {}
self.templates['core'] = CORE_OPTIONS
if 'driver' in self.keys():
driver_name = self["driver"].get()
self.templates['device'] = (
spreads.plugin.get_driver(driver_name)
.configuration_template())
plugins = spreads.plugin.get_plugins(*self["plugins"].get())
for name, plugin in plugins.iteritems():
tmpl = plugin.configuration_template()
if tmpl:
self.templates[name] = tmpl
return self.templates
@property
def cfg_path(self):
""" Path to YAML file of the user-specific configuration.
:returns: Path
:rtype: :py:class:`pathlib.Path`
"""
return Path(self._config.config_dir()) / confit.CONFIG_FILENAME
def with_overlay(self, overlay):
""" Get a new configuration that overlays the provided configuration
over the present configuration.
:param overlay: The configuration to be overlaid
:type overlay: :py:class:`confit.ConfigSource` or dict
:return: A new, merged configuration
:rtype: :py:class:`confit.Configuration`
"""
new_config = copy.deepcopy(self._config)
new_config.set(overlay)
return new_config
def as_view(self):
""" Return the `Configuration` as a :py:class:`confit.ConfigView`
instance.
"""
return self._config
def load_defaults(self, overwrite=True):
""" Load default settings from option templates.
:param overwrite: Whether to overwrite already existing values
"""
for section, template in self.templates.iteritems():
self.set_from_template(section, template, overwrite)
def set_from_template(self, section, template, overwrite=True):
""" Set default options from templates.
:param section: Target section for settings
:type section: unicode
:type template: :py:class:`OptionTemplate`
:param overwrite: Whether to overwrite already existing values
"""
old_settings = self[section].flatten()
settings = copy.deepcopy(old_settings)
for key, option in template.iteritems():
logging.info("Adding setting {0} from {1}"
.format(key, section))
if not overwrite and key in old_settings:
continue
if option.selectable:
settings[key] = option.value[0]
else:
settings[key] = option.value
self[section].set(settings)
def set_from_args(self, args):
""" Apply settings from parsed command-line arguments.
:param args: Parsed command-line arguments
:type args: :py:class:`argparse.Namespace`
"""
for argkey, value in args.__dict__.iteritems():
skip = (value is None or
argkey == 'subcommand' or
argkey.startswith('_'))
if skip:
continue
if '.' in argkey:
section, key = argkey.split('.')
self[section][key] = value
else:
self[argkey] = value
| DIYBookScanner/spreads | spreads/config.py | Python | agpl-3.0 | 8,701 |
import io
import qrcode
from qrcode.image import pil, svg
from odoo import api, models
class IrActionsReport(models.Model):
_inherit = "ir.actions.report"
@api.model
def qr_generate(self, value, box_size=3, border=5, factory="png", **kwargs):
factories = {
"png": pil.PilImage,
"svg": svg.SvgImage,
"svg-fragment": svg.SvgFragmentImage,
"svg-path": svg.SvgPathImage,
}
back_color = kwargs.pop("back_color", "white")
fill_color = kwargs.pop("fill_color", "black")
try:
# Defaults to png if the argument is unknown
image_factory = factories.get(factory, pil.PilImage)
qr = qrcode.QRCode(
box_size=box_size, border=border, image_factory=image_factory, **kwargs
)
qr.add_data(value)
qr.make()
img = qr.make_image(fill_color=fill_color, back_color=back_color)
arr = io.BytesIO()
img.save(arr)
return arr.getvalue()
except Exception:
raise ValueError("Cannot convert into barcode.")
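# A minimal usage sketch, assuming a running Odoo environment; the URL is just
# an example value:
#
#   png_bytes = env["ir.actions.report"].qr_generate("https://example.com")
#   svg_bytes = env["ir.actions.report"].qr_generate(
#       "https://example.com", box_size=4, factory="svg-path")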
| OCA/reporting-engine | report_qr/models/ir_actions_report.py | Python | agpl-3.0 | 1,138 |
# -*- coding: utf-8 -*-
# Copyright 2016-2017 LasLabs Inc.
# License GPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
{
'name': 'Prescription Verification - States and Logic',
'summary': 'Introduces verified prescription states and related logic',
'version': '10.0.1.0.0',
'author': 'LasLabs, Odoo Community Association (OCA)',
'category': 'Medical',
'website': 'https://laslabs.com',
'license': 'GPL-3',
'installable': True,
'auto_install': False,
'depends': [
'medical_prescription_state',
],
'data': [
'data/base_kanban_stage.xml',
],
'demo': [
'demo/medical_medicament.xml',
'demo/medical_patient.xml',
'demo/medical_patient_medication.xml',
'demo/medical_physician.xml',
'demo/medical_prescription_order.xml',
'demo/medical_prescription_order_line.xml',
]
}
| laslabs/vertical-medical | medical_prescription_state_verify/__manifest__.py | Python | agpl-3.0 | 897 |
# -*- coding: utf-8 -*-
# © 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from .base import BaseTestHrPayslip
class TestWorkedDaysFromActivity(BaseTestHrPayslip):
def test_1(self):
self.timesheet1.button_confirm()
self.timesheet1.signal_workflow("done")
self.timesheet2.button_confirm()
self.timesheet2.signal_workflow("done")
self.assertEqual(self.timesheet1.state, "done")
self.payslip.compute_sheet()
self.payslip.action_import_timesheet_activity()
criteria1 = [
("import_from_activity", "=", True),
("code", "=", "TSCONS"),
("payslip_id", "=", self.payslip.id),
]
wds = self.obj_wd.search(criteria1)
self.assertEqual(len(wds), 1)
self.assertEqual(wds[0].number_of_hours, 8.0)
criteria2 = [
("import_from_activity", "=", True),
("code", "=", "TSADM"),
("payslip_id", "=", self.payslip.id),
]
wds = self.obj_wd.search(criteria2)
self.assertEqual(len(wds), 1)
self.assertEqual(wds[0].number_of_hours, 10.0)
def test_2(self):
self.payslip.compute_sheet()
self.payslip.action_import_timesheet_activity()
criteria1 = [
("import_from_activity", "=", True),
("code", "=", "TSCONS"),
("payslip_id", "=", self.payslip.id),
]
wds = self.obj_wd.search(criteria1)
self.assertEqual(len(wds), 1)
self.assertEqual(wds[0].number_of_hours, 0.0)
criteria2 = [
("import_from_activity", "=", True),
("code", "=", "TSADM"),
("payslip_id", "=", self.payslip.id),
]
wds = self.obj_wd.search(criteria2)
self.assertEqual(len(wds), 1)
self.assertEqual(wds[0].number_of_hours, 0.0)
| open-synergy/opnsynid-hr | hr_worked_days_from_activity/tests/test_hr_payslip.py | Python | agpl-3.0 | 1,897 |
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import account_journal
from . import account_tax_registry
from . import account
from . import vat_registry
| OCA/l10n-italy | l10n_it_vat_registries/models/__init__.py | Python | agpl-3.0 | 179 |
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Partner Experiences",
"version": "8.0.1.3.0",
"category": "Partner",
"website": "https://simetri-sinergi.id",
"author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr_experience",
"partner_contact_job_position",
"partner_job_level",
"partner_education_level",
],
"data": [
"security/ir.model.access.csv",
"security/partner_security.xml",
"views/res_partner_views.xml",
"views/partner_academic_view.xml",
"views/partner_experience_view.xml",
"views/partner_certification_view.xml",
],
}
| open-synergy/opnsynid-partner-contact | partner_experience/__openerp__.py | Python | agpl-3.0 | 792 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "armada.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| kriberg/eve-armada | manage.py | Python | agpl-3.0 | 249 |
# This file is part of Booktype.
# Copyright (c) 2014 Helmy Giacoman <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from rest_framework import status
from django.core.urlresolvers import reverse
from booktype.tests import TestCase
from booktype.tests.factory_models import (UserFactory, BookFactory, BookVersionFactory, ChapterFactory,
BookHistoryFactory, PLAIN_USER_PASSWORD)
class DashboardTest(TestCase):
"""
Tests Dashboard page as logged in and as anonymous user
"""
def setUp(self):
super(DashboardTest, self).setUp()
self.book = BookFactory()
self.book.version = BookVersionFactory(book=self.book) # TODO: improve this
self.book.save()
self.user_1 = self.book.owner
# need two users to be able to test collaboration within a book
self.user_2 = UserFactory()
# setup book content
chapter_1 = ChapterFactory(book=self.book, version=self.book.version)
chapter_2 = ChapterFactory(book=self.book, version=self.book.version)
# setup content for user two in same book
# call this "Contribution"
book_history = BookHistoryFactory(
book=self.book,
user=self.user_2,
chapter=chapter_2
)
self.dispatcher = reverse('accounts:view_profile', args=[self.user_1.username])
def _test_base_details(self, response):
# context should contain all below variables
context_vars = [
'books',
'books_collaborating',
'licenses',
'groups',
'recent_activity',
'book_license',
'book_visible'
]
for var in context_vars:
self.assertTrue(var in response.context)
def test_as_anonymous(self):
response = self.client.get(self.dispatcher)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
def test_as_account_owner(self):
self.client.login(
username=self.user_2.username,
password=PLAIN_USER_PASSWORD
)
own_dispatcher = reverse('accounts:view_profile', args=[self.user_2.username])
response = self.client.get(own_dispatcher)
context = response.context
# as authenticated user, test basic details
self._test_base_details(response)
# response should contain next things
self.assertContains(response, 'My Dashboard')
self.assertContains(response, 'Log out')
self.assertContains(response, 'Participating Books')
self.assertContains(response, '#createBookModal')
self.assertContains(response, '#importBookModal')
self.assertContains(response, 'id="user-settings"')
# this user is collaborating with other books
self.assertTrue(len(context['books_collaborating']) >= 1)
self.assertTrue(self.book in context['books_collaborating'])
# this user has no groups belonging
self.assertTrue(len(context['groups']) == 0)
def test_other_user_dashboard(self):
self.client.login(
username=self.user_2.username,
password=PLAIN_USER_PASSWORD
)
response = self.client.get(self.dispatcher)
self.assertEquals(response.status_code, status.HTTP_403_FORBIDDEN)
| MiczFlor/Booktype | lib/booktype/apps/account/tests/test_dashboard.py | Python | agpl-3.0 | 4,013 |
import base
import os
import re
import unittest
class ClassGenerator(object):
def __init__(self, workflows_directory, module, **kwargs):
self.workflows_directory = workflows_directory
self.module = module
self.pass_through_args = kwargs
def generate_classes(self):
for workflow_name in os.listdir(self.workflows_directory):
ptg = ParticularTestGenerator(workflow_name=workflow_name,
workflow_directory=os.path.join(
self.workflows_directory, workflow_name),
**self.pass_through_args)
ptg.attach_to(self.module)
class ParticularTestGenerator(object):
def __init__(self, workflow_name, workflow_directory, **kwargs):
self.workflow_name = workflow_name
self.workflow_directory = workflow_directory
self.pass_through_args = kwargs
def attach_to(self, module):
cls = type(self.test_name, (base.BaseWorkflowTest, unittest.TestCase),
self.class_dict)
setattr(module, self.test_name, cls)
@property
def class_dict(self):
result = {
'base_command_line': self.base_command_line,
'log_dir': self.log_dir,
'expected_outputs_path': self.expected_outputs_path,
'inputs_path': self.inputs_path,
}
result.update(self.pass_through_args)
return result
@property
def base_command_line(self):
return ['flow', 'execute-workflow', '--block',
'--xml', self.xml_path,
'--resource-file', self.resources_path]
@property
def log_dir(self):
return os.path.join('test_logs', self.workflow_name)
@property
def sanitized_workflow_name(self):
return re.sub('-', '_', self.workflow_name)
@property
def test_name(self):
return self.sanitized_workflow_name + '_test'
@property
def xml_path(self):
return os.path.join(self.workflow_directory, 'workflow.xml')
@property
def inputs_path(self):
return os.path.join(self.workflow_directory, 'inputs.json')
@property
def resources_path(self):
return os.path.join(self.workflow_directory, 'resources.json')
@property
def expected_outputs_path(self):
return os.path.join(self.workflow_directory, 'expected_outputs.json')
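# A sketch of how this generator is typically driven, assuming a test module
# that owns a workflows/ directory (both names here are illustrative):
#
#   import sys
#   ClassGenerator(workflows_directory='workflows',
#                  module=sys.modules[__name__]).generate_classes()
#
# Each workflow subdirectory then shows up as a <name>_test class on that
# module and is picked up by the test runner.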
| genome/flow-workflow | system_tests/generator.py | Python | agpl-3.0 | 2,388 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Nautical
# Copyright (C) 2013 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class invoice_line(osv.osv):
""""""
_name = 'account.invoice.line'
_inherits = { }
_inherit = [ 'account.invoice.line' ]
_columns = {
'instance_id': fields.many2one('saas_manager.instance', string='Instance'),
}
_defaults = {
}
_constraints = [
]
invoice_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| ingadhoc/odoo-saas-manager | addons/saas_manager_invoicing/TOREM_invoice_line.py | Python | agpl-3.0 | 1,408 |
# Copyright 2013-2021 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountTax(models.Model):
_inherit = "account.tax"
account_accrued_revenue_id = fields.Many2one(
comodel_name="account.account",
string="Accrued Revenue Tax Account",
domain=[("deprecated", "=", False)],
check_company=True,
)
account_accrued_expense_id = fields.Many2one(
comodel_name="account.account",
string="Accrued Expense Tax Account",
domain=[("deprecated", "=", False)],
check_company=True,
)
| OCA/account-closing | account_cutoff_base/models/account_tax.py | Python | agpl-3.0 | 707 |
import os
import sys
import logging
import fixture.examples.django_example
sys.path.append(os.path.dirname(fixture.examples.django_example.__file__))
## this is set by the test script?
# os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
## handled by NoseDjango now?
# from django.core.management import call_command
# log = logging.getLogger('nose.django_loadable')
#
# def setup():
# call_command('syncdb', interactive=False)
# call_command('reset', 'app', 'blog', interactive=False) | patrickod/fixture | fixture/test/test_loadable/test_django/__init__.py | Python | lgpl-2.1 | 505 |
"""Loading icons."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import gtk
from logging import warn
import math
def load_icon(icon_path, icon_width=None, icon_height=None):
"""Load icon from path. Icon MUST be in PNG format.
@param icon_path: pathname of icon, or None to load nothing
@return: a GdkPixbuf, or None on failure"""
if not icon_path:
return None
def size_prepared_cb(loader, width, height):
dest_width = icon_width or width
dest_height = icon_height or height
if dest_width == width and dest_height == height:
return
ratio_width = float(dest_width) / width
ratio_height = float(dest_height) / height
ratio = min(ratio_width, ratio_height)
# preserve the original aspect ratio
if ratio_width != ratio:
dest_width = int(math.ceil(width * ratio))
elif ratio_height != ratio:
dest_height = int(math.ceil(height * ratio))
loader.set_size(int(dest_width), int(dest_height))
# Restrict icon formats to avoid attacks
try:
loader = gtk.gdk.PixbufLoader('png')
if icon_width or icon_height:
loader.connect('size-prepared', size_prepared_cb)
try:
loader.write(file(icon_path).read())
finally:
loader.close()
return loader.get_pixbuf()
except Exception, ex:
warn(_("Failed to load cached PNG icon: %s") % ex)
return None
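# A minimal usage sketch (the path below is illustrative): load a PNG icon
# scaled to fit within 48x48 while preserving its aspect ratio.
#
#   pixbuf = load_icon('/usr/share/icons/example.png',
#                      icon_width=48, icon_height=48)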
| pombredanne/zero-install | zeroinstall/gtkui/icon.py | Python | lgpl-2.1 | 1,367 |
# Copyright (C) 2008 Red Hat, Inc.
# Copyright (C) 2016 Sam Parkinson <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
'''
The bundle builder provides a build system for Sugar activities. Usually, it
is setup by creating a `setup.py` file in the project with the following::
# setup.py
#!/usr/bin/env python
from sugar3.activity import bundlebuilder
bundlebuilder.start()
AppStream Metadata
==================
AppStream is the standard, distro-agnostic way of providing package metadata.
For Sugar activities, the AppStream metadata is automatically exported from
the activity.info file by the bundlebuilder.
Activities must have the following metadata fields under the [Activity] header
(of the `activity.info` file):
* `metadata_license` - license for screenshots and description. AppStream
requests only using one of the following: `CC0-1.0`, `CC-BY-3.0`,
`CC-BY-SA-3.0` or `GFDL-1.3`
* `license` - a `SPDX License Code`__, eg. `GPL-3.0+`
* `name`, `icon`, `bundle_id`, `summary` - same usage as in Sugar
* `description` - a long (multi paragraph) description of your application.
This must be written in a subset of HTML. Only the p, ol, ul and li tags
are supported.
Other good metadata items to have are:
* `url` - link to the home page for the activity on the internet
* `repository_url` - link to repository for activity code
* `screenshots` - a space separated list of screenshot URLs. PNG or JPEG files
are supported.
__ http://spdx.org/licenses/
Example `activity.info`
-----------------------
.. code-block:: ini
:emphasize-lines: 10-12,20-21
[Activity]
name = Browse
bundle_id = org.laptop.WebActivity
exec = sugar-activity webactivity.WebActivity
activity_version = 200
icon = activity-web
max_participants = 100
summary = Surf the world!
license = GPL-2.0+
metadata_license = CC0-1.0
description:
<p>Surf the world! Here you can do research, watch educational videos, take online courses, find books, connect with friends and more. Browse is powered by the WebKit2 rendering engine with the Faster Than Light javascript interpreter - allowing you to view the full beauty of the web.</p>
<p>To help in researching, Browse offers many features:</p>
<ul>
<li>Bookmark (save) good pages you find - never loose good resources or forget to add them to your bibliography</li>
<li>Bookmark pages with collaborators in real time - great for researching as a group or teachers showing pages to their class</li>
<li>Comment on your bookmarked pages - a great tool for making curated collections</li>
</ul>
url = https://github.com/sugarlabs/browse-activity
screenshots = https://people.sugarlabs.org/sam/activity-ss/browse-1-1.png https://people.sugarlabs.org/sam/activity-ss/browse-1-2.png
'''
import argparse
import operator
import os
import sys
import zipfile
import tarfile
import unittest
import shutil
import subprocess
import re
import gettext
import logging
from glob import glob
from fnmatch import fnmatch
from ConfigParser import ConfigParser
import xml.etree.cElementTree as ET
from HTMLParser import HTMLParser
from sugar3 import env
from sugar3.bundle.activitybundle import ActivityBundle
IGNORE_DIRS = ['dist', '.git', 'screenshots']
IGNORE_FILES = ['.gitignore', 'MANIFEST', '*.pyc', '*~', '*.bak', 'pseudo.po']
def list_files(base_dir, ignore_dirs=None, ignore_files=None):
result = []
base_dir = os.path.abspath(base_dir)
for root, dirs, files in os.walk(base_dir):
if ignore_files:
for pattern in ignore_files:
files = [f for f in files if not fnmatch(f, pattern)]
rel_path = root[len(base_dir) + 1:]
for f in files:
result.append(os.path.join(rel_path, f))
if ignore_dirs and root == base_dir:
for ignore in ignore_dirs:
if ignore in dirs:
dirs.remove(ignore)
return result
class Config(object):
def __init__(self, source_dir, dist_dir=None, dist_name=None):
self.source_dir = source_dir
self.build_dir = os.getcwd()
self.dist_dir = dist_dir or os.path.join(self.build_dir, 'dist')
self.dist_name = dist_name
self.bundle = None
self.version = None
self.activity_name = None
self.bundle_id = None
self.bundle_name = None
self.bundle_root_dir = None
self.tar_root_dir = None
self.xo_name = None
self.tar_name = None
self.summary = None
self.description = None
self.update()
def update(self):
self.bundle = bundle = ActivityBundle(self.source_dir,
translated=False)
self.version = bundle.get_activity_version()
self.activity_name = bundle.get_name()
self.bundle_id = bundle.get_bundle_id()
self.summary = bundle.get_summary()
self.description = bundle.get_description()
self.bundle_name = reduce(operator.add, self.activity_name.split())
self.bundle_root_dir = self.bundle_name + '.activity'
self.tar_root_dir = '%s-%s' % (self.bundle_name, self.version)
if self.dist_name:
self.xo_name = '%s.xo' % self.dist_name
self.tar_name = '%s.tar.bz2' % self.dist_name
else:
self.xo_name = '%s-%s.xo' % (self.bundle_name, self.version)
self.tar_name = '%s-%s.tar.bz2' % (self.bundle_name, self.version)
class Builder(object):
def __init__(self, config, no_fail=False):
self.config = config
self._no_fail = no_fail
self.locale_dir = os.path.join(self.config.build_dir, 'locale')
def build(self):
self.build_locale()
def build_locale(self):
po_dir = os.path.join(self.config.source_dir, 'po')
if not self.config.bundle.is_dir(po_dir):
logging.warn('Missing po/ dir, cannot build_locale')
return
if os.path.exists(self.locale_dir):
shutil.rmtree(self.locale_dir)
for f in os.listdir(po_dir):
if not f.endswith('.po') or f == 'pseudo.po':
continue
file_name = os.path.join(po_dir, f)
lang = f[:-3]
localedir = os.path.join(self.config.build_dir, 'locale', lang)
mo_path = os.path.join(localedir, 'LC_MESSAGES')
if not os.path.isdir(mo_path):
os.makedirs(mo_path)
mo_file = os.path.join(mo_path, '%s.mo' % self.config.bundle_id)
args = ['msgfmt', '--output-file=%s' % mo_file, file_name]
retcode = subprocess.call(args)
if retcode:
print 'ERROR - msgfmt failed with return code %i.' % retcode
if self._no_fail:
continue
cat = gettext.GNUTranslations(open(mo_file, 'r'))
translated_name = cat.gettext(self.config.activity_name)
translated_summary = cat.gettext(self.config.summary)
if translated_summary is None:
translated_summary = ''
if translated_summary.find('\n') > -1:
translated_summary = translated_summary.replace('\n', '')
logging.warn(
'Translation of summary on file %s have \\n chars. '
'Should be removed' % file_name)
linfo_file = os.path.join(localedir, 'activity.linfo')
f = open(linfo_file, 'w')
f.write('[Activity]\nname = %s\n' % translated_name)
f.write('summary = %s\n' % translated_summary)
f.close()
def get_locale_files(self):
return list_files(self.locale_dir, IGNORE_DIRS, IGNORE_FILES)
class Packager(object):
def __init__(self, config):
self.config = config
self.package_path = None
if not os.path.exists(self.config.dist_dir):
os.mkdir(self.config.dist_dir)
def get_files_in_git(self, root=None):
if root is None:
root = self.config.source_dir
git_ls = None
try:
git_ls = subprocess.Popen(['git', 'ls-files'],
stdout=subprocess.PIPE,
cwd=root)
except OSError:
logging.warn('Packager: git is not installed, '
'fall back to filtered list')
if git_ls is not None:
stdout, _ = git_ls.communicate()
if git_ls.returncode:
# Fall back to filtered list
logging.warn('Packager: this is not a git repository, '
'fall back to filtered list')
elif stdout:
# pylint: disable=E1103
git_output = [path.strip() for path in
stdout.strip('\n').split('\n')]
files = []
for line in git_output:
ignore = False
for directory in IGNORE_DIRS:
if line.startswith(directory + '/'):
ignore = True
break
if not ignore:
sub_path = os.path.join(root, line)
if os.path.isdir(sub_path) \
and os.path.exists(os.path.join(sub_path, '.git')):
sub_list = self.get_files_in_git(sub_path)
for f in sub_list:
files.append(os.path.join(line, f))
else:
files.append(line)
for pattern in IGNORE_FILES:
files = [f for f in files if not fnmatch(f, pattern)]
return files
return list_files(self.config.source_dir,
IGNORE_DIRS, IGNORE_FILES)
class XOPackager(Packager):
def __init__(self, builder):
Packager.__init__(self, builder.config)
self.builder = builder
self.builder.build_locale()
self.package_path = os.path.join(self.config.dist_dir,
self.config.xo_name)
def package(self):
bundle_zip = zipfile.ZipFile(self.package_path, 'w',
zipfile.ZIP_DEFLATED)
for f in self.get_files_in_git():
bundle_zip.write(os.path.join(self.config.source_dir, f),
os.path.join(self.config.bundle_root_dir, f))
for f in self.builder.get_locale_files():
bundle_zip.write(os.path.join(self.builder.locale_dir, f),
os.path.join(self.config.bundle_root_dir,
'locale', f))
bundle_zip.close()
class SourcePackager(Packager):
def __init__(self, config):
Packager.__init__(self, config)
self.package_path = os.path.join(self.config.dist_dir,
self.config.tar_name)
def package(self):
tar = tarfile.open(self.package_path, 'w:bz2')
for f in self.get_files_in_git():
tar.add(os.path.join(self.config.source_dir, f),
os.path.join(self.config.tar_root_dir, f))
tar.close()
class Installer(Packager):
def __init__(self, builder):
Packager.__init__(self, builder.config)
self.builder = builder
def install(self, prefix, install_mime=True, install_desktop_file=True):
self.builder.build()
activity_path = os.path.join(prefix, 'share', 'sugar', 'activities',
self.config.bundle_root_dir)
source_to_dest = {}
for f in self.get_files_in_git():
source_path = os.path.join(self.config.source_dir, f)
dest_path = os.path.join(activity_path, f)
source_to_dest[source_path] = dest_path
for f in self.builder.get_locale_files():
source_path = os.path.join(self.builder.locale_dir, f)
if source_path.endswith(".mo"):
dest_path = os.path.join(prefix, 'share', 'locale', f)
else:
dest_path = os.path.join(activity_path, 'locale', f)
source_to_dest[source_path] = dest_path
for source, dest in source_to_dest.items():
print 'Install %s to %s.' % (source, dest)
path = os.path.dirname(dest)
if not os.path.exists(path):
os.makedirs(path)
shutil.copy(source, dest)
if install_mime:
self.config.bundle.install_mime_type(self.config.source_dir)
if install_desktop_file:
self._install_desktop_file(prefix, activity_path)
self._generate_appdata(prefix, activity_path)
def _install_desktop_file(self, prefix, activity_path):
cp = ConfigParser()
section = 'Desktop Entry'
cp.add_section(section)
cp.optionxform = str # Allow CamelCase entries
# Get it from the activity.info for the non-translated version
info = ConfigParser()
info.read(os.path.join(activity_path, 'activity', 'activity.info'))
cp.set(section, 'Name', info.get('Activity', 'name'))
if info.has_option('Activity', 'summary'):
cp.set(section, 'Comment', info.get('Activity', 'summary'))
for path in sorted(glob(os.path.join(activity_path, 'locale',
'*', 'activity.linfo'))):
locale = path.split(os.path.sep)[-2]
info = ConfigParser()
info.read(path)
if info.has_option('Activity', 'name'):
cp.set(section, 'Name[{}]'.format(locale),
info.get('Activity', 'name'))
if info.has_option('Activity', 'summary'):
cp.set(section, 'Comment[{}]'.format(locale),
info.get('Activity', 'summary'))
cp.set(section, 'Terminal', 'false')
cp.set(section, 'Type', 'Application')
cp.set(section, 'Categories', 'Education;')
cp.set(section, 'Icon', os.path.join(
activity_path, 'activity', self.config.bundle.get_icon_filename()))
cp.set(section, 'Exec', self.config.bundle.get_command())
cp.set(section, 'Path', activity_path) # Path == CWD for running
name = '{}.activity.desktop'.format(self.config.bundle_id)
path = os.path.join(prefix, 'share', 'applications', name)
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'w') as f:
cp.write(f)
def _generate_appdata(self, prefix, activity_path):
info = ConfigParser()
info.read(os.path.join(activity_path, 'activity', 'activity.info'))
required_fields = ['metadata_license', 'license', 'name', 'icon',
'description']
for name in required_fields:
if not info.has_option('Activity', name):
print('[WARNING] Activity needs more metadata for AppStream '
'file')
print(' Without an AppStream file, the activity will NOT '
'show in software stores!')
print(' Please `pydoc sugar3.activity.bundlebuilder` for '
'more info')
return
# See https://www.freedesktop.org/software/appstream/docs/
root = ET.Element('component', type='desktop')
ET.SubElement(root, 'project_group').text = 'Sugar'
ET.SubElement(root, 'translation', type='gettext').text = \
self.config.bundle_id
ET.SubElement(root, 'id').text = \
self.config.bundle_id + '.activity.desktop'
desc = ET.fromstring('<description>{}</description>'.format(
info.get('Activity', 'description')))
root.append(desc)
copy_pairs = [('metadata_license', 'metadata_license'),
('license', 'project_license'),
('summary', 'summary'),
('name', 'name')]
for key, ename in copy_pairs:
ET.SubElement(root, ename).text = info.get('Activity', key)
if info.has_option('Activity', 'screenshots'):
screenshots = info.get('Activity', 'screenshots').split()
ss_root = ET.SubElement(root, 'screenshots')
for i, screenshot in enumerate(screenshots):
e = ET.SubElement(ss_root, 'screenshot')
if i == 0:
e.set('type', 'default')
ET.SubElement(e, 'image').text = screenshot
if info.has_option('Activity', 'url'):
ET.SubElement(root, 'url', type='homepage').text = \
info.get('Activity', 'url')
if info.has_option('Activity', 'repository_url'):
ET.SubElement(root, 'url', type='bugtracker').text = \
info.get('Activity', 'repository_url')
path = os.path.join(prefix, 'share', 'metainfo',
self.config.bundle_id + '.appdata.xml')
if not os.path.isdir(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
tree = ET.ElementTree(root)
tree.write(path, encoding='UTF-8')
def cmd_check(config, options):
"""Run tests for the activity"""
run_unit_test = True
run_integration_test = True
if options.choice == 'unit':
run_integration_test = False
if options.choice == 'integration':
run_unit_test = False
print "Running Tests"
test_path = os.path.join(config.source_dir, "tests")
if os.path.isdir(test_path):
unit_test_path = os.path.join(test_path, "unit")
integration_test_path = os.path.join(test_path, "integration")
sys.path.append(config.source_dir)
# Run Tests
if os.path.isdir(unit_test_path) and run_unit_test:
all_tests = unittest.defaultTestLoader.discover(unit_test_path)
unittest.TextTestRunner(verbosity=options.verbose).run(all_tests)
elif not run_unit_test:
print "Not running unit tests"
else:
print 'No "unit" directory found.'
if os.path.isdir(integration_test_path) and run_integration_test:
all_tests = unittest.defaultTestLoader.discover(
integration_test_path)
unittest.TextTestRunner(verbosity=options.verbose).run(all_tests)
elif not run_integration_test:
print "Not running integration tests"
else:
print 'No "integration" directory found.'
print "Finished testing"
else:
print "Error: No tests/ directory"
def cmd_dev(config, options):
"""Setup for development"""
bundle_path = env.get_user_activities_path()
if not os.path.isdir(bundle_path):
os.mkdir(bundle_path)
bundle_path = os.path.join(bundle_path, config.bundle_root_dir)
try:
os.symlink(config.source_dir, bundle_path)
except OSError:
if os.path.islink(bundle_path):
print 'ERROR - The bundle has been already setup for development.'
else:
print 'ERROR - A bundle with the same name is already installed.'
def cmd_dist_xo(config, options):
"""Create a xo bundle package"""
no_fail = False
if options is not None:
no_fail = options.no_fail
packager = XOPackager(Builder(config, no_fail))
packager.package()
def cmd_fix_manifest(config, options):
'''Add missing files to the manifest (OBSOLETE)'''
print 'WARNING: The fix_manifest command is obsolete.'
print ' The MANIFEST file is no longer used in bundles,'
print ' please remove it.'
def cmd_dist_source(config, options):
"""Create a tar source package"""
packager = SourcePackager(config)
packager.package()
def cmd_install(config, options):
"""Install the activity in the system"""
installer = Installer(Builder(config))
installer.install(options.prefix, options.install_mime)
def _po_escape(string):
return re.sub('([\\\\"])', '\\\\\\1', string)
def cmd_genpot(config, options):
"""Generate the gettext pot file"""
os.chdir(config.source_dir)
po_path = os.path.join(config.source_dir, 'po')
if not os.path.isdir(po_path):
os.mkdir(po_path)
python_files = []
for root, dirs_dummy, files in os.walk(config.source_dir):
for file_name in files:
if file_name.endswith('.py'):
file_path = os.path.relpath(os.path.join(root, file_name),
config.source_dir)
python_files.append(file_path)
# First write out a stub .pot file containing just the translated
# activity name, then have xgettext merge the rest of the
# translations into that. (We can't just append the activity name
# to the end of the .pot file afterwards, because that might
# create a duplicate msgid.)
pot_file = os.path.join('po', '%s.pot' % config.bundle_name)
escaped_name = _po_escape(config.activity_name)
f = open(pot_file, 'w')
f.write('#: activity/activity.info:2\n')
f.write('msgid "%s"\n' % escaped_name)
f.write('msgstr ""\n')
if config.summary is not None:
escaped_summary = _po_escape(config.summary)
f.write('#: activity/activity.info:3\n')
f.write('msgid "%s"\n' % escaped_summary)
f.write('msgstr ""\n')
if config.description is not None:
parser = HTMLParser()
strings = []
parser.handle_data = strings.append
parser.feed(config.description)
for s in strings:
s = s.strip()
if s:
f.write('#: activity/activity.info:4\n')
f.write('msgid "%s"\n' % _po_escape(s))
f.write('msgstr ""\n')
f.close()
args = ['xgettext', '--join-existing', '--language=Python',
'--keyword=_', '--add-comments=TRANS:', '--output=%s' % pot_file]
args += python_files
retcode = subprocess.call(args)
if retcode:
print 'ERROR - xgettext failed with return code %i.' % retcode
def cmd_build(config, options):
"""Build generated files"""
builder = Builder(config)
builder.build()
def start():
parser = argparse.ArgumentParser(prog='./setup.py')
subparsers = parser.add_subparsers(
dest="command", help="Options for %(prog)s")
install_parser = subparsers.add_parser(
"install", help="Install the activity in the system")
install_parser.add_argument(
"--prefix", dest="prefix", default=sys.prefix,
help="Path for installing")
install_parser.add_argument(
"--skip-install-mime", dest="install_mime",
action="store_false", default=True,
help="Skip the installation of custom mime types in the system")
check_parser = subparsers.add_parser(
"check", help="Run tests for the activity")
check_parser.add_argument("choice", nargs='?',
choices=['unit', 'integration'],
help="run unit/integration test")
check_parser.add_argument("--verbosity", "-v", dest="verbose",
type=int, choices=range(0, 3),
default=1, nargs='?',
help="verbosity for the unit tests")
dist_parser = subparsers.add_parser("dist_xo",
help="Create a xo bundle package")
dist_parser.add_argument(
"--no-fail", dest="no_fail", action="store_true", default=False,
help="continue past failure when building xo file")
subparsers.add_parser("dist_source", help="Create a tar source package")
subparsers.add_parser("build", help="Build generated files")
subparsers.add_parser(
"fix_manifest", help="Add missing files to the manifest (OBSOLETE)")
subparsers.add_parser("genpot", help="Generate the gettext pot file")
subparsers.add_parser("dev", help="Setup for development")
options = parser.parse_args()
source_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
config = Config(source_dir)
try:
globals()['cmd_' + options.command](config, options)
except (KeyError, IndexError):
parser.print_help()
if __name__ == '__main__':
start()
| i5o/sugar-toolkit-gtk3 | src/sugar3/activity/bundlebuilder.py | Python | lgpl-2.1 | 25,269 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
import Tkinter as TK
import math
class Ruleset (object):
def __init__ (self):
# move to somewhere else later (file ...)
self.ST_name = 'Strength'
self.ST_desc = 'The strength of your character affects hitpoints, carry weight and the weapons skill.'
self.AG_name = 'Agility'
self.AG_desc = 'The agility of your character influences armor class, action points and the weapons skill.'
self.IN_name = 'Intelligence'
self.IN_desc = 'Your intelligence is important for the technology skill and your ability to talk with other characters.'
self.weapon_name = 'Weapons'
self.weapon_desc = 'A high weapons skill will let you fire weapons more precisely.'
self.tech_name = 'Technology'
self.tech_desc = 'Boost this skill to become a real hacker.'
self.talk_name = 'Talk'
self.talk_desc = 'A high talk skill can save bullets.'
def set_main (self, ST, AG, IN):
self.ST = ST
self.AG = AG
self.IN = IN
# now calc boni
self.calc_boni()
def calc_boni (self):
self.STbonus = 0
self.AGbonus = 0
self.INbonus = 0
if self.ST > 4 :
self.STbonus = (self.ST - 4) * 1
if self.AG > 2 :
self.AGbonus = (self.AG - 2) * 1
if self.IN > 4 :
self.INbonus = (self.IN - 5) * 1
def skill_weapon (self, type, count):
# weapon = 2 x (ST + AG) + 10%
#if self.weapon == False:
self.weapon = 2 * (self.ST + self.AG) + self.ST + self.AG
if type == 1 :
# increase
if count != 0 :
self.weapon = self.weapon + count
if type == 0 :
# decrease
if (self.weapon - count) != 0 :
self.weapon = self.weapon - count
else:
self.weapon = 0
def skill_tech (self, type, count):
self.tech = 3 * self.IN + 2 * self.INbonus
if type == 1 :
# increase
if count != 0 :
self.tech = self.tech + count
if type == 0 :
# decrease
if (self.tech - count) != 0 :
self.tech = self.tech - count
else:
self.tech = 0
def skill_talk (self, type, count):
self.talk = 2 * self.IN + self.INbonus
if type == 1 :
# increase
if count != 0 :
self.talk = self.talk + count
if type == 0 :
# decrease
if (self.talk - count) != 0 :
self.talk = self.talk - count
else:
self.talk = 0
def calc_skills (self):
self.skill_weapon(0,0)
self.skill_tech(0,0)
self.skill_talk(0,0)
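# Worked example with the starting values used by the GUI below,
# set_main(2, 2, 2): no attribute exceeds its threshold, so all boni are 0 and
#   weapon = 2 * (2 + 2) + 2 + 2 = 12
#   tech   = 3 * 2 + 2 * 0       = 6
#   talk   = 2 * 2 + 0           = 4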
class GUI (object):
def __init__ (self):
self.root = TK.Tk()
self.root.title('FIFE Techdemo Ruleset-tester')
self.root.geometry("350x100")
# inject ruleset
self.RULES = Ruleset()
self.RULES.set_main(2,2,2)
self.RULES.calc_skills()
self.frm1 = TK.Frame(master=self.root)
self.frm1.grid(column = 1, row = 1)
self.create_widgets()
self.create_buttons()
def create_widgets (self):
mainstat = {
"a" : [self.RULES.ST_name, self.RULES.ST_desc, self.RULES.ST, self.RULES.STbonus],
"b" : [self.RULES.AG_name, self.RULES.AG_desc, self.RULES.AG, self.RULES.AGbonus],
"c" : [self.RULES.IN_name, self.RULES.IN_desc, self.RULES.IN, self.RULES.INbonus]
}
skills = {
"a" : [self.RULES.weapon_name, self.RULES.weapon_desc, self.RULES.weapon],
"b" : [self.RULES.tech_name, self.RULES.tech_desc, self.RULES.tech],
"c" : [self.RULES.talk_name, self.RULES.talk_desc, self.RULES.talk]
}
col = 1
row = 2
# container for generated entry-widgets
self.entries = []
self.entry_vars = []
# create widgets for mainstat
for key in mainstat:
label = TK.Label(self.frm1, text=mainstat[key][0], relief= TK.GROOVE, bd=0, width=10, anchor=TK.W)
label.grid(column = col, row = row)
col = col + 1
self.entry_vars.append(TK.StringVar(self.root))
entry_key = TK.Entry(self.frm1, width=2, textvariable=self.entry_vars[-1])
entry_key.grid(column = col, row = row, padx = 0)
entry_key.insert(0, mainstat[key][2])
col = col + 1
label = TK.Label(self.frm1, text=mainstat[key][3], relief= TK.RIDGE, bd=2, width=3)
label.grid(column = col, row = row)
row = row + 1
col = 1
self.entries.append(entry_key)
col = 5
row = 2
for key in skills:
label = TK.Label(self.frm1, text=skills[key][0], relief= TK.GROOVE, bd=0, width=10, anchor=TK.W)
label.grid(column = col, row = row, padx = 4)
col = col + 1
label = TK.Label(self.frm1, text=skills[key][2], relief= TK.RIDGE, bd=2, width=3)
label.grid(column = col, row = row)
row = row + 1
col = 5
def create_buttons (self):
col = 6
row = 6
button_calc = TK.Button(self.frm1, text='Calculate', command=self.calc)
button_calc.grid(column = col, row = row)
col = 7
button_quit = TK.Button(self.frm1, text='Quit', command=self.exit)
button_quit.grid(column = col, row = row)
pass
def calc (self):
# prepare entrys for calculation
tmp_vars = []
for i,var in enumerate(self.entry_vars) :
inumber = var.get()
tmp_vars.append(int(inumber))
# set new mainstats & skill values
# 0 = weapons
# 2 = talk
# 1 = technology
self.RULES.set_main(tmp_vars[0],tmp_vars[2],tmp_vars[1])
self.RULES.calc_skills()
# print new stats
self.create_widgets()
def exit(self):
self.root.quit()
def run (self):
self.root.mainloop()
# demo
if __name__ == '__main__':
gui = GUI()
gui.run()
| mgeorgehansen/FIFE_Technomage | tools/ruleset_tester.py | Python | lgpl-2.1 | 6,272 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
pycopia.WWW
===========
Web and web server related modules. Provides:
- WSGI web application framework.
- XHTML markup generator.
- Cookie handling.
- HTTP header parsing and generation.
- FCGI to WSGI server adapter.
- Enhanced urllib.
- List of common user agents.
"""
| xiangke/pycopia | WWW/pycopia/WWW/__init__.py | Python | lgpl-2.1 | 954 |
#!/usr/bin/python2.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
#
# $Id$
#
# Copyright (C) 1999-2006 Keith Dart <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
"""
XGzip Test.
"""
import sys
import qatest
import x_gzip
class XGzipBaseTest(qatest.Test):
pass
class ReadChunks(XGzipBaseTest):
def test_method(self, readsize):
chunks = []
gz = x_gzip.open("gzipped2.txt.gz", "r")
self.verboseinfo(gz.header)
self.assert_equal(gz.header.name, "gzipped.txt")
while 1:
d = gz.read(readsize)
if not d:
break
chunks.append(d)
gz.close()
del gz
data = "".join(chunks)
lines = data.split("\n")
# got chunks, data as text, and lines. Now do some checks.
self.assert_equal(len(data), 6378, "Should be 6378 chars, got %d" % (len(data),))
self.assert_equal(data.count("\n"), 80, "Should be 80 lines, got %d" % (len(lines),))
self.assert_equal(lines[40].strip(), "0:This file is compressed using gzip.")
return self.passed("all assertions passed.")
class XGzipSuite(qatest.TestSuite):
pass
def get_suite(conf):
suite = XGzipSuite(conf)
suite.add_test(ReadChunks, 512)
suite.add_test(ReadChunks, 1024)
suite.add_test(ReadChunks, 2048)
suite.add_test(ReadChunks, 4096)
suite.add_test(ReadChunks, 6377)
suite.add_test(ReadChunks, 6378)
suite.add_test(ReadChunks, 6379)
suite.add_test(ReadChunks, 8192)
return suite
def run(conf):
suite = get_suite(conf)
suite()
| xiangke/pycopia | experimental/pycopia/_unittest/test_x_gzip.py | Python | lgpl-2.1 | 2,104 |
from markdown.blockprocessors import BlockProcessor
from MooseCommonExtension import MooseCommonExtension
import glob
import re
import os
from markdown.util import etree
class MooseCarousel(BlockProcessor, MooseCommonExtension):
"""
Markdown extension for showing a bootstrap carousel of images.
Markdown syntax is:
!slideshow <options>
images/intro.png caption=Some caption
images/more*.png
Where <options> are key=value pairs.
See http://getbootstrap.com/javascript/#carousel for allowed options.
Additionally, "caption" can also be used on the slideshow line to
set a default caption.
It is assumed image names will have the same filepath as on the webserver.
"""
RE = re.compile(r'^!\ ?slideshow(.*)')
# If there are multiple carousels on the same page then
# they need to have different ids
MATCHES_FOUND = 0
def __init__(self, parser, root=None, **kwargs):
MooseCommonExtension.__init__(self)
BlockProcessor.__init__(self, parser, **kwargs)
self._root = os.path.join(root, 'docs/media')
# The default settings
self._settings = {'caption' : None,
'interval' : None,
'pause' : None,
'wrap' : None,
'keyboard' : None}
def parseFilenames(self, filenames_block):
"""
Parse a set of lines with filenames in them and an optional caption.
Filenames can contain wildcards and glob will be used to expand them.
Expected input is similar to:
images/1.png caption=My caption
images/other*.png
Input:
filenames_block[str]: String block to parse
Return:
list of dicts. Each dict has keys of "path" which is the filename path
and "caption" which is the associated caption. Caption will be "" if not
specified.
"""
lines = filenames_block.split("\n")
files = []
for line in lines:
sline = line.strip()
idx = sline.find("caption=")
if idx >=0 :
caption = sline[idx+8:].strip()
fname = sline[:idx].strip()
else:
caption = ""
fname = sline
new_files = glob.glob(os.path.join(self._root, fname))
if not new_files:
# If one of the paths is broken then
# we return an empty list to indicate
# an error state
return []
for f in new_files:
files.append({"path": f, "caption": caption})
return files
def test(self, parent, block):
"""
Test to see if we should process this block of markdown.
Inherited from BlockProcessor.
"""
return self.RE.search(block)
def run(self, parent, blocks):
"""
Called when it is determined that we can process this block.
This will convert the markdown into HTML
"""
sibling = self.lastChild(parent)
block = blocks.pop(0)
m = self.RE.search(block)
if m:
# Parse out the options on the slideshow line
options = m.group(1)
parsed_options, styles = self.getSettings(options)
block = block[m.end() + 1:] # removes the slideshow line
block, theRest = self.detab(block)
if m:
files = block
div = self.addStyle(etree.SubElement(parent, "div"), **styles)
filenames = self.parseFilenames(files)
if not filenames:
return self.createErrorElement(files, "No matching files found")
self.createCarousel(parsed_options, div, filenames)
# We processed this whole block so mark it as done
block = ""
else:
div = sibling
self.parser.parseChunk(div, block)
if theRest:
blocks.insert(0, theRest)
def createCarousel(self, options, top_div, files):
"""
Creates the actual HTML required for the carousel to work.
Input:
options[dict]: Set on the slideshow line of the markdown
top_div: div element that will be the carousel
files[list]: List of dicts with filename paths and associated captions
"""
carousel_options = {
"interval": options.get("interval", "5000"),
"pause": options.get("pause", "hover"),
"wrap": options.get("wrap", "true"),
"keyboard": options.get("keyboard", "true"),
}
cid = "carousel%s" % self.MATCHES_FOUND
top_div.set("id", cid)
top_div.set("class", "carousel slide")
top_div.set("data-ride", "carousel")
top_div.set("data-interval", carousel_options["interval"])
top_div.set("data-pause", carousel_options["pause"])
top_div.set("data-wrap", carousel_options["wrap"])
top_div.set("data-keyboard", carousel_options["keyboard"])
ol = etree.SubElement(top_div, 'ol')
ol.set("class", "carousel-indicators")
default_caption = options.get("caption", "")
for i in range(len(files)):
li = etree.SubElement(ol, 'li')
li.set("data-target", "#%s" % cid)
if i == 0:
li.set("class", "active")
li.set("data-slide-to", str(i))
inner_div = etree.SubElement(top_div, 'div')
inner_div.set("class", "carousel-inner")
inner_div.set("role", "listbox")
for i, f in enumerate(files):
item_div = etree.SubElement(inner_div, "div")
active = ""
if i == 0:
active = "active"
item_div.set("class", "item %s" % active)
img = etree.SubElement(item_div, "img")
img.set("src", os.path.join('/media', os.path.basename(f["path"])))
caption = f["caption"]
if not caption:
caption = default_caption
if caption:
cap_div = etree.SubElement(item_div, "div")
cap_div.set("class", "carousel-caption")
cap_div.text = caption
self.addControl(top_div, cid, "prev", "Previous")
self.addControl(top_div, cid, "next", "Next")
self.MATCHES_FOUND += 1
return top_div
def addControl(self, parent, cid, direction, text):
"""
Utility function to add the left/right controls on the carousel.
Input:
parent: parent element
cid: id of the carousel element
direction: "prev" or "next" to indicate which direction this control operates on.
text: The alternate text for screen readers.
"""
c = etree.SubElement(parent, "a")
alt_dir = "left"
chev = "fa-chevron-circle-left"
if direction == "next":
alt_dir = "right"
chev = "fa-chevron-circle-right"
c.set("class", "%s carousel-control" % alt_dir)
c.set("href", "#%s" % cid)
c.set("role", "button")
c.set("data-slide", direction)
s = etree.SubElement(c, "span")
s.set("class", "fa %s" % chev)
s.set("aria-hidden", "true")
s = etree.SubElement(c, "span")
s.set("class", "sr-only")
s.text = text
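# ----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original file).
# MooseDocs normally wires this processor up itself; the snippet below only
# shows how it could be attached to a plain Python-Markdown instance using the
# pre-3.0 OrderedDict registration API that this module targets.  The registry
# name 'moosecarousel' and root='.' are assumptions.
def _example_register_carousel(root='.'):
    import markdown
    md = markdown.Markdown()
    carousel = MooseCarousel(md.parser, root=root)
    # Claim "!slideshow" blocks before the built-in paragraph processor.
    md.parser.blockprocessors.add('moosecarousel', carousel, '_begin')
    return md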
| katyhuff/moose | python/MooseDocs/extensions/MooseCarousel.py | Python | lgpl-2.1 | 7,185 |
# Written by Bram Cohen
# see LICENSE.txt for license information
import sys
from types import UnicodeType, StringType, LongType, IntType, ListType, DictType
from re import compile
#reg = compile(r'^[^/\\.~][^/\\]*$')
#reg = compile(r'^[^/\\]*$')
ints = (LongType, IntType)
def check_info(info):
if type(info) != DictType:
raise ValueError, 'bad metainfo - not a dictionary'
if info.has_key('pieces'):
pieces = info.get('pieces')
if type(pieces) != StringType or len(pieces) % 20 != 0:
raise ValueError, 'bad metainfo - bad pieces key'
elif info.has_key('root hash'):
# Merkle
root_hash = info.get('root hash')
if type(root_hash) != StringType or len(root_hash) != 20:
raise ValueError, 'bad metainfo - bad root hash key'
piecelength = info.get('piece length')
if type(piecelength) not in ints or piecelength <= 0:
raise ValueError, 'bad metainfo - illegal piece length'
name = info.get('name')
if StringType != type(name) != UnicodeType:
raise ValueError, 'bad metainfo - bad name'
#if not reg.match(name):
# raise ValueError, 'name %s disallowed for security reasons' % name
if info.has_key('files') == info.has_key('length'):
raise ValueError, 'single/multiple file mix'
if info.has_key('length'):
length = info.get('length')
if type(length) not in ints or length < 0:
raise ValueError, 'bad metainfo - bad length'
else:
files = info.get('files')
if type(files) != ListType:
raise ValueError
for f in files:
if type(f) != DictType:
raise ValueError, 'bad metainfo - bad file value'
length = f.get('length')
if type(length) not in ints or length < 0:
raise ValueError, 'bad metainfo - bad length'
path = f.get('path')
if type(path) != ListType or path == []:
raise ValueError, 'bad metainfo - bad path'
for p in path:
if StringType != type(p) != UnicodeType:
raise ValueError, 'bad metainfo - bad path dir'
#if not reg.match(p):
# raise ValueError, 'path %s disallowed for security reasons' % p
for i in xrange(len(files)):
for j in xrange(i):
if files[i]['path'] == files[j]['path']:
raise ValueError, 'bad metainfo - duplicate path'
def check_message(message):
if type(message) != DictType:
raise ValueError
check_info(message.get('info'))
if StringType != type(message.get('announce')) != UnicodeType:
raise ValueError
def check_peers(message):
if type(message) != DictType:
raise ValueError
if message.has_key('failure reason'):
if type(message['failure reason']) != StringType:
raise ValueError
return
peers = message.get('peers')
if peers is not None:
if type(peers) == ListType:
for p in peers:
if type(p) != DictType:
raise ValueError
if type(p.get('ip')) != StringType:
raise ValueError
port = p.get('port')
                if type(port) not in ints or port <= 0:
raise ValueError
if p.has_key('peer id'):
id = p['peer id']
if type(id) != StringType or len(id) != 20:
raise ValueError
elif type(peers) != StringType or len(peers) % 6 != 0:
raise ValueError
# IPv6 Tracker extension. http://www.bittorrent.org/beps/bep_0007.html
peers6 = message.get('peers6')
if peers6 is not None:
if type(peers6) == ListType:
for p in peers6:
if type(p) != DictType:
raise ValueError
if type(p.get('ip')) != StringType:
raise ValueError
port = p.get('port')
                if type(port) not in ints or port <= 0:
raise ValueError
if p.has_key('peer id'):
id = p['peer id']
if type(id) != StringType or len(id) != 20:
raise ValueError
elif type(peers6) != StringType or len(peers6) % 18 != 0:
raise ValueError
interval = message.get('interval', 1)
if type(interval) not in ints or interval <= 0:
raise ValueError
minint = message.get('min interval', 1)
if type(minint) not in ints or minint <= 0:
raise ValueError
if type(message.get('tracker id', '')) != StringType:
raise ValueError
npeers = message.get('num peers', 0)
if type(npeers) not in ints or npeers < 0:
raise ValueError
dpeers = message.get('done peers', 0)
if type(dpeers) not in ints or dpeers < 0:
raise ValueError
last = message.get('last', 0)
if type(last) not in ints or last < 0:
raise ValueError
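# ----------------------------------------------------------------------------
# Hedged example (added for illustration; not part of the original module).
# A minimal single-file metainfo dict that passes check_message(); the values
# are made up, and real metainfo would come from bdecoding a .torrent file.
def _example_check():
    metainfo = {
        'announce': 'http://tracker.example.com/announce',
        'info': {
            'name': 'example.txt',
            'piece length': 262144,
            'pieces': '\x00' * 20,  # one fake 20-byte SHA1 digest
            'length': 12345,
        },
    }
    check_message(metainfo)  # raises ValueError if the structure is invalid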
| egbertbouman/tribler-g | Tribler/Core/BitTornado/BT1/btformats.py | Python | lgpl-2.1 | 5,210 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from .beta_distribution import BetaDistribution
__all__ = [
"BetaDistribution"
]
| mathLab/RBniCS | tutorials/10_weighted_uq/sampling/distributions/__init__.py | Python | lgpl-3.0 | 215 |
import celery.decorators
from celery.task.base import Task
task = celery.task
from celery.execute import send_task
Task = Task
CONFIG = None
| cloud9ers/j25framework | j25/tasks/__init__.py | Python | lgpl-3.0 | 142 |
#!/usr/bin/env python
# coding:utf-8
# Copyright (c) 2011, Vadim Velikodniy <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
import datetime, json, httplib, urlparse
import fotki.photo as photo
from fotki.auth import DEFAULT_AUTH
class CollectionException(Exception):
"""Класс-исключение для ошибок при работе с коллекцией."""
pass
class _Collection(object):
"""Класс, представляющий обобщённую коллекцию.
Позволяет получить общую информацию и список элементов."""
def __init__(self, url, auth=DEFAULT_AUTH):
"""Конструктор.
auth - «аутентификационный» объект,
url - ссылка на коллекцию."""
self._url = urlparse.urlparse(url)
self._auth = auth
self._entries = []
self._get_info()
    #FIXME Add error handling
    def _get_info(self):
        """Loads the collection information and the list of its entries."""
self._entries = []
connection = httplib.HTTPConnection(self._url.netloc)
headers = {'Accept': 'application/json'}
headers.update(self._auth.auth_header)
        path = self._url.path
        while True:
            connection.request('GET', path, headers=headers)
            response = connection.getresponse()
            data = response.read()
            self._info = json.loads(data)
            self._entries.extend(self._info['entries'])
            del self._info['entries']
            if 'next' not in self._info['links']:
                break
            # Follow pagination without losing the collection's own URL.
            path = urlparse.urlparse(self._info['links']['next']).path
        connection.close()
def _get_api_url(self):
"""Возвращает URL, связанный с коллекцией."""
return urlparse.urlunparse(self._url)
def _get_title(self):
return self._info['title']
    title = property(_get_title, doc='Collection title')
def _get_author(self):
return self._info['author']
author = property(_get_author,
                      doc='Name of the user who authored the collection')
# URLs ---------------------------------------------------------------------
def _get_view_url(self):
return self._info['links']['alternate']
view_url = property(_get_view_url,
                        doc='Link to the page for viewing the collection')
# Datetime -----------------------------------------------------------------
    # TODO Take the timezone into account
def _get_updating_time(self):
format_string = '%Y-%m-%dT%H:%M:%SZ'
return datetime.datetime.strptime(self._info['updated'], format_string)
updating_time = property(_get_updating_time,
                             doc='Date and time the collection was last updated')
# Standard protocols -------------------------------------------------------
    #TODO Find out why the imageCount field
    #in the document returned by the server is always 0.
def __len__(self):
"""Возвращает количество элементов в коллекции."""
return len(self._entries)
class PhotoCollection(_Collection):
"""Класс, представляющий общую коллекцию фотографий пользователя."""
def __init__(self, url, auth=DEFAULT_AUTH):
"""Конструктор."""
_Collection.__init__(self, url, auth)
    #FIXME Add handling of upload errors (!= 201 CREATED).
    #TODO Detect the MIME type and send it in the header.
def upload_photo(self, image_file, content_type = 'image/jpeg'):
"""Загружает новую фотографию и возвращает объект, связанный с ней.
После вызова этого метода можно откорректировать метаинформацию
фотографии и вызвать для неё метод класса Photo для сохранения на сервере.
После добавления фотографии список объектов обновляется и вновь
загруженная фотография не обязательно окажется в его конце.
image_file - файл с фотографией."""
headers = {
# 'Slug': 'filename.jpeg',
'Content-Type': content_type,
# 'Content-Length': len(data),
'Accept': 'application/json',
}
headers.update(self._auth.auth_header)
connection = httplib.HTTPConnection(self._url.netloc)
connection.request('POST', self._url.path, image_file, headers)
response = connection.getresponse()
response_data = response.read()
info = json.loads(response_data)
connection.close()
self._get_info()
return photo.Photo(info, self._auth)
# Sequence protocol --------------------------------------------------------
def __getitem__(self, key):
return photo.Photo(self._entries[key], self._auth)
def __delitem__(self, key):
photo.Photo(self._entries[key], self._auth).delete()
self._get_info()
def __iter__(self):
return (photo.Photo(e, self._auth) for e in self._entries)
class Album(PhotoCollection):
"""Класс, представляющий альбом."""
def __init__(self, url, auth=DEFAULT_AUTH):
"""Конструктор."""
PhotoCollection.__init__(self, url, auth)
def save(self):
"""Сохраняет информацию об альбоме на сервере."""
pass
def load(self):
"""Загружает информацию об альбоме с сервера."""
pass
def delete(self):
"""Удаляет альбом."""
pass
def _get_parent(self):
pass
def _set_parent(self, album):
pass
    parent = property(_get_parent, _set_parent, doc='Parent album')
def _get_title(self):
pass
def _set_title(self, text):
pass
    title = property(_get_title, _set_title, doc='Album title')
def _get_summary(self):
pass
def _set_summary(self, text):
pass
    summary = property(_get_summary, _set_summary, doc='Album description')
def _get_password(self):
pass
def _set_password(self, password):
pass
    password = property(_get_password, _set_password, doc='Album password')
class AlbumCollection(_Collection):
"""Класс, представляющий коллекцию альбомов пользователя."""
def __init__(self, url, auth=DEFAULT_AUTH):
_Collection.__init__(self, url, auth)
def create_album(self, title, parent=None):
pass
# Sequence protocol --------------------------------------------------------
def __getitem__(self, key):
return Album(self._entries[key]['links']['photos'], self._auth)
def __delitem__(self, key):
Album(self._entries[key]['links']['photos'], self._auth).delete()
self._get_info()
def __iter__(self):
return (Album(e['links']['photos'], self._auth) for e in self._entries)
#TODO Implement the TagCollection class
#FIXME Take into account that TagCollection has no entries
class TagCollection(_Collection):
"""Класс, представляющий коллекцию тегов пользователя."""
pass
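# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  The collection URL is an assumption -- in practice it comes from
# the user's service document -- and the calls below perform real HTTP
# requests, so this only illustrates the intended shape of the API.
def _example_list_photos(collection_url, auth=DEFAULT_AUTH):
    photos = PhotoCollection(collection_url, auth)
    print("%s: %d photos, updated %s"
          % (photos.title, len(photos), photos.updating_time))
    for p in photos:
        print(p)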
| velikodniy/python-fotki | fotki/collection.py | Python | lgpl-3.0 | 8,440 |
"""
Copyright (C) 2014, 申瑞珉 (Ruimin Shen)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import numpy
def make_matrix(pathLink, pathMatrix, undigraph, delimiter='\t'):
    links = numpy.loadtxt(pathLink, ndmin=2, dtype=int)
assert (links.shape[1] == 2)
links -= numpy.min(links)
count = numpy.max(links) + 1
m = numpy.zeros([count, count], dtype=bool)
for v1, v2 in links:
m[v1, v2] = True
if undigraph:
m[v2, v1] = True
for i in range(len(m)):
m[i, i] = True
numpy.savetxt(pathMatrix, m, fmt='%i', delimiter=delimiter)
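# -----------------------------------------------------------------------------
# Hedged example (added for illustration; not part of the original script).
# It writes a tiny 3-node undirected edge list to a temporary file and runs
# make_matrix() on it; the file names are generated on the fly.
def _example():
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write("0 1\n1 2\n")
        path_link = f.name
    path_matrix = path_link + '.matrix'
    make_matrix(path_link, path_matrix, True)
    print(open(path_matrix).read())  # 3x3 matrix with the diagonal forced to 1
    os.remove(path_link)
    os.remove(path_matrix)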
def main():
if len(sys.argv) > 3:
make_matrix(sys.argv[1], sys.argv[2], sys.argv[3] == 'y')
else:
print('Wrong number of arguments')
if __name__ == '__main__':
main()
| O-T-L/PyOptimization | Data/CommunityDiscovery/make_matrix.py | Python | lgpl-3.0 | 1,369 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
from PySide2.QtWidgets import QGroupBox, QCheckBox, QGridLayout, QPushButton, QSpinBox, \
QLabel, QSlider
from PySide2.QtCore import Qt
class Zoom_UI(QGroupBox):
"""
A QGroupbox widget with all properties of a Visca camera
"""
def __init__(self, visca, v):
super(Zoom_UI, self).__init__()
self.setTitle('Zoom')
self.v = v
self.visca = visca
zoom_layout = QGridLayout()
visca.zoom_stop = QPushButton()
visca.zoom_stop.setText('Stop')
visca.zoom_stop.clicked.connect(self.on_zoom_stop_pressed)
visca.zoom_wide = QPushButton()
visca.zoom_wide.setText('Wide')
visca.zoom_wide.clicked.connect(self.on_zoom_wide_pressed)
visca.zoom_tele = QPushButton()
visca.zoom_tele.setText('Tele')
visca.zoom_tele.clicked.connect(self.on_zoom_tele_pressed)
visca.zoom_tele_speed = QSpinBox()
visca.zoom_tele_speed.valueChanged.connect(self.on_zoom_tele_speed_valueChanged)
visca.zoom_tele_speed_label = QLabel()
visca.zoom_tele_speed_label.setText('Tele Speed')
visca.zoom_wide_speed = QSpinBox()
visca.zoom_wide_speed.valueChanged.connect(self.on_zoom_wide_speed_valueChanged)
visca.zoom_wide_speed_label = QLabel()
visca.zoom_wide_speed_label.setText('Wide Speed')
visca.zoom = QSlider()
visca.zoom.setOrientation(Qt.Horizontal)
visca.zoom.setMinimum(0)
visca.zoom.setMaximum(16384)
visca.zoom_direct_value = QSpinBox()
visca.zoom_direct_value.setKeyboardTracking(0)
visca.zoom_label = QLabel()
        visca.zoom_label.setText('Zoom Value')
visca.zoom.valueChanged.connect(visca.zoom_direct_value.setValue)
visca.zoom_direct_value.setMinimum(0)
visca.zoom_direct_value.setMaximum(65536)
visca.zoom_direct_value.valueChanged.connect(self.on_zoom_direct_valueChanged)
zoom_layout.addWidget(visca.zoom_wide_speed, 2, 1, 1, 1)
zoom_layout.addWidget(visca.zoom_tele_speed, 2, 3, 1, 1)
zoom_layout.addWidget(visca.zoom_label, 2, 4, 1, 1)
zoom_layout.addWidget(visca.zoom_wide_speed_label, 1, 1, 1, 1)
zoom_layout.addWidget(visca.zoom_tele_speed_label, 1, 3, 1, 1)
#zoom_layout.addWidget(self.zoom, 4, 2, 3, 1)
zoom_layout.addWidget(visca.zoom_wide, 3, 1, 1, 1)
zoom_layout.addWidget(visca.zoom_stop, 3, 2, 1, 1)
zoom_layout.addWidget(visca.zoom_tele, 3, 3, 1, 1)
zoom_layout.addWidget(visca.zoom_direct_value, 3, 4, 1, 1)
self.setLayout(zoom_layout)
def on_zoom_direct_valueChanged(self, zoom):
self.v.zoom = zoom
self.visca.zoom = zoom
def on_zoom_tele_pressed(self):
self.v.zoom_tele(self.visca.zoom_tele_speed)
def on_zoom_wide_pressed(self):
self.v.zoom_wide(self.visca.zoom_wide_speed)
def zoom_refresh(self):
zoom = self.v._query('zoom')
self.visca.zoom_direct_value.setValue(zoom)
def on_zoom_stop_pressed(self):
self.v.zoom_stop()
self.zoom_refresh()
def on_zoom_tele_speed_valueChanged(self, speed):
self.visca.zoom_tele_speed = speed
def on_zoom_wide_speed_valueChanged(self, speed):
self.visca.zoom_wide_speed = speed
| PixelStereo/viscam | src/zoom.py | Python | lgpl-3.0 | 3,377 |
#
# setup.py
#
"""
A (non-web) graphic user interface for interacting with AliMonitor
"""
import os
from setuptools import (setup, find_packages)
from importlib.machinery import SourceFileLoader
metadata_path = os.path.join(".", "alimaster", "metadata.py")
metadata = SourceFileLoader("metadata", metadata_path).load_module()
REQUIRES = [
'pillow',
'OpenSSL'
]
EXTRAS = {
':python_version=="3.3"': ['asyncio>=0.2.1']
}
PACKAGES = find_packages(exclude=['tests'])
SCRIPTS = [
'scripts/alianalysisbuilder',
'scripts/alimaster',
'scripts/alimaster-root-selector'
]
CLASSIFIERS = [
"Development Status :: 2 - Pre-Alpha",
"License :: OSI Approved :: GNU Lesser General Public License v3 or"
" later (LGPLv3+)",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Topic :: Scientific/Engineering :: Physics",
"Natural Language :: English"
]
setup(
name=metadata.package,
version=metadata.version,
author=metadata.author,
author_email=metadata.author_email,
url=metadata.url,
license=metadata.license,
description=__doc__.strip(),
packages=PACKAGES,
scripts=SCRIPTS,
install_requires=REQUIRES,
extras_require=EXTRAS,
classifiers=CLASSIFIERS
)
| akubera/AliMaster | setup.py | Python | lgpl-3.0 | 1,311 |
from nodesk_template import model_manager
import nodesk_template.models
from django.core.management import call_command
model_manager.sync_model("./nodesk_template/template_yaml")
| NoDesk/NoDesk-Server | nodesk_server/test_data.py | Python | lgpl-3.0 | 181 |
from pycp2k.inputsection import InputSection
from ._each274 import _each274
from ._density_gradient1 import _density_gradient1
from ._dielectric_function1 import _dielectric_function1
from ._polarisation_potential1 import _polarisation_potential1
class _sccs1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each274()
self.DENSITY_GRADIENT = _density_gradient1()
self.DIELECTRIC_FUNCTION = _dielectric_function1()
self.POLARISATION_POTENTIAL = _polarisation_potential1()
self._name = "SCCS"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'POLARISATION_POTENTIAL': 'POLARISATION_POTENTIAL', 'DENSITY_GRADIENT': 'DENSITY_GRADIENT', 'DIELECTRIC_FUNCTION': 'DIELECTRIC_FUNCTION', 'EACH': 'EACH'}
self._attributes = ['Section_parameters']
| SINGROUP/pycp2k | pycp2k/classes/_sccs1.py | Python | lgpl-3.0 | 1,144 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Matthias Luescher
#
# Authors:
# Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi. If not, see <http://www.gnu.org/licenses/>.
__all__ = ["bootstrap", "imageclean", "create"]
| lueschem/edi | edi/commands/imagecommands/__init__.py | Python | lgpl-3.0 | 806 |
# anxt/CameraSensor.py
# pyNXT - Python wrappers for aNXT
# Copyright (C) 2011 Janosch Gräf <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from io import IOBase
from .I2C import DEFAULT_I2C_ADDR
from .Libanxt import Libanxt
from .Sensor import DEFAULT_DIGITAL_PORT, DigitalSensor
from ctypes import byref, Structure, c_ubyte
# TODO split from C Colormap structure and put C structure into Libanxt (already is)
x = """
class Colormap(Structure):
FILE_SIG = b"#aNXTCam COLORMAP\n"
_fields_ = [("r", (16 * c_ubyte)),
("g", (16 * c_ubyte)),
("b", (16 * c_ubyte))]
@staticmethod
def load(file_or_path):
if (isinstance(file_or_path, IOBase)):
f = file_or_path
else:
f = open(file_or_path, "rb")
sig = f.readline()
if (sig!=Colormap.FILE_SIG):
raise ColormapError("Invalid signature")
r = tuple(f.read(16))
g = tuple(f.read(16))
b = tuple(f.read(16))
if (f!=file_or_path):
f.close()
return Colormap(r, g, b)
def save(self, file_or_path):
if (isinstance(file_or_path, IOBase)):
f = file_or_path
else:
f = open(file_or_path, "wb")
f.write(self.FILE_SIG)
f.write(bytes(self.r))
f.write(bytes(self.g))
f.write(bytes(self.b))
if (f!=file_or_path):
f.close()
def get_r(self, i):
return (self.r[i*2]<<8) | self.r[i*2+1]
def get_g(self, i):
return (self.g[i*2]<<8) | self.g[i*2+1]
def get_b(self, i):
return (self.b[i*2]<<8) | self.b[i*2+1]
def get_average_color(self, i):
r = self.get_r(i)
g = self.get_g(i)
b = self.get_b(i)
r_sum = r_n = 0
g_sum = g_n = 0
b_sum = b_n = 0
for i in range(16):
r_sum += ((r>>(16-i))&1)*i*16
r_n += 1
g_sum += ((g>>(16-i))&1)*i*16
g_n += 1
b_sum += ((b>>(16-i))&1)*i*16
b_n += 1
return (r_sum/r_n,
g_sum/g_n,
b_sum/b_n)
"""
class CameraSensor(DigitalSensor):
tracking_modes = {"OBJECT": 0x42,
"LINE": 0x4C}
def __init__(self, nxt, port = DEFAULT_DIGITAL_PORT, i2c_addr = DEFAULT_I2C_ADDR):
DigitalSensor.__init__(self, nxt, port, i2c_addr)
def read(self):
return self.get_objects()
def get_objects(self):
self.set_addr_param("cam")
n = int(self.nxt.libanxt.nxt_cam_num_objects(self.nxt.handle, self.port-1))
if (n>0):
objbuf = (Libanxt.CameraObject * n)()
self.nxt.wait_after_communication_command()
n = int(self.nxt.libanxt.nxt_cam_get_objects(self.nxt.handle, self.port-1, 0, n, objbuf))
if (n>0):
objects = []
for i in range(n):
objects.append((objbuf[i].color, (objbuf[i].x, objbuf[i].y), (objbuf[i].w, objbuf[i].h)))
return objects
elif (n==0):
return []
else:
return False
elif (n==0):
return []
else:
return False
def get_colormap(self):
self.set_addr_param("cam")
colormap = Libanxt.CameraColormap()
if (int(self.nxt.libanxt.nxt_cam_get_colormap(self.nxt.handle, self.port-1, byref(colormap)))==0):
c = ([], [], [])
for i in range(16):
c[0].append(colormap.r[i])
c[1].append(colormap.g[i])
c[2].append(colormap.b[i])
return tuple(c[0]), tuple(c[1]), tuple(c[2])
else:
return False
def enable_tracking(self, enable = True):
self.set_addr_param("cam")
self.nxt.libanxt.nxt_cam_enable_tracking(self.nxt.handle, self.port-1, int(enable))
def set_trackingmode(self, mode = "OBJECT"):
self.set_addr_param("cam")
self.nxt.libanxt.nxt_cam_set_trackingmode(self.nxt.handle, self.port-1, self.tracking_modes[mode])
def reset(self):
self.set_addr_param("cam")
self.nxt.libanxt.nxt_cam_reset(self.nxt.handle, self.port-1)
def enable_colorsort(self, enable = True):
self.set_addr_param("cam")
self.nxt.libanxt.nxt_cam_enable_colorsort(self.nxt.handle, self.port-1, int(enable))
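# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It assumes an already-connected aNXT handle ("nxt") and a camera
# attached to the default digital port.
def _example_track_objects(nxt):
    cam = CameraSensor(nxt)
    cam.set_trackingmode("OBJECT")
    cam.enable_tracking(True)
    for color, (x, y), (w, h) in cam.get_objects() or []:
        print("colormap %d at (%d, %d), size %dx%d" % (color, x, y, w, h))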
| jgraef/pyNXT | anxt/CameraSensor.py | Python | lgpl-3.0 | 5,070 |
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK rtl-demo-app.
#
# REDHAWK rtl-demo-app is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK rtl-demo-app is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from rtldevice import RTL2832U
from sim_rx_digitizer import sim_RX_DIGITIZER
from sim_fm_device import sim_FM_Device | RedhawkSDR/rtl-demo-app | server/devices/__init__.py | Python | lgpl-3.0 | 958 |
"""
unit tests for 'schema' module
"""
import unittest
from rez.vendor.schema.test_schema import TestSchema
if __name__ == '__main__':
unittest.main()
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| fnaum/rez | src/rez/tests/test_schema.py | Python | lgpl-3.0 | 851 |
__author__ = 'Stuart Gordon Reid'
__email__ = '[email protected]'
__website__ = 'http://www.stuartreid.co.za'
"""
This file contains a collection of Grid Elements which can be used by the primary window object. These include:
GridElement :- Abstract Base Class & Canvas
TextGridElement :- Grid Element containing text
ImageGridElement :- Grid Element containing an image
"""
from Tkinter import *
from ttk import *
import abc
class GridElement():
"""
This class contains data for grid elements
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, x, y, x_length, y_length, colour="black"):
self.x, self.y, self.colour = x, y, colour
self.x_length, self.y_length = x_length, y_length
self.canvas = None
@abc.abstractmethod
def add_elements(self):
pass
class FormGridElement(GridElement):
"""
This class contains data for grid elements
"""
def __init__(self, x, y, x_length, y_length, colour="black"):
GridElement.__init__(self, x, y, x_length, y_length, colour)
def add_elements(self):
assert isinstance(self.canvas, Canvas)
button = Button(self.canvas, text="Quit")
label = Label(self.canvas, text="This is a label")
window_one = self.canvas.create_window(10, 10, anchor=NW, window=label)
window_two = self.canvas.create_window(100, 10, anchor=NW, window=button) | StuartGordonReid/Comp-Finance | Interface/GridElements.py | Python | lgpl-3.0 | 1,437 |
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
import stair_bauzil_hrp2_path as tp
import time
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [-0.135,2, -1, 1, 0, 2.2])
ps = tp.ProblemSolver( fullBody )
r = tp.Viewer (ps)
#~ AFTER loading obstacles
rLegId = '0rLeg'
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,-0.105,0,]
rLegNormal = [0,1,0]
rLegx = 0.09; rLegy = 0.05
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 10000, "manipulability", 0.1)
lLegId = '1lLeg'
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,-0.105,0]
lLegNormal = [0,1,0]
lLegx = 0.09; lLegy = 0.05
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,rLegNormal, lLegx, lLegy, 10000, "manipulability", 0.1)
rarmId = '3Rarm'
rarm = 'RARM_JOINT0'
rHand = 'RARM_JOINT5'
rArmOffset = [0,0,-0.1]
rArmNormal = [0,0,1]
rArmx = 0.024; rArmy = 0.024
#disabling collision for hook
fullBody.addLimb(rarmId,rarm,rHand,rArmOffset,rArmNormal, rArmx, rArmy, 10000, "manipulability", 0.05, "_6_DOF", True)
#~ AFTER loading obstacles
larmId = '4Larm'
larm = 'LARM_JOINT0'
lHand = 'LARM_JOINT5'
lArmOffset = [-0.05,-0.050,-0.050]
lArmNormal = [1,0,0]
lArmx = 0.024; lArmy = 0.024
#~ fullBody.addLimb(larmId,larm,lHand,lArmOffset,lArmNormal, lArmx, lArmy, 10000, 0.05)
rKneeId = '0RKnee'
rLeg = 'RLEG_JOINT0'
rKnee = 'RLEG_JOINT3'
rLegOffset = [0.105,0.055,0.017]
rLegNormal = [-1,0,0]
rLegx = 0.05; rLegy = 0.05
#~ fullBody.addLimb(rKneeId, rLeg,rKnee,rLegOffset,rLegNormal, rLegx, rLegy, 10000, 0.01)
#~
lKneeId = '1LKnee'
lLeg = 'LLEG_JOINT0'
lKnee = 'LLEG_JOINT3'
lLegOffset = [0.105,0.055,0.017]
lLegNormal = [-1,0,0]
lLegx = 0.05; lLegy = 0.05
#~ fullBody.addLimb(lKneeId,lLeg,lKnee,lLegOffset,lLegNormal, lLegx, lLegy, 10000, 0.01)
#~
#~ fullBody.runLimbSampleAnalysis(rLegId, "jointLimitsDistance", True)
#~ fullBody.runLimbSampleAnalysis(lLegId, "jointLimitsDistance", True)
#~ fullBody.client.basic.robot.setJointConfig('LARM_JOINT0',[1])
#~ fullBody.client.basic.robot.setJointConfig('RARM_JOINT0',[-1])
q_0 = fullBody.getCurrentConfig();
#~ fullBody.createOctreeBoxes(r.client.gui, 1, rarmId, q_0,)
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.q_init[0:7]
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.q_goal[0:7]
fullBody.setCurrentConfig (q_init)
q_init = [
0.1, -0.82, 0.648702, 1.0, 0.0 , 0.0, 0.0, # Free flyer 0-6
0.0, 0.0, 0.0, 0.0, # CHEST HEAD 7-10
0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # LARM 11-17
0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17, # RARM 18-24
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # LLEG 25-30
0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0, # RLEG 31-36
]; r (q_init)
fullBody.setCurrentConfig (q_goal)
#~ r(q_goal)
q_goal = fullBody.generateContacts(q_goal, [0,0,1])
#~ r(q_goal)
fullBody.setStartState(q_init,[rLegId,lLegId]) #,rarmId,larmId])
fullBody.setEndState(q_goal,[rLegId,lLegId])#,rarmId,larmId])
#~
#~ configs = fullBody.interpolate(0.1)
configs = fullBody.interpolate(0.1)
#~ configs = fullBody.interpolate(0.15)
i = 0;
fullBody.draw(configs[i],r); i=i+1; i-1
r.loadObstacleModel ('hpp-rbprm-corba', "stair_bauzil", "contact")
#~ fullBody.exportAll(r, configs, 'stair_bauzil_hrp2_robust_2');
#~ fullBody.client.basic.robot.setJointConfig('LLEG_JOINT0',[-1])
#~ q_0 = fullBody.getCurrentConfig();
#~ fullBody.draw(q_0,r);
#~ print(fullBody.client.rbprm.rbprm.getOctreeTransform(rarmId, q_0))
#~
#~
#~ fullBody.client.basic.robot.setJointConfig('LLEG_JOINT0',[1])
#~ q_0 = fullBody.getCurrentConfig();
#~ fullBody.draw(q_0,r);
#~ print(fullBody.client.rbprm.rbprm.getOctreeTransform(rarmId, q_0))
#~ q_init = fullBody.generateContacts(q_init, [0,0,-1]); r (q_init)
#~ f1 = open("secondchoice","w+")
#~ f1 = open("hrp2_stair_not_robust_configs","w+")
#~ f1.write(str(configs))
#~ f1.close()
limbsCOMConstraints = { rLegId : {'file': "hrp2/RL_com.ineq", 'effector' : 'RLEG_JOINT5'},
lLegId : {'file': "hrp2/LL_com.ineq", 'effector' : 'LLEG_JOINT5'},
rarmId : {'file': "hrp2/RA_com.ineq", 'effector' : rHand} }
#~ larmId : {'file': "hrp2/LA_com.ineq", 'effector' : lHand} }
#~ fullBody.limbRRTFromRootPath(0,len(configs)-1,0,2)
from hpp.corbaserver.rbprm.tools.cwc_trajectory_helper import step, clean,stats, saveAllData, play_traj
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
def act(i, numOptim = 0, use_window = 0, friction = 0.5, optim_effectors = True, verbose = False, draw = False):
return step(fullBody, configs, i, numOptim, pp, limbsCOMConstraints, 0.4, optim_effectors = optim_effectors, time_scale = 20., useCOMConstraints = True, use_window = use_window,
verbose = verbose, draw = draw)
def play(frame_rate = 1./24.):
play_traj(fullBody,pp,frame_rate)
def saveAll(name):
saveAllData(fullBody, r, name)
def initConfig():
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r(q_init)
def endConfig():
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r(q_goal)
def rootPath():
tp.cl.problem.selectProblem("rbprm_path")
r.client.gui.setVisibility("hrp2_14", "OFF")
tp.r.client.gui.setVisibility("toto", "OFF")
r.client.gui.setVisibility("hyq", "OFF")
r.client.gui.setVisibility("hrp2_trunk_flexible", "ON")
tp.pp(0)
r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
r.client.gui.setVisibility("hyq", "ON")
tp.cl.problem.selectProblem("default")
def genPlan():
r.client.gui.setVisibility("hrp2_14", "ON")
tp.cl.problem.selectProblem("default")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
global configs
start = time.clock()
    configs = fullBody.interpolate(0.1, True)
end = time.clock()
print "Contact plan generated in " + str(end-start) + "seconds"
def contactPlan():
tp.cl.problem.selectProblem("default")
r.client.gui.setVisibility("hrp2_14", "ON")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
for i in range(1,len(configs)):
r(configs[i]);
time.sleep(0.5)
def interpolate():
tp.cl.problem.selectProblem("default")
r.client.gui.setVisibility("hrp2_14", "ON")
tp.r.client.gui.setVisibility("toto", "OFF")
tp.r.client.gui.setVisibility("hrp2_trunk_flexible", "OFF")
for i in range(7,20):
act(i,1,optim_effectors=True)
def play(frame_rate = 1./24.):
play_traj(fullBody,pp,frame_rate)
def a():
print "initial configuration"
initConfig()
def b():
print "end configuration"
endConfig()
def c():
print "displaying root path"
rootPath()
def d():
print "computing contact plan"
genPlan()
def e():
print "displaying contact plan"
contactPlan()
def f():
print "computing feasible com trajectory"
interpolate()
def g():
print "playing feasible trajectory"
play()
print "Root path generated in " + str(tp.t) + " ms."
| mylene-campana/hpp-rbprm-corba | script/scenarios/demos/stair_bauzil_hrp2_interp.py | Python | lgpl-3.0 | 7,729 |
# -*- coding: utf-8 -*-
import json
class ItemMetaInformation(object):
def __init__(self, instance, d):
self.instance = instance
self.d = d
def __getitem__(self, key):
return self.d[key]
def __setitem__(self, key, value):
self.d[key] = value
self.instance.metafld = json.dumps(self.d)
# TODO y: I should check when Django commits changes to the
# database and fix this if it causes the ORM to issue a database
# query every time a property is set
self.instance.save()
def __delitem__(self, key):
raise NotImplementedError
def as_dict(self):
return self.d.copy()
def serialize(obj):
obj = obj or {}
# TODO y: Check if object is dictionary
return json.dumps(obj)
def deserialize(s):
s = s or '{}'
# TODO y: Check if object is string and raise a TypeError if not
return json.loads(s)
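# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  "item" stands for any Django model instance that exposes a
# "metafld" text column and a save() method, as assumed by
# ItemMetaInformation above.
def _example_set_colour(item):
    meta = ItemMetaInformation(item, deserialize(item.metafld))
    meta['colour'] = 'red'  # serialized to item.metafld and saved immediately
    return meta.as_dict()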
| ydm/django-cart2 | cart/meta.py | Python | lgpl-3.0 | 924 |
"""
Creates sheet music for the song "O My Beloved Father".
Notes
-----
#. Written by David C. Stauffer in April 2017.
"""
#%% Imports
import abjad as ab
#%% Script
if __name__ == '__main__':
pass
#%% Score
score = ab.Score([])
staff = ab.Staff([])
time_signature = ab.TimeSignature((6, 8))
ab.attach(time_signature, staff)
score.append(staff)
#%% Notes
measures = [ \
"c''8 c''8 c''8 e''4 b'8", \
"a'4. g'4.", \
"c''8 d''8 e''8 c''4 c'''8", \
"g''4. g''4 e''8", \
"g''4 d''8 f''4 e''8", \
"c''4. c''4 c''8", \
"c''8 d''8 e''8 c''4 b'8", \
"d''4. d''4 g'8", \
"c''4. e''8 d''8 c''8", \
"a'4. g'4.", \
"c''8 d''8 e''8 c''4 c'''8", \
"g''4. g''4 a''8", \
"c'''4 a''8 g''4 f''8", \
"g''4. e''4.", \
"c''8 d''8 e''8 c''4 a'8", \
"c''4. c''8 r8 a''8", \
"c'''4 a''8 g''4 f''8", \
"c'''4 g''8 f''4 e''8", \
"a''2.", \
"a''4 f''8 e''4 d''8", \
"c''4 r8 r4.", \
"c''8 d''8 e''8 c''4 c'''8", \
"g''4. g''4.", \
"c''8 d''8 e''8 c''4 a'8", \
"c''4 r8 r4.", \
"r2."]
measures = [ab.Measure((6, 8), x) for x in measures if x]
staff.extend(measures)
ab.show(score)
#%% Details
# dynamic = ab.Dynamic('pp')
# ab.attach(dynamic, upper_measures[0][0])
#
# dynamic = ab.Dynamic('mp')
# ab.attach(dynamic, upper_measures[1][1])
#
# dynamic = ab.Dynamic('pp')
# ab.attach(dynamic, lower_measures[0][1])
#
# dynamic = ab.Dynamic('mp')
# ab.attach(dynamic, lower_measures[1][3])
#
# score.add_final_bar_line()
#
# # make look like Bartok beams
# upper_leaves = list(ab.iterate(upper_staff).by_leaf())
# lower_leaves = list(ab.iterate(lower_staff).by_leaf())
#
# beam = ab.Beam()
# ab.attach(beam, upper_leaves[:4])
#
# beam = ab.Beam()
# ab.attach(beam, lower_leaves[1:5])
#
# beam = ab.Beam()
# ab.attach(beam, lower_leaves[6:10])
#
# # Now some slurs:
# slur = ab.Slur()
# ab.attach(slur, upper_leaves[:5])
#
# slur = ab.Slur()
# ab.attach(slur, upper_leaves[5:])
#
# slur = ab.Slur()
# ab.attach(slur, lower_leaves[1:6])
#
# # Hairpins:
# crescendo = ab.Crescendo()
# ab.attach(crescendo, upper_leaves[-7:-2])
#
# decrescendo = ab.Decrescendo()
# ab.attach(decrescendo, upper_leaves[-2:])
#
# # A ritardando marking above the last seven notes of the upper staff:
# markup = ab.Markup('ritard.')
# text_spanner = ab.spannertools.TextSpanner()
# ab.override(text_spanner).text_spanner.bound_details__left__text = markup
# ab.attach(text_spanner, upper_leaves[-7:])
#
# # And ties connecting the last two notes in each staff:
# tie = ab.Tie()
# ab.attach(tie, upper_leaves[-2:])
#
# note_1 = lower_staff[-2]['upper voice'][0]
# note_2 = lower_staff[-1]['upper voice'][0]
# notes = [note_1, note_2]
# tie = ab.Tie()
# ab.attach(tie, notes)
#
# # The final result:
# ab.show(score)
| DStauffman/dstauffman2 | dstauffman2/music/O_My_Beloved_Father.py | Python | lgpl-3.0 | 3,060 |
from ..base import ShopifyResource
from shopify import mixins
from collect import Collect
import product
class CustomCollection(ShopifyResource, mixins.Metafields, mixins.Events):
def products(self):
return product.Product.find(collection_id=self.id)
def add_product(self, product):
return Collect.create({'collection_id': self.id, 'product_id': product.id})
def remove_product(self, product):
collect = Collect.find_first(collection_id=self.id, product_id=product.id)
if collect:
collect.destroy()
| roninio/gae-shopify-python-boilerplate | shopify/resources/custom_collection.py | Python | lgpl-3.0 | 560 |
import json
import logging
import pymongo
import re
import time
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
import urllib2
class TwitterStreamListener(StreamListener):
def __init__(self, follow_id):
self.init_twitter(follow_id)
self.targets = [".*@mtb.com", ".*@mandtbank.com", ".*@wilmingtontrust.com"]
self.consumer_key = "mHvyc3nMzqqubhxfXm9tpkQUW"
self.consumer_secret = "ymV1XinAttCfdCOOwF22V3LnDiwrseczb5ybNdsDzfI1Zv9qzM"
self.access_token = "3892243054-D0aqk03JBlOnaOr58C8v07oHNGL7yPEmzRLrdNE"
self.access_token_secret = "zAkZqi5brWzCtADpsZGR6cmwh8meAVqnrSY1uCFJs1A9s"
self.init_db()
def build_mongo_document(self, target, feed_data, dump_id):
        doc = {"target": target,
               "dump_id": dump_id,
               "feed_data": feed_data}
return doc
def download_dump(self, url):
"""
Download a data dump and save it to a database.
@param url the URL of the data dump
@param doc_id the ID of the database record to which the data
dump will be added
@return None
"""
# Download the data dump
res = urllib2.urlopen(url)
data = res.read()
# Insert the dump into the database
doc = dict()
doc["url"] = url
doc["date_downloaded"] = time.strftime("%m/%d/%Y %H:%M:%S")
doc["data"] = data
doc_id = self.mongo_collection.insert_one(doc).inserted_id
logging.info("Saved data at %s to document with ID %s" % (url, doc_id))
return doc_id
def init_db(self):
self.mongo_client = pymongo.MongoClient("localhost", 27017)
self.mongo_db = self.mongo_client["arachnid"]
self.mongo_collection = self.mongo_db["dumpmon"]
def init_twitter(self, follow_id):
self.follow_id = follow_id
def listen(self):
auth = OAuthHandler(self.consumer_key, self.consumer_secret)
auth.set_access_token(self.access_token, self.access_token_secret)
stream = Stream(auth, self)
logging.info("Following Twitter feed for user ID %s" % self.follow_id)
stream.filter(follow=[self.follow_id])
def on_data(self, data):
parsed_json = json.loads(data)
logging.info("Received tweet with ID %s from user %s: %s" % \
(parsed_json["id"], parsed_json["user"]["screen_name"], \
parsed_json["text"]))
# Download data dump found in Tweet
dump_url = parsed_json["entities"]["urls"][0]["expanded_url"]
dump_id = self.download_dump(dump_url)
# Search data dump for targets
dump_doc = self.mongo_collection.find_one({"_id": dump_id})
found_one = False
for regex in self.targets:
if re.match(regex, dump_doc["data"]):
found_one = True
logging.critical("MATCH FOUND: target=%s, post=%s" % \
(regex, data))
doc = self.build_mongo_document(regex, data, dump_id)
doc_id = self.mongo_collection.insert_one(doc).inserted_id
logging.info("Stored matched Twitter post with ID %s" % doc_id)
# Remove the dump from the database if no matches were found
if not found_one:
self.mongo_collection.remove({"_id": dump_id})
logging.info("No matches found; removed dump with ID %s" % dump_id)
def on_error(self, status):
logging.warn("Error reading from Twitter feed. Status = %s" % status)
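# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  The numeric Twitter user id is a placeholder; listen() blocks
# forever and uses the credentials hard-coded in __init__.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    listener = TwitterStreamListener("1234567890")
    listener.listen()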
| dwindsor/arachnid-old | arachnid/twitter/streamlistener.py | Python | lgpl-3.0 | 3,635 |
import Queue
from src.utils import logger
class OperationQueue():
"""Simple queue specifically for server operations."""
def __init__(self):
self.queue = Queue.Queue()
self.op_in_progress = False
self.paused = False
def queue_dump(self):
return [op for op in self.queue.queue]
def remove(self, operation):
try:
logger.debug(
"Removing ({0}, {1}) from queue.".format(
operation.type, operation.id
)
)
self.queue.queue.remove(operation)
return True
except Exception as e:
logger.error(
"Failed to remove operation from queue: {0}".format(operation)
)
logger.exception(e)
return False
def put_non_duplicate(self, operation):
"""
Put the operation in queue if no other operation of the same type
exists.
"""
if operation.type in [op.type for op in self.queue_dump()]:
return False
return self.put(operation)
def _put_front(self, operation):
new_queue = Queue.Queue()
new_queue.put(operation)
for op in self.queue_dump():
new_queue.put(op)
# +1 to take in to account the newest operation added to the front
new_queue.unfinished_tasks = self.queue.unfinished_tasks + 1
self.queue = new_queue
def put(self, operation, put_front=False):
"""
Attempts to add an item to the queue.
Args:
operation (SofOperation): Operation to be added.
Kwargs:
put_front (bool): Determines whether the operation be placed
in the front of the queue or in the back.
Returns:
True if item was successfully added, false otherwise.
"""
result = False
try:
if operation:
if put_front:
self._put_front(operation)
else:
self.queue.put(operation)
result = True
try:
logger.debug(
"Added ({0}, {1}) to queue."
.format(operation.type, operation.id)
)
except Exception:
logger.debug(
"Added {0} to queue."
.format(operation)
)
except Queue.Full as e:
logger.error("Agent is busy. Ignoring operation.")
result = False
except Exception as e:
logger.error("Error adding operation to queue.")
logger.error("Message: %s" % e)
result = False
return result
def get(self):
"""
Attempts to get an operation from the queue if no operation is pending.
Returns:
The operation if it was successfully retrieved, None otherwise.
"""
operation = None
if (not self.op_in_progress) and (not self.paused):
try:
operation = self.queue.get_nowait()
self.op_in_progress = True
try:
logger.debug(
"Popping ({0}, {1}) from queue.".format(
operation.type, operation.id
)
)
except Exception:
logger.debug("Popping {0} from queue.".format(operation))
except Queue.Empty as e:
# logger.debug("Operations queue is empty.")
operation = None
except Exception as e:
logger.error("Error accessing operation queue.")
logger.error("Message: %s" % e)
operation = None
return operation
def done(self):
"""
Indicates that an operation is done.
@return: Nothing
"""
try:
logger.debug("Unfinished tasks before done: {0}".format(self.queue.unfinished_tasks))
self.queue.task_done()
logger.debug("Unfinished tasks after done: {0}".format(self.queue.unfinished_tasks))
except Exception as e:
logger.error("Error marking operation as done.")
logger.exception(e)
finally:
# Mark as false to continue processing operations
self.op_in_progress = False
def pause(self):
self.paused = True
def resume(self):
self.paused = False
def set_operation_caller(self, callback):
self.callback = callback
def _is_op_pending(self, running):
self.op_in_progress = running
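# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It only shows the get()/done() contract; the real agent hands the
# popped operation to its operation handler where the "pass" is.
def _example_drain(queue):
    op = queue.get()
    while op is not None:
        try:
            pass  # process "op" here
        finally:
            queue.done()  # clears op_in_progress so the next get() can succeed
        op = queue.get()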
| vFense/vFenseAgent-nix | agent/src/serveroperation/operationqueue.py | Python | lgpl-3.0 | 4,766 |
from config import settings
print settings
print settings['core']['location'] | jonshern/raspberrypi-indoorhealthmonitor | testing/configtest.py | Python | unlicense | 80 |
# Python 2.7 ships the config module as "ConfigParser", while Python 3 renames
# it to "configparser".  Trying the Python 2 import first and falling back to
# the Python 3 one lets this script run on both (at least 2.7 and 3.4);
# everything after the import is identical.
def login():
    try:
        import ConfigParser as configparser  # Python 2
    except ImportError:
        import configparser  # Python 3
    config = configparser.ConfigParser()
    config.read('auth.ini')
    info = {}
    info['name'] = config.get('credentials', 'username')
    info['password'] = config.get('credentials', 'password')
    info['botname'] = config.get('credentials', 'botname')
    return info
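# -----------------------------------------------------------------------------
# Added illustration (not part of the original script): login() expects an
# auth.ini file next to it shaped like this assumed template:
#
#   [credentials]
#   username = your_reddit_username
#   password = your_reddit_password
#   botname = my_karma_bot
if __name__ == "__main__":
    creds = login()
    print("Logging in as %s (bot: %s)" % (creds['name'], creds['botname']))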
| xvang/reddit-bots | Karma_info_bot/login.py | Python | unlicense | 1,033 |
import logging
from app.models import Rc, Button, Node, Arduino, Radio
from app import db
from datetime import datetime
class RcHelper:
def __init__(self, rc_id = None):
self.set(rc_id)
def get(self):
return self.rc
def set(self, rc_id):
self.rc = Rc.query.filter_by(id = rc_id).first()
def getRcs(self):
rcs = []
for rc in Rc.query.order_by(Rc.order).all():
r = {
'id': rc.id,
'name': rc.name,
'icon': rc.icon,
'order': rc.order,
'public': rc.public
}
rcs.append(r)
return rcs
def createRc(self, params):
rc = Rc(name = params['name'],
icon = params['icon'],
order = params['order'],
public = params['public'],
timestamp = datetime.utcnow())
db.session.add(rc)
db.session.commit()
return {'id': rc.id,
'name': rc.name,
'icon': rc.icon,
'order': rc.order,
'public': rc.public}
def getRc(self):
if self.rc is None:
return None
return {'id': self.rc.id,
'name': self.rc.name,
'icon': self.rc.icon,
'order': self.rc.order,
'public': self.rc.public}
def updateRc(self, params):
if self.rc is None:
return None
self.rc.name = params['name']
self.rc.icon = params['icon']
self.rc.order = params['order']
self.rc.public = params['public']
self.rc.timestamp = datetime.utcnow()
db.session.commit()
return {'id': self.rc.id,
'name': self.rc.name,
'icon': self.rc.icon,
'order': self.rc.order,
'public': self.rc.public}
def deleteRc(self):
if self.rc is None:
return None
for btn in self.rc.buttons:
db.session.delete(btn)
db.session.commit()
db.session.delete(self.rc)
db.session.commit()
self.rc = None
return True
def getButtons(self):
if self.rc is None:
return None
buttons = []
for button in self.rc.buttons.order_by(Button.order_ver.asc(), Button.order_hor.asc()).all():
b = {'id': button.id,
'rc_id': button.rc_id,
'radio_id': button.radio_id,
'name': button.name,
'color': button.color,
'order_ver': button.order_ver,
'order_hor': button.order_hor,
'message': button.message,
'type': button.type}
buttons.append(b)
return buttons
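# -----------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module).  It assumes an active Flask application context so the
# Flask-SQLAlchemy session used by the helpers is available; the field values
# are made up.
def _example_create_rc():
    helper = RcHelper()
    created = helper.createRc({'name': 'Living room TV', 'icon': 'tv',
                               'order': 1, 'public': True})
    helper.set(created['id'])
    return helper.getRc()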
class BgroupHelper:
def __init__(self, rc_id, group_id = None):
self.rc = Rc.query.filter_by(id = rc_id).first()
self.set(group_id)
def get(self):
return self.rc
def set(self, group_id):
if self.rc is None:
self.button = None
else:
self.button = self.rc.bgroups.filter((Bgroup.id == group_id)).first()
# def getRcs(self):
# rcs = []
# for rc in Rc.query.order_by(Rc.order).all():
# r = {
# 'id': rc.id,
# 'name': rc.name,
# 'icon': rc.icon,
# 'order': rc.order,
# 'public': rc.public
# }
# rcs.append(r)
# return rcs
# def createRc(self, params):
# rc = Rc(name = params['name'],
# icon = params['icon'],
# order = params['order'],
# public = params['public'],
# timestamp = datetime.utcnow())
# db.session.add(rc)
# db.session.commit()
# return {'id': rc.id,
# 'name': rc.name,
# 'icon': rc.icon,
# 'order': rc.order,
# 'public': rc.public}
# def getRc(self):
# if self.rc is None:
# return None
# return {'id': self.rc.id,
# 'name': self.rc.name,
# 'icon': self.rc.icon,
# 'order': self.rc.order,
# 'public': self.rc.public}
# def updateRc(self, params):
# if self.rc is None:
# return None
# self.rc.name = params['name']
# self.rc.icon = params['icon']
# self.rc.order = params['order']
# self.rc.public = params['public']
# self.rc.timestamp = datetime.utcnow()
# db.session.commit()
# return {'id': self.rc.id,
# 'name': self.rc.name,
# 'icon': self.rc.icon,
# 'order': self.rc.order,
# 'public': self.rc.public}
# def deleteRc(self):
# if self.rc is None:
# return None
# for btn in self.rc.buttons:
# db.session.delete(btn)
# db.session.commit()
# db.session.delete(self.rc)
# db.session.commit()
# self.rc = None
# return True
class ButtonHelper:
def __init__(self, btn_id = None):
self.set(btn_id)
def get(self):
return self.button
def set(self, btn_id):
self.button = Button.query.filter_by(id = btn_id).first() if btn_id else None
def createButton(self, params):
btn = Button(rc_id = params['rc_id'],
radio_id = params['radio_id'],
name = params['name'],
order_hor = params['order_hor'],
order_ver = params['order_ver'],
color = params['color'],
type = params['type'],
message = params['message'],
timestamp = datetime.utcnow())
db.session.add(btn)
db.session.commit()
self.button = btn
return {'id': btn.id,
'rc_id' : btn.rc_id,
'radio_id': btn.radio_id,
'name': btn.name,
'order_hor': btn.order_hor,
'order_ver': btn.order_ver,
'color': btn.color,
'type': btn.type,
'message': btn.message}
def getButton(self):
if self.button is None:
return None
return {'id': self.button.id,
'rc_id' : self.button.rc_id,
'radio_id': self.button.radio_id,
'name': self.button.name,
'order_hor': self.button.order_hor,
'order_ver': self.button.order_ver,
'color': self.button.color,
'message': self.button.message,
'type': self.button.type}
def updateButton(self, params):
if self.button is None:
return None
self.button.radio_id = params['radio_id']
self.button.name = params['name']
self.button.order_hor = params['order_hor']
self.button.order_ver = params['order_ver']
self.button.color = params['color']
self.button.type = params['type']
self.button.message = params['message']
self.button.timestamp = datetime.utcnow()
db.session.commit()
return {'id': self.button.id,
'rc_id' : self.button.rc_id,
'radio_id': self.button.radio_id,
'name': self.button.name,
'order_hor': self.button.order_hor,
'order_ver': self.button.order_ver,
'color': self.button.color,
'type': self.button.type,
'message': self.button.message}
def deleteButton(self):
if self.button is None:
return None
button = {'id': self.button.id,
'rc_id' : self.button.rc_id,
'radio_id': self.button.radio_id,
'name': self.button.name,
'order_hor': self.button.order_hor,
'order_ver': self.button.order_ver,
'color': self.button.color,
'type': self.button.type,
'message': self.button.message}
db.session.delete(self.button)
db.session.commit()
self.button = None
return button
# def getNode(self):
# if self.rc is None or self.button is None:
# return None
# return Node.query.filter_by(id = self.button.node_id).first()
def catchIrSignal(self, node_sevice, event):
node = Node.query.filter_by(id = self.button.node_id).first()
event.host_name = node.host_name
event.button_id = self.button.id
if node is not None and node_sevice.pushToNode(event):
return True
return False
def getHostName(self):
if self.button is None:
return None
radio = Radio.query.filter_by(id = self.button.radio_id).first()
return radio.arduino.node.host_name
class NodeHelper:
def __init__(self, node_id = None):
self.set(node_id)
def get(self):
return self.node
def set(self, node_id):
self.node = Node.query.filter_by(id = node_id).first() if node_id else None
def getNodes(self):
nodes = []
for node in Node.query.order_by(Node.order).all():
n = {'id': node.id,
'name': node.name,
'host_name': node.host_name,
'order': node.order}
nodes.append(n)
return nodes
def createNode(self, params):
node = Node(
# name = params['name'],
host_name = params['host_name'],
order = params['order'],
timestamp = datetime.utcnow())
db.session.add(node)
db.session.commit()
return {'id': node.id,
'name': node.name,
'host_name': node.host_name,
'order': node.order}
def getNode(self):
if self.node is None:
return None
return {'id': self.node.id,
# 'name': self.node.name,
'host_name': self.node.host_name,
'order': self.node.order}
def updateNode(self, params):
if self.node is None:
return None
# self.node.name = params['name']
self.node.host_name = params['host_name']
self.node.order = params['order']
self.node.timestamp = datetime.utcnow()
db.session.commit()
return {'id': self.node.id,
# 'name': self.node.name,
'host_name': self.node.host_name,
'order': self.node.order}
def deleteNode(self):
if self.node is None:
return None
for arduino in self.node.arduinos:
db.session.delete(arduino)
db.session.commit()
db.session.delete(self.node)
db.session.commit()
self.node = None
return True
def getNodeByName(self, host_name):
try:
node = Node.query.filter_by(host_name = host_name).first()
except Exception as e:
db.session.rollback()
logging.error('[helpers] db session error. rolled back')
logging.error(str(e))
return False
return node
class ArduinoHelper:
def __init__(self, arduino_id = None):
self.set(arduino_id)
def get(self):
return self.arduino
def getNode(self):
if self.arduino is None:
return None
return self.arduino.node
def set(self, arduino_id):
self.arduino = Arduino.query.filter_by(id = arduino_id).first() if arduino_id else None
def getArduinos(self):
arduinos = []
for arduino in Arduino.query.order_by(Arduino.order).all():
a = {'id': arduino.id,
'node_id': arduino.node_id,
'usb': arduino.usb,
'name': arduino.name,
'order': arduino.order}
arduinos.append(a)
return arduinos
def createArduino(self, params):
arduino = Arduino(node_id = params['node_id'],
usb = params['usb'],
name = params['name'],
order = params['order'],
timestamp = datetime.utcnow())
db.session.add(arduino)
db.session.commit()
self.arduino = arduino
return {'id': arduino.id,
'node_id': arduino.node_id,
'usb': arduino.usb,
'name': arduino.name,
'order': arduino.order}
def getArduino(self):
if self.arduino is None:
return None
return {'id': self.arduino.id,
'node_id': self.arduino.node_id,
'usb': self.arduino.usb,
'name': self.arduino.name,
'order': self.arduino.order}
def updateArduino(self, params):
if self.arduino is None:
return None
self.arduino.usb = params['usb']
self.arduino.name = params['name']
self.arduino.order = params['order']
self.arduino.timestamp = datetime.utcnow()
db.session.commit()
return {'id': self.arduino.id,
'node_id': self.arduino.node_id,
'usb': self.arduino.usb,
'name': self.arduino.name,
'order': self.arduino.order}
def deleteArduino(self):
if self.arduino is None:
return None
arduino = {'id': self.arduino.id,
'node_id': self.arduino.node_id,
'usb': self.arduino.usb,
'name': self.arduino.name,
'order': self.arduino.order}
db.session.delete(self.arduino)
db.session.commit()
self.arduino = None
return arduino
class RadioHelper:
def __init__(self, radio_id = None):
self.set(radio_id)
def get(self):
return self.radio
def set(self, radio_id):
self.radio = Radio.query.filter_by(id = radio_id).first()
def getRadios(self):
radios = []
for radio in Radio.query.order_by(Radio.order).all():
r = {'id': radio.id,
'arduino_id': radio.arduino_id,
'type': radio.type,
'pipe': radio.pipe,
'name': radio.name,
'on_request': radio.on_request,
'expired_after': radio.expired_after,
'enabled': radio.enabled,
'order': radio.order}
radios.append(r)
return radios
def createRadio(self, params):
radio = Radio(arduino_id = params['arduino_id'],
type = params['type'],
pipe = params['pipe'],
name = params['name'],
enabled = params['enabled'],
on_request = params['on_request'],
expired_after = params['expired_after'],
order = params['order'],
timestamp = datetime.utcnow())
db.session.add(radio)
db.session.commit()
return {'id': radio.id,
'arduino_id': radio.arduino_id,
'type': radio.type,
'pipe': radio.pipe,
'name': radio.name,
'on_request': radio.on_request,
'expired_after': radio.expired_after,
'enabled': radio.enabled,
'order': radio.order}
def getRadio(self):
if self.radio is None:
return None
return {'id': self.radio.id,
'arduino_id': self.radio.arduino_id,
'type': self.radio.type,
'pipe': self.radio.pipe,
'name': self.radio.name,
'on_request': self.radio.on_request,
'expired_after': self.radio.expired_after,
'enabled': self.radio.enabled,
'order': self.radio.order}
def updateRadio(self, params):
if self.radio is None:
return None
self.radio.arduino_id = params['arduino_id']
self.radio.type = params['type']
self.radio.pipe = params['pipe']
self.radio.name = params['name']
self.radio.on_request = params['on_request']
self.radio.expired_after = params['expired_after']
self.radio.enabled = params['enabled']
self.radio.order = params['order']
self.radio.timestamp = datetime.utcnow()
db.session.commit()
return {'id': self.radio.id,
'arduino_id': self.radio.arduino_id,
'type': self.radio.type,
'pipe': self.radio.pipe,
'name': self.radio.name,
'on_request': self.radio.on_request,
'expired_after': self.radio.expired_after,
'enabled': self.radio.enabled,
'order': self.radio.order}
def deleteRadio(self):
if self.radio is None:
return None
db.session.delete(self.radio)
db.session.commit()
self.radio = None
return True
def getByPipe(self, pipe):
return Radio.query.filter_by(pipe = pipe).first() | notfoundsam/raspberry | app/helpers.py | Python | unlicense | 17,532 |
# -*- coding: utf-8 -*-
"""
Default configuration which is always loaded
"""
import logging
DEBUG = False
LOGGING_SEVERITY = logging.WARNING
| tbalive/website | config/default.py | Python | unlicense | 143 |
import pyglet, util
pyglet.resource.path = ['../resources']
pyglet.resource.reindex()
player_image = pyglet.resource.image("player.png")
bullet_image = pyglet.resource.image("bullet.png")
asteroid_image = pyglet.resource.image("asteroid.png")
engine_image = pyglet.resource.image("engine_trail.png")
engine_image.anchor_x = engine_image.width * 1.5
engine_image.anchor_y = engine_image.height/2
util.center_image(player_image)
util.center_image(bullet_image)
util.center_image(asteroid_image) | CyanCorsair/asteroids | version_2/game/resources.py | Python | unlicense | 499 |
from django import forms
from blog.models import Entry, Comment, UserProfile
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class LoginForm(forms.Form):
username = forms.CharField(max_length=25,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})) # NOQA
password = forms.CharField(required=True,
widget=forms.PasswordInput(attrs={'class': 'form-control'})) # NOQA
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if not user or not user.is_active:
            raise forms.ValidationError("Sorry, that login was invalid. "
                                        "Please try again.")
return self.cleaned_data
def login(self, request):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
user = authenticate(username=username, password=password)
return user
class SignupForm(forms.ModelForm):
class Meta:
model = User
fields = ['username', 'email', 'password']
username = forms.CharField(required=True,
max_length=25,
widget=forms.TextInput(attrs={'class': 'form-control'})) # NOQA
email = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'})) # NOQA
password = forms.CharField(required=True,
widget=forms.PasswordInput(attrs={'class': 'form-control'})) # NOQA
class PostForm(forms.ModelForm):
class Meta:
model = Entry
fields = ['title', 'post']
title = forms.CharField(required=True,
widget=forms.TextInput(attrs={'class': 'form-control'})) # NOQA
post = forms.CharField(max_length=2000,
required=True,
widget=forms.Textarea(attrs={'class': 'form-control'})) # NOQA
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('text',)
text = forms.CharField(max_length=2000,
label="Comment",
required=True,
widget=forms.Textarea(attrs={'class': 'form-control'})) # NOQA
class UpdateEmailForm(forms.ModelForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ('email',)
def save(self):
user = User.objects.get(pk=self.instance.pk)
user.email = self.cleaned_data['email']
user.save()
class UpdateAboutForm(forms.ModelForm):
about = forms.CharField(required=True,
widget=forms.Textarea(attrs={'class': 'form-control'})) # NOQA
class Meta:
model = UserProfile
fields = ('about',)
| paulmouzas/blogodrone | blog/forms.py | Python | unlicense | 3,005 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import re
from setuptools import setup
# Package metadata.
name = "db-dtypes"
description = "Pandas Data Types for SQL systems (BigQuery, Spanner)"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 4 - Beta"
dependencies = [
"packaging >= 17.0",
"pandas >= 0.24.2, < 2.0dev",
"pyarrow>=3.0.0, <8.0dev",
"numpy >= 1.16.6, < 2.0dev",
]
package_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(package_root, "db_dtypes", "version.py")) as f:
version = re.search('__version__ = "([^"]+)"', f.read()).group(1)
def readme():
with io.open("README.rst", "r", encoding="utf8") as f:
return f.read()
setup(
name=name,
version=version,
description=description,
long_description=readme(),
long_description_content_type="text/x-rst",
author="The db-dtypes Authors",
author_email="[email protected]",
packages=["db_dtypes"],
url="https://github.com/googleapis/python-db-dtypes-pandas",
keywords=["sql", "pandas"],
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Database :: Front-Ends",
],
platforms="Posix; MacOS X; Windows",
install_requires=dependencies,
python_requires=">=3.6, <3.11",
tests_require=["pytest"],
)
| googleapis/python-db-dtypes-pandas | setup.py | Python | apache-2.0 | 2,466 |
# -*- coding: utf-8 -*-
"""Base class for SQLite plugin path helper."""
import abc
class BaseSQLitePluginPathHelper(object):
"""Class representing the base class for the SQLite plugin path helper.
Attributes:
formatter_file_path (str): the path to the formatter file
parser_file_path (str): the path to the parser file
formatter_test_file_path (str): the path to the formatter test file
parser_test_file_path (str): the path to the parser test file
database_path (str): the path to the database file
parser_init_file_path (str): the path to the parser init file
formatter_init_file_path (str): the path to the formatter init file
"""
__metaclass__ = abc.ABCMeta
def __init__(self):
"""Initializes the SQLite plugin helper."""
super().__init__()
self.formatter_file_path = None
self.parser_file_path = None
self.formatter_test_file_path = None
self.parser_test_file_path = None
self.database_path = None
self.parser_init_file_path = None
self.formatter_init_file_path = None
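# Sketch of a possible concrete subclass (hypothetical, not part of this
# module); it only illustrates how the path attributes above are meant to be
# filled in by a real helper:
#
#   class ExampleSQLitePluginPathHelper(BaseSQLitePluginPathHelper):
#     """Hypothetical helper deriving all paths from a plugin name."""
#
#     def __init__(self, plugin_name):
#       """Initializes the helper with paths based on the plugin name."""
#       super().__init__()
#       self.parser_file_path = '{0:s}.py'.format(plugin_name)
#       self.formatter_file_path = '{0:s}_formatter.py'.format(plugin_name)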
| ClaudiaSaxer/PlasoScaffolder | src/plasoscaffolder/bll/services/base_sqlite_plugin_path_helper.py | Python | apache-2.0 | 1,053 |
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import datetime
import traceback
from utils.cloudFoundryEnvReader import CloudFoundryEnvReader
from rulesProcessor import RulesProcessor
from pyspark import SparkContext
from config import Config
from version import VERSION
if __name__ == "__main__":
print("Rule Engine {0} v. - job is starting ...".format(VERSION))
sc = SparkContext(appName="RuleEngineSparkContext")
try:
        cf_reader = CloudFoundryEnvReader(sys.argv[2])
config = Config(cf_reader=cf_reader)
rule_processor = RulesProcessor(config=config, spark_context=sc)
rule_processor.process_rules()
except Exception, e:
print("Spark job failed", e)
traceback.print_exc()
finally:
sc.stop()
print("Finish time: " + str(datetime.datetime.utcnow().isoformat()))
| enableiot/iotanalytics-rule-engine | job.py | Python | apache-2.0 | 1,400 |
# -*- coding: utf-8 -*-
"""
requests_toolbelt.multipart
===========================
This holds all of the implementation details of the MultipartEncoder
"""
from requests.utils import super_len
from requests.packages.urllib3.filepost import iter_field_objects
from uuid import uuid4
import io
def encode_with(string, encoding):
"""Encoding ``string`` with ``encoding`` if necessary.
:param str string: If string is a bytes object, it will not encode it.
Otherwise, this function will encode it with the provided encoding.
:param str encoding: The encoding with which to encode string.
:returns: encoded bytes object
"""
if string and not isinstance(string, bytes):
return string.encode(encoding)
return string
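# Usage sketch (illustrative, not part of the original module):
#   encode_with(u'value', 'utf-8')  -> b'value'
#   encode_with(b'value', 'utf-8')  -> b'value' (bytes pass through unchanged)
#   encode_with(None, 'utf-8')      -> None (falsy input is returned as-is)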
class MultipartEncoder(object):
"""
    The ``MultipartEncoder`` object is a generic interface to the engine that
will create a ``multipart/form-data`` body for you.
The basic usage is::
import requests
from requests_toolbelt import MultipartEncoder
encoder = MultipartEncoder({'field': 'value',
                                    'other_field': 'other_value'})
r = requests.post('https://httpbin.org/post', data=encoder,
headers={'Content-Type': encoder.content_type})
If you do not need to take advantage of streaming the post body, you can
also do::
r = requests.post('https://httpbin.org/post',
data=encoder.to_string(),
headers={'Content-Type': encoder.content_type})
"""
def __init__(self, fields, boundary=None, encoding='utf-8'):
#: Boundary value either passed in by the user or created
self.boundary_value = boundary or uuid4().hex
self.boundary = '--{0}'.format(self.boundary_value)
#: Default encoding
self.encoding = encoding
#: Fields passed in by the user
self.fields = fields
#: State of streaming
self.finished = False
# Most recently used data
self._current_data = None
# Length of the body
self._len = None
# Our buffer
self._buffer = CustomBytesIO(encoding=encoding)
# This a list of two-tuples containing the rendered headers and the
# data.
self._fields_list = []
# Iterator over the fields so we don't lose track of where we are
self._fields_iter = None
# Pre-render the headers so we can calculate the length
self._render_headers()
def __len__(self):
if self._len is None:
self._calculate_length()
return self._len
def _calculate_length(self):
boundary_len = len(self.boundary) # Length of --{boundary}
self._len = 0
for (header, data) in self._fields_list:
# boundary length + header length + body length + len('\r\n') * 2
self._len += boundary_len + len(header) + super_len(data) + 4
# Length of trailing boundary '--{boundary}--\r\n'
self._len += boundary_len + 4
@property
def content_type(self):
return str('multipart/form-data; boundary={0}'.format(
self.boundary_value
))
def to_string(self):
return self.read()
def read(self, size=None):
"""Read data from the streaming encoder.
:param int size: (optional), If provided, ``read`` will return exactly
that many bytes. If it is not provided, it will return the
remaining bytes.
:returns: bytes
"""
if size is not None:
size = int(size) # Ensure it is always an integer
bytes_length = len(self._buffer) # Calculate this once
size -= bytes_length if size > bytes_length else 0
self._load_bytes(size)
return self._buffer.read(size)
def _load_bytes(self, size):
written = 0
orig_position = self._buffer.tell()
# Consume previously unconsumed data
written += self._consume_current_data(size)
while size is None or written < size:
next_tuple = self._next_tuple()
if not next_tuple:
self.finished = True
break
headers, data = next_tuple
# We have a tuple, write the headers in their entirety.
# They aren't that large, if we write more than was requested, it
# should not hurt anyone much.
written += self._buffer.write(encode_with(headers, self.encoding))
self._current_data = coerce_data(data, self.encoding)
if size is not None and written < size:
size -= written
written += self._consume_current_data(size)
self._buffer.seek(orig_position, 0)
self._buffer.smart_truncate()
def _consume_current_data(self, size):
written = 0
# File objects need an integer size
if size is None:
size = -1
if self._current_data is None:
written = self._buffer.write(
encode_with(self.boundary, self.encoding)
)
written += self._buffer.write(encode_with('\r\n', self.encoding))
elif (self._current_data is not None and
super_len(self._current_data) > 0):
written = self._buffer.write(self._current_data.read(size))
if super_len(self._current_data) == 0 and not self.finished:
written += self._buffer.write(
encode_with('\r\n{0}\r\n'.format(self.boundary),
self.encoding)
)
return written
def _next_tuple(self):
next_tuple = tuple()
try:
# Try to get another field tuple
next_tuple = next(self._fields_iter)
except StopIteration:
# We reached the end of the list, so write the closing
# boundary. The last file tuple wrote a boundary like:
# --{boundary}\r\n, so move back two characters, truncate and
# write the proper ending.
if not self.finished:
self._buffer.seek(-2, 1)
self._buffer.truncate()
self._buffer.write(encode_with('--\r\n', self.encoding))
return next_tuple
def _render_headers(self):
e = self.encoding
iter_fields = iter_field_objects(self.fields)
self._fields_list = [
(f.render_headers(), readable_data(f.data, e)) for f in iter_fields
]
self._fields_iter = iter(self._fields_list)
def readable_data(data, encoding):
if hasattr(data, 'read'):
return data
return CustomBytesIO(data, encoding)
def coerce_data(data, encoding):
if not isinstance(data, CustomBytesIO):
if hasattr(data, 'getvalue'):
return CustomBytesIO(data.getvalue(), encoding)
if hasattr(data, 'fileno'):
return FileWrapper(data)
return data
class CustomBytesIO(io.BytesIO):
def __init__(self, buffer=None, encoding='utf-8'):
buffer = encode_with(buffer, encoding)
super(CustomBytesIO, self).__init__(buffer)
def _get_end(self):
current_pos = self.tell()
self.seek(0, 2)
length = self.tell()
self.seek(current_pos, 0)
return length
def __len__(self):
length = self._get_end()
return length - self.tell()
def smart_truncate(self):
to_be_read = len(self)
already_read = self._get_end() - to_be_read
if already_read >= to_be_read:
old_bytes = self.read()
self.seek(0, 0)
self.truncate()
self.write(old_bytes)
self.seek(0, 0) # We want to be at the beginning
class FileWrapper(object):
def __init__(self, file_object):
self.fd = file_object
def __len__(self):
return super_len(self.fd) - self.fd.tell()
def read(self, length=-1):
return self.fd.read(length)
| czgu/metaHack | env/lib/python2.7/site-packages/requests_toolbelt/multipart.py | Python | apache-2.0 | 8,075 |
#!/usr/bin/env python2.7
"""opencachemongodb.py - Manages the state of the node contents using a MongoDB database."""
import time
import pymongo
TAG = 'state'
class State:
def __init__(self, node):
"""Initialise state instance with useful objects.
Instantiated controller and configuration objects are passed for use within this instance.
Try connecting to the database. Continue to do so until database information is returned
(and the connection is therefore successful).
"""
self._node = node
database_test = None
while (database_test == None):
try:
self._client = pymongo.MongoClient(self._node.config['database_host'], int(self._node.config['database_port']))
self._database = self._client[self._node.config['database_name']]
database_test = self._database.command("serverStatus")
except Exception as e:
self._node.print_warn(TAG, "Could not connect to MongoDB database, retrying in 15 seconds.")
time.sleep(15)
def create(self, document):
return self._database.content.insert(document, upsert=True)
def remove(self, document):
return self._database.content.remove(document)
def lookup(self, document):
result = self._database.content.find(document)
result_obj = []
for test in result:
result_obj.append(test)
return result_obj
| opencache-project/opencache-node | opencache/node/state/opencachemongodb.py | Python | apache-2.0 | 1,488 |
"""
Utility functions for dealing with files
"""
import logging
import hashlib
__author__ = 'Stephen Brown (Little Fish Solutions LTD)'
log = logging.getLogger(__name__)
def extension_from_filename(filename):
return filename.rsplit('.')[-1]
def read_file(filename):
with open(filename, 'rb') as f:
file_data = f.read()
return file_data
def write_file(filename, data):
with open(filename, 'wb') as f:
f.write(data)
def file_md5sum(filename):
"""
:param filename: The filename of the file to process
:returns: The MD5 hash of the file
"""
hash_md5 = hashlib.md5()
with open(filename, 'rb') as f:
for chunk in iter(lambda: f.read(1024 * 4), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def bytes_md5sum(bytes_):
hash_md5 = hashlib.md5()
hash_md5.update(bytes_)
return hash_md5.hexdigest()
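# Usage sketch (illustrative; 'example.bin' is a hypothetical file):
#   write_file('example.bin', b'hello')
#   assert read_file('example.bin') == b'hello'
#   assert file_md5sum('example.bin') == bytes_md5sum(b'hello')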
| stevelittlefish/littlefish | littlefish/fileutil.py | Python | apache-2.0 | 908 |
#!/usr/bin/python
# Copyright (c) Facebook, Inc. and its affiliates.
"""These are the org-specific AutoDMG package building tools."""
import os
import sys
import tempfile
import shutil
from autodmg_utility import build_pkg, run, populate_ds_repo, pkgbuild
sys.path.append('/Library/CPE/lib/flib/modules')
try:
import FoundationPlist as plistlib
except ImportError:
print "Using plistlib"
import plistlib
# global DESTINATION
DESTINATION = '/Library/AutoDMG'
# Sample package construction list
# Each dict contains the information necessary to build a package from a given
# source, with receipt and file name, and potentially a target.
PKG_LIST = [
# Wallpapers are present at image time
{
'pkg_name': 'cpe_wallpapers',
'source': '/Users/Shared/Wallpapers',
'receipt': 'com.facebook.cpe.wallpapers',
'comment': 'Building Wallpapers package'
},
# Suppress Diagnostics prompt
{
'pkg_name': 'suppress_diagnostics',
'source': '/Library/AutoDMG/additions/diagnostics',
'receipt': 'com.facebook.cpe.suppress_diagnostics',
'comment': 'Building Diagnostic Suppression package',
'target': '/Library/Application Support/CrashReporter'
},
]
def build_bare_dmg(source, cache, logpath, loglevel, repo_path):
"""Build a bare OS DMG for Donation/bare usage."""
dmg_output_path = os.path.join(cache, 'Bare.hfs.dmg')
if os.path.isfile(dmg_output_path):
print "Donation image already found, not building.\n"
return
print "Creating AutoDMG-donation.adtmpl."
templatepath = os.path.join(cache, 'AutoDMG-bare.adtmpl')
plist = dict()
plist["ApplyUpdates"] = True
plist["SourcePath"] = source
plist["TemplateFormat"] = "1.0"
plist["VolumeName"] = "Macintosh HD"
# Complete the AutoDMG-donation.adtmpl template
plistlib.writePlist(plist, templatepath)
autodmg_cmd = [
'/Applications/AutoDMG.app/Contents/MacOS/AutoDMG'
]
if os.getuid() == 0:
# We are running as root
print "Running as root."
autodmg_cmd.append('--root')
logfile = os.path.join(logpath, 'bare.log')
# Now kick off the AutoDMG build
print "Building bare image..."
if os.path.isfile(dmg_output_path):
os.remove(dmg_output_path)
cmd = autodmg_cmd + [
'-L', loglevel,
'-l', logfile,
'build', templatepath,
'--download-updates',
'-o', dmg_output_path]
run(cmd)
print "Moving bare image to DS Repo."
populate_ds_repo(dmg_output_path, repo_path)
# local management functions
def munki_bootstrap(cache_path):
"""Build a Munki bootstrap package."""
pkg_output_file = os.path.join(cache_path, 'munki_bootstrap.pkg')
if not os.path.isfile(pkg_output_file):
print "Building Munki bootstrap package..."
temp_dir = tempfile.mkdtemp(prefix='munkiboot', dir='/tmp')
shared = os.path.join(temp_dir, 'Users/Shared')
os.makedirs(shared)
open(os.path.join(
shared, '.com.googlecode.munki.checkandinstallatstartup'
), 'a').close()
pkgbuild(
temp_dir,
'com.facebook.cpe.munki.bootstrap',
'1.0',
pkg_output_file
)
shutil.rmtree(temp_dir, ignore_errors=True)
if os.path.isfile(pkg_output_file):
return pkg_output_file
# If we failed for some reason, return None
return None
# Package already exists
return pkg_output_file
def suppress_registration(cache_path):
"""Build a package to suppress Setup Assistant, returns path to it."""
pkg_output_file = os.path.join(cache_path, 'suppress_registration.pkg')
if not os.path.isfile(pkg_output_file):
print "Building registration suppression package..."
temp_dir = tempfile.mkdtemp(prefix='suppressreg', dir='/tmp')
receipt = os.path.join(temp_dir, 'Library/Receipts')
os.makedirs(receipt)
open(os.path.join(receipt, '.SetupRegComplete'), 'a').close()
vardb = os.path.join(temp_dir, 'private/var/db/')
os.makedirs(vardb)
open(os.path.join(vardb, '.AppleSetupDone'), 'a').close()
pkgbuild(
temp_dir,
'com.facebook.cpe.suppress_registration',
'1.0',
pkg_output_file
)
shutil.rmtree(temp_dir, ignore_errors=True)
if os.path.isfile(pkg_output_file):
return pkg_output_file
# If we failed for some reason, return None
return None
# Package already exists
return pkg_output_file
def run_unique_code(args):
"""Run any special code or builds.
Arguments from the script are passed in.
Return a list of any packages you want included in the additions.
"""
pkg_list = []
# EXAMPLE ORGANIZATION-UNIQUE CODE:
# Perhaps you want to build a bunch of extra packages to include.
# You could use the PKG_LIST list above to set up your package building.
# ********
# for package in PKG_LIST:
# pkg_list.append(
# build_pkg(
# package['source'],
# package['pkg_name'],
# package['receipt'],
# package.get('target', package['source']),
# DESTINATION,
# package['comment']
# )
# )
# Each package needs to be added to the pkg_list to be returned,
# so it can be added to the overall additions list.
# ********
# EXAMPLE CUSTOM PACKAGE FUNCTIONS
# You can create your own functions for building packages, and
# include those too. Append each package to pkg_list:
# ********
# registration_pkg = suppress_registration(args.cache)
# if registration_pkg:
# pkg_list.append(registration_pkg)
# munki_bootstrap_pkg = munki_bootstrap(args.cache)
# if munki_bootstrap_pkg:
# pkg_list.append(munki_bootstrap_pkg)
# ********
# EXAMPLE BARE IMAGE:
# If you want to build your own bare/thin image, using just the OS,
# use the build_bare_dmg() function:
# ********
# build_bare_dmg(args.source, args.cache, args.logpath,
# str(args.loglevel), args.dsrepo)
# ********
return pkg_list
if __name__ == '__main__':
run_unique_code({})
| facebook/IT-CPE | legacy/autodmg_cache_builder/autodmg_org.py | Python | apache-2.0 | 5,866 |
from child_kinect import *
from transformer import *
from nao_move import * | CuriosityLabTAU/physicial_curiosity | curious_game/__init__.py | Python | apache-2.0 | 75 |
import os
import time
import json
import codecs
import elasticsearch
import progressbar
from backports import csv
from functools import wraps
FLUSH_BUFFER = 1000 # Chunk of docs to flush in temp file
CONNECTION_TIMEOUT = 120
TIMES_TO_TRY = 3
RETRY_DELAY = 60
META_FIELDS = [u'_id', u'_index', u'_score', u'_type']
# Retry decorator for functions with exceptions
def retry(ExceptionToCheck, tries=TIMES_TO_TRY, delay=RETRY_DELAY):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries = tries
while mtries > 0:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
print(e)
print('Retrying in {} seconds ...'.format(delay))
time.sleep(delay)
mtries -= 1
else:
print('Done.')
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
print('Fatal Error: {}'.format(e))
exit(1)
return f_retry
return deco_retry
class Es2csv:
def __init__(self, opts):
self.opts = opts
self.num_results = 0
self.scroll_ids = []
self.scroll_time = '30m'
self.csv_headers = list(META_FIELDS) if self.opts.meta_fields else []
self.tmp_file = '{}.tmp'.format(opts.output_file)
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def create_connection(self):
es = elasticsearch.Elasticsearch(self.opts.url, timeout=CONNECTION_TIMEOUT, http_auth=self.opts.auth,
verify_certs=self.opts.verify_certs, ca_certs=self.opts.ca_certs,
client_cert=self.opts.client_cert, client_key=self.opts.client_key)
es.cluster.health()
self.es_conn = es
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def check_indexes(self):
indexes = self.opts.index_prefixes
if '_all' in indexes:
indexes = ['_all']
else:
indexes = [index for index in indexes if self.es_conn.indices.exists(index)]
if not indexes:
            print('None of the index(es) {} exist in {}.'.format(', '.join(self.opts.index_prefixes), self.opts.url))
exit(1)
self.opts.index_prefixes = indexes
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def search_query(self):
@retry(elasticsearch.exceptions.ConnectionError, tries=TIMES_TO_TRY)
def next_scroll(scroll_id):
return self.es_conn.scroll(scroll=self.scroll_time, scroll_id=scroll_id)
search_args = dict(
index=','.join(self.opts.index_prefixes),
sort=','.join(self.opts.sort),
scroll=self.scroll_time,
size=self.opts.scroll_size,
terminate_after=self.opts.max_results
)
if self.opts.doc_types:
search_args['doc_type'] = self.opts.doc_types
if self.opts.query.startswith('@'):
query_file = self.opts.query[1:]
if os.path.exists(query_file):
with codecs.open(query_file, mode='r', encoding='utf-8') as f:
self.opts.query = f.read()
else:
print('No such file: {}.'.format(query_file))
exit(1)
if self.opts.raw_query:
try:
query = json.loads(self.opts.query)
except ValueError as e:
print('Invalid JSON syntax in query. {}'.format(e))
exit(1)
search_args['body'] = query
else:
query = self.opts.query if not self.opts.tags else '{} AND tags: ({})'.format(
self.opts.query, ' AND '.join(self.opts.tags))
search_args['q'] = query
if '_all' not in self.opts.fields:
search_args['_source_include'] = ','.join(self.opts.fields)
self.csv_headers.extend([unicode(field, "utf-8") for field in self.opts.fields if '*' not in field])
if self.opts.debug_mode:
print('Using these indices: {}.'.format(', '.join(self.opts.index_prefixes)))
print('Query[{0[0]}]: {0[1]}.'.format(
('Query DSL', json.dumps(query, ensure_ascii=False).encode('utf8')) if self.opts.raw_query else ('Lucene', query))
)
print('Output field(s): {}.'.format(', '.join(self.opts.fields)))
print('Sorting by: {}.'.format(', '.join(self.opts.sort)))
res = self.es_conn.search(**search_args)
self.num_results = res['hits']['total']
print('Found {} results.'.format(self.num_results))
if self.opts.debug_mode:
print(json.dumps(res, ensure_ascii=False).encode('utf8'))
if self.num_results > 0:
codecs.open(self.opts.output_file, mode='w', encoding='utf-8').close()
codecs.open(self.tmp_file, mode='w', encoding='utf-8').close()
hit_list = []
total_lines = 0
widgets = ['Run query ',
progressbar.Bar(left='[', marker='#', right=']'),
progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
progressbar.Percentage(),
progressbar.FormatLabel('] [%(elapsed)s] ['),
progressbar.ETA(), '] [',
progressbar.FileTransferSpeed(unit='docs'), ']'
]
bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()
while total_lines != self.num_results:
if res['_scroll_id'] not in self.scroll_ids:
self.scroll_ids.append(res['_scroll_id'])
if not res['hits']['hits']:
                    print('Scroll[{}] expired (multiple reads?). Saving loaded data.'.format(res['_scroll_id']))
break
for hit in res['hits']['hits']:
total_lines += 1
bar.update(total_lines)
hit_list.append(hit)
if len(hit_list) == FLUSH_BUFFER:
self.flush_to_file(hit_list)
hit_list = []
if self.opts.max_results:
if total_lines == self.opts.max_results:
self.flush_to_file(hit_list)
print('Hit max result limit: {} records'.format(self.opts.max_results))
return
res = next_scroll(res['_scroll_id'])
self.flush_to_file(hit_list)
bar.finish()
def flush_to_file(self, hit_list):
        def to_keyvalue_pairs(source, ancestors=[], header_delimiter='.'):
def is_list(arg):
return type(arg) is list
def is_dict(arg):
return type(arg) is dict
if is_dict(source):
for key in source.keys():
to_keyvalue_pairs(source[key], ancestors + [key])
elif is_list(source):
if self.opts.kibana_nested:
[to_keyvalue_pairs(item, ancestors) for item in source]
else:
[to_keyvalue_pairs(item, ancestors + [str(index)]) for index, item in enumerate(source)]
else:
                header = header_delimiter.join(ancestors)
if header not in self.csv_headers:
self.csv_headers.append(header)
try:
out[header] = '{}{}{}'.format(out[header], self.opts.delimiter, source)
                except KeyError:
out[header] = source
with codecs.open(self.tmp_file, mode='a', encoding='utf-8') as tmp_file:
for hit in hit_list:
out = {field: hit[field] for field in META_FIELDS} if self.opts.meta_fields else {}
if '_source' in hit and len(hit['_source']) > 0:
to_keyvalue_pairs(hit['_source'])
tmp_file.write('{}\n'.format(json.dumps(out)))
tmp_file.close()
def write_to_csv(self):
if self.num_results > 0:
self.num_results = sum(1 for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'))
if self.num_results > 0:
output_file = codecs.open(self.opts.output_file, mode='a', encoding='utf-8')
csv_writer = csv.DictWriter(output_file, fieldnames=self.csv_headers)
csv_writer.writeheader()
timer = 0
widgets = ['Write to csv ',
progressbar.Bar(left='[', marker='#', right=']'),
progressbar.FormatLabel(' [%(value)i/%(max)i] ['),
progressbar.Percentage(),
progressbar.FormatLabel('] [%(elapsed)s] ['),
progressbar.ETA(), '] [',
progressbar.FileTransferSpeed(unit='lines'), ']'
]
bar = progressbar.ProgressBar(widgets=widgets, maxval=self.num_results).start()
for line in codecs.open(self.tmp_file, mode='r', encoding='utf-8'):
timer += 1
bar.update(timer)
csv_writer.writerow(json.loads(line))
output_file.close()
bar.finish()
else:
                print('There are no docs with the selected field(s): {}.'.format(','.join(self.opts.fields)))
os.remove(self.tmp_file)
def clean_scroll_ids(self):
try:
self.es_conn.clear_scroll(body=','.join(self.scroll_ids))
except:
pass
| taraslayshchuk/es2csv | es2csv.py | Python | apache-2.0 | 9,872 |
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard.dashboards.project.access_and_security.keypairs.tables import KeypairsTable as OldKeypairsTable
from tukey.cloud_attribute import get_cloud, get_cloud_id
class KeypairsTable(OldKeypairsTable):
# Thie should be somewhere else but I just don't know where
# mgreenway
cloud = tables.Column(get_cloud, verbose_name=_("Resource"))
#end modified section mgreenway
def get_object_id(self, keypair):
return get_cloud_id(keypair) + '-' + keypair.name
Meta = OldKeypairsTable.Meta
| LabAdvComp/tukey_portal | tukey/dashboards/project/access_and_security/keypairs/tables.py | Python | apache-2.0 | 617 |
# This module was automatically generated.
import viper.lexer as vl
from .ast import AST
from typing import List, Optional
class SingleInput(AST):
pass
class Term(AST):
pass
class FileLine(AST):
pass
class PlainStmt(AST):
pass
class Parameter(AST):
def __init__(self, external: Optional[vl.Name], internal: vl.Name, param_type: vl.Class):
self.external = external
self.internal = internal
self.param_type = param_type
class ClassArguments(AST):
def __init__(self, parents: List[vl.Class]):
self.parents = parents
class ClassStmtBlock(AST):
pass
class Modifier(AST):
pass
class Access(AST):
pass
class StmtBlock(AST):
pass
class LhsExpr(AST):
pass
class NotTestExpr(AST):
pass
class Atom(AST):
pass
class Trailer(AST):
pass
class ExprBlock(AST):
pass
class Id(AST):
pass
class FileInput(AST):
def __init__(self, lines: List[FileLine]):
self.lines = lines
class Stmt(Term):
pass
class EmptyStmt(PlainStmt):
pass
class ElseStmt(AST):
def __init__(self, else_body: StmtBlock):
self.else_body = else_body
class ClassStmt(ClassStmtBlock):
pass
class Expr(Term):
pass
class ElseExpr(AST):
def __init__(self, else_body: ExprBlock):
self.else_body = else_body
class AndTestExpr(AST):
def __init__(self, tests: List[NotTestExpr]):
self.tests = tests
class AtomExpr(AST):
def __init__(self, atom: Atom, trailers: List[Trailer]):
self.atom = atom
self.trailers = trailers
class Pattern(LhsExpr):
pass
class VarId(Id):
def __init__(self, id: vl.Name):
self.id = id
class PathPart(AST):
def __init__(self, part: Id):
self.part = part
class SingleNewline(SingleInput):
pass
class SingleLine(SingleInput):
def __init__(self, line: Term):
self.line = line
class FileNewline(FileLine):
pass
class SimpleEmptyClassStmt(ClassStmtBlock):
pass
class CompoundEmptyClassStmt(ClassStmtBlock):
pass
class StaticModifier(Modifier):
def __init__(self, access: Optional[Access]):
self.access = access
class NonstaticModifier(Modifier):
def __init__(self, access: Optional[Access]):
self.access = access
class PublicAccess(Access):
pass
class PrivateAccess(Access):
pass
class ProtectedAccess(Access):
pass
class ModuleAccess(Access):
pass
class NameAtom(Atom):
def __init__(self, name: vl.Name):
self.name = name
class ClassAtom(Atom):
def __init__(self, name: vl.Class):
self.name = name
class IntAtom(Atom):
def __init__(self, num: vl.Int):
self.num = num
class FloatAtom(Atom):
def __init__(self, num: vl.Float):
self.num = num
class StringAtom(Atom):
def __init__(self, string: vl.String):
self.string = string
class EllipsisAtom(Atom):
pass
class TrueAtom(Atom):
pass
class FalseAtom(Atom):
pass
class FieldAccess(Trailer):
def __init__(self, field: vl.Name):
self.field = field
class ClassId(Id):
def __init__(self, id: vl.Class):
self.id = id
class AssignStmt(PlainStmt):
def __init__(self, lhs: LhsExpr, expr: Expr):
self.lhs = lhs
self.expr = expr
class Definition(Stmt):
pass
class Arguments(AST):
def __init__(self, args: List[AtomExpr]):
self.args = args
class OrTestExpr(AST):
def __init__(self, tests: List[AndTestExpr]):
self.tests = tests
class SubOpExpr(AST):
def __init__(self, op: vl.Operator, atom: AtomExpr):
self.op = op
self.atom = atom
class TypedPattern(Pattern):
pass
class Path(AST):
def __init__(self, id: Id, parts: List[PathPart]):
self.id = id
self.parts = parts
class FileStmt(FileLine):
def __init__(self, stmt: Stmt):
self.stmt = stmt
class SimpleStmt(Stmt):
def __init__(self, stmt: PlainStmt):
self.stmt = stmt
class CompoundClassStmtBlock(ClassStmtBlock):
def __init__(self, stmts: List[ClassStmt]):
self.stmts = stmts
class Field(ClassStmt):
def __init__(self, modifier: Modifier, name: vl.Name, var_type: vl.Class):
self.modifier = modifier
self.name = name
self.var_type = var_type
class SimpleStmtBlock(StmtBlock):
def __init__(self, stmt: Stmt):
self.stmt = stmt
class CompoundStmtBlock(StmtBlock):
def __init__(self, stmts: List[Stmt]):
self.stmts = stmts
class SimpleExprBlock(ExprBlock):
def __init__(self, expr: Expr):
self.expr = expr
class IndentedExprBlock(ExprBlock):
def __init__(self, expr: Expr):
self.expr = expr
class FuncDef(Definition):
def __init__(self, name: vl.Name, params: List[Parameter], func_type: vl.Class, body: StmtBlock):
self.name = name
self.params = params
self.func_type = func_type
self.body = body
class ClassDef(Definition):
def __init__(self, name: vl.Class, args: Optional[ClassArguments], body: ClassStmtBlock):
self.name = name
self.args = args
self.body = body
class InterfaceDef(Definition):
def __init__(self, name: vl.Class, args: Optional[Arguments], body: StmtBlock):
self.name = name
self.args = args
self.body = body
class DataDef(Definition):
def __init__(self, name: vl.Class, args: Optional[Arguments], body: StmtBlock):
self.name = name
self.args = args
self.body = body
class TestExpr(AST):
def __init__(self, test: OrTestExpr):
self.test = test
class OpExpr(AST):
def __init__(self, left_op: Optional[vl.Operator], atom: AtomExpr, sub_op_exprs: List[SubOpExpr], right_op: Optional[vl.Operator]):
self.left_op = left_op
self.atom = atom
self.sub_op_exprs = sub_op_exprs
self.right_op = right_op
class SimplePattern(TypedPattern):
pass
class TypedVariablePattern(TypedPattern):
def __init__(self, id: VarId, pat_type: vl.Class):
self.id = id
self.pat_type = pat_type
class TypedAnonymousPattern(TypedPattern):
def __init__(self, pat_type: vl.Class):
self.pat_type = pat_type
class TypedFieldPattern(TypedPattern):
def __init__(self, root: Expr, field: VarId, pat_type: vl.Class):
self.root = root
self.field = field
self.pat_type = pat_type
class ElifStmt(AST):
def __init__(self, cond: TestExpr, elif_body: StmtBlock):
self.cond = cond
self.elif_body = elif_body
class ElifExpr(AST):
def __init__(self, cond: TestExpr, elif_body: ExprBlock):
self.cond = cond
self.elif_body = elif_body
class TestExprList(Expr):
def __init__(self, tests: List[TestExpr]):
self.tests = tests
class Call(Trailer):
def __init__(self, args: List[TestExpr]):
self.args = args
class Method(ClassStmt):
def __init__(self, modifier: Modifier, func: FuncDef):
self.modifier = modifier
self.func = func
class NegatedTestExpr(NotTestExpr):
def __init__(self, op_expr: OpExpr):
self.op_expr = op_expr
class NotNegatedTestExpr(NotTestExpr):
def __init__(self, op_expr: OpExpr):
self.op_expr = op_expr
class SimpleVariablePattern(SimplePattern):
def __init__(self, id: VarId):
self.id = id
class SimpleAnonymousPattern(SimplePattern):
pass
class SimpleFieldPattern(SimplePattern):
def __init__(self, root: Expr, field: VarId):
self.root = root
self.field = field
class SimpleParenPattern(SimplePattern):
def __init__(self, patterns: List[Pattern]):
self.patterns = patterns
class ReturnStmt(PlainStmt):
def __init__(self, tests: Optional[TestExprList]):
self.tests = tests
class CallStmt(PlainStmt):
def __init__(self, atom: AtomExpr, call: Call):
self.atom = atom
self.call = call
class IfStmt(Stmt):
def __init__(self, cond: TestExpr, then_body: StmtBlock, elif_stmts: List[ElifStmt], else_stmt: Optional[ElseStmt]):
self.cond = cond
self.then_body = then_body
self.elif_stmts = elif_stmts
self.else_stmt = else_stmt
class IfExpr(Expr):
def __init__(self, cond: TestExpr, then_body: ExprBlock, elif_exprs: List[ElifExpr], else_expr: Optional[ElseExpr]):
self.cond = cond
self.then_body = then_body
self.elif_exprs = elif_exprs
self.else_expr = else_expr
class ParenAtom(Atom):
def __init__(self, tests: Optional[TestExprList]):
self.tests = tests
| pdarragh/Viper | viper/parser/ast/nodes.py | Python | apache-2.0 | 8,645 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from os.path import dirname
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.file_utils import cached_property
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ..test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = os.path.join(dirname(dirname(os.path.abspath(__file__))), "fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = ReformerTokenizer
rust_tokenizer_class = ReformerTokenizerFast
test_rust_tokenizer = True
test_seq2seq = False
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
"""Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
token = "<s>"
token_id = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def test_get_vocab(self):
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "j")
self.assertEqual(len(vocab_keys), 1_000)
def test_vocab_size(self):
self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "I was born in 92000, and this is falsé."
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_padding(self, max_length=15):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
# Simple input
s = "This is a simple input"
s2 = ["This is a simple input 1", "This is a simple input 2"]
p = ("This is a simple input", "This is a pair")
p2 = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
# Simple input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
s2,
max_length=max_length,
padding="max_length",
)
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
# Pair input
self.assertRaises(
ValueError,
tokenizer_r.batch_encode_plus,
p2,
max_length=max_length,
padding="max_length",
)
# tokenizer has no padding token
def test_padding_different_model_input_name(self):
pass
def test_full_tokenizer(self):
tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[285, 46, 10, 170, 382],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
@cached_property
def big_tokenizer(self):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
def test_tokenization_base_easy_symbols(self):
symbols = "Hello World!"
original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def test_tokenization_base_hard_symbols(self):
symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
config = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
config.axial_pos_shape = encoded_sequence["input_ids"].shape
model = ReformerModel(config)
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**encoded_sequence)
model(**batch_encoded_sequence)
@slow
def test_tokenizer_integration(self):
# fmt: off
expected_encoding = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=expected_encoding,
model_name="google/reformer-crime-and-punishment",
revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
padding=False,
sequences=sequences,
)
| huggingface/transformers | tests/reformer/test_tokenization_reformer.py | Python | apache-2.0 | 11,990 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from openfermion.ops import BinaryCode, FermionOperator, QubitOperator
from openfermion.transforms import binary_code_transform, dissolve
class CodeTransformTest(unittest.TestCase):
def test_transform(self):
code = BinaryCode([[1, 0, 0], [0, 1, 0]], ['W0', 'W1', '1 + W0 + W1'])
hamiltonian = FermionOperator('0^ 2', 0.5) + FermionOperator('2^ 0',
0.5)
transform = binary_code_transform(hamiltonian, code)
        correct_op = QubitOperator('X0 Z1', 0.25) + QubitOperator('X0', 0.25)
self.assertTrue(transform == correct_op)
with self.assertRaises(TypeError):
binary_code_transform('0^ 2', code)
with self.assertRaises(TypeError):
binary_code_transform(hamiltonian,
([[1, 0], [0, 1]], ['w0', 'w1']))
def test_dissolve(self):
code = BinaryCode([[1, 0, 0], [0, 1, 0]], ['W0', 'W1', '1 + W0 W1'])
hamiltonian = FermionOperator('0^ 2', 0.5) + FermionOperator('2^ 0',
0.5)
transform = binary_code_transform(hamiltonian, code)
correct_op = QubitOperator('X0 Z1', 0.375) + \
QubitOperator('X0', -0.125) + \
QubitOperator('Y0', -0.125j) + \
QubitOperator('Y0 Z1', -0.125j)
self.assertTrue(transform == correct_op)
with self.assertRaises(ValueError):
dissolve(((1, '1'),))
| jarrodmcc/OpenFermion | src/openfermion/transforms/_binary_code_transform_test.py | Python | apache-2.0 | 2,123 |
#!/usr/bin/python
#
# Copyright 2002-2019 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
PyCOMPSs API - Task
===================
This file contains the class task, needed for the task definition.
"""
from __future__ import print_function
import os
import sys
import threading
import inspect
from functools import wraps
import pycompss.api.parameter as parameter
from pycompss.runtime.core_element import CE
from pycompss.runtime.commons import IS_PYTHON3
from pycompss.runtime.commons import TRACING_HOOK_ENV_VAR
import pycompss.util.context as context
from pycompss.util.arguments import check_arguments
from pycompss.util.storages.persistent import is_psco
from pycompss.util.serialization.serializer import deserialize_from_file
from pycompss.util.serialization.serializer import serialize_to_file
from pycompss.util.serialization.serializer import serialize_to_file_mpienv
from pycompss.worker.commons.worker import build_task_parameter
if __debug__:
import logging
logger = logging.getLogger(__name__)
MANDATORY_ARGUMENTS = {}
# List since the parameter names are included before checking for unexpected
# arguments (the user can define a=INOUT in the task decorator and this is not
# an unexpected argument)
SUPPORTED_ARGUMENTS = ['compss_tracing', # private
'returns',
'priority',
'on_failure',
'time_out',
'is_replicated',
'is_distributed',
'varargs_type',
'target_direction',
'computing_nodes',
'numba',
'numba_flags',
'numba_signature',
'numba_declaration',
'tracing_hook']
# Deprecated arguments. Still supported but shows a message when used.
DEPRECATED_ARGUMENTS = ['isReplicated',
'isDistributed',
'varargsType',
'targetDirection']
# This lock allows tasks to be launched with the Threading module while
# ensuring that no attribute is overwritten
master_lock = threading.Lock()
# Determine if strings should have a sharp symbol prepended or not
prepend_strings = True
register_only = False
current_core_element = CE()
class Task(object):
"""
This is the Task decorator implementation.
It is implemented as a class and consequently this implementation can be
divided into two natural steps: decoration process and function call.
Decoration process is what happens when the Python parser reads a decorated
function. The actual function is not called, but the @task() triggers
the process that stores and processes the parameters of the decorator.
This first step corresponds to the class constructor.
Function call is what happens when the user calls their function somewhere
in the code. A decorator simply adds pre and post steps in this function
    call, allowing us to change and process the arguments. This second step
happens in the __call__ implementation.
Also, the call itself does different things in the master than in the
worker. We must also handle the case when the user just runs the app with
python and no PyCOMPSs.
The specific implementations can be found in self.master_call(),
self.worker_call(), self.sequential_call()
"""
@staticmethod
def get_default_decorator_values():
"""
Default value for decorator arguments.
By default, do not use jit (if true -> use nopython mode,
alternatively, the user can define a dictionary with the specific
flags - using a dictionary will be considered as the user wants to use
compile with jit).
:return: A dictionary with the default values of the non-parameter
decorator fields
"""
return {
'target_direction': parameter.INOUT,
'returns': False,
'priority': False,
'on_failure': 'RETRY',
'time_out': 0,
'is_replicated': False,
'is_distributed': False,
'computing_nodes': 1,
'tracing_hook': False,
'numba': False, # numba mode (jit, vectorize, guvectorize)
'numba_flags': {}, # user defined extra numba flags
'numba_signature': None, # vectorize and guvectorize signature
'numba_declaration': None, # guvectorize declaration
'varargs_type': parameter.IN # Here for legacy purposes
}
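    # Hedged usage sketch (hypothetical example, not part of the original
    # module): a decorator call such as
    #     @task(returns=2, priority=True)
    #     def f(x):
    #         ...
    # keeps every default above except 'returns' and 'priority', which are
    # overridden by the kwargs handled in __init__ below.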
def __init__(self, comment=None, **kwargs):
"""
This part is called in the decoration process, not as an
explicit function call.
We do two things here:
a) Assign default values to unspecified fields
        (see get_default_decorator_values)
        b) Transform the parameters from user-friendly types
        (i.e. Parameter.IN, etc.) to a more convenient internal representation
:param comment: Hidden to the user (non-documented).
:param kwargs: Decorator parameters. A task decorator has no positional
arguments.
"""
self.comment = comment
self.decorator_arguments = kwargs
# Set missing values to their default ones (step a)
for (key, value) in self.get_default_decorator_values().items():
if key not in self.decorator_arguments:
self.decorator_arguments[key] = value
# Give all parameters a unique instance for them (step b)
# Note that when a user defines a task like
# @task(a = IN, b = IN, c = INOUT)
# both a and b point to the same IN object (the one from parameter.py)
# Giving them a unique instance makes life easier in further steps
for (key, value) in self.decorator_arguments.items():
# Not all decorator arguments are necessarily parameters
# (see self.get_default_decorator_values)
if parameter.is_parameter(value):
self.decorator_arguments[key] = \
parameter.get_parameter_copy(value)
# Specific case when value is a dictionary
# Use case example:
# @binary(binary="ls")
# @task(hide={Type: FILE_IN, Prefix: "--hide="},
# sort={Type: IN, Prefix: "--sort="})
# def myLs(flag, hide, sort):
# pass
# Transform this dictionary to a Parameter object
if parameter.is_dict_specifier(value):
if key not in ['numba', 'numba_flags',
'numba_signature', 'numba_declaration']:
# Perform user -> instance substitution
# param = self.decorator_arguments[key][parameter.Type]
# Replace the whole dict by a single parameter object
self.decorator_arguments[key] = \
parameter.get_parameter_from_dictionary(
self.decorator_arguments[key]
)
# self.decorator_arguments[key].update(
# {parameter.Type: parameter.get_parameter_copy(param)}
# )
else:
# It is a reserved word that we need to keep the user
# defined value (not a Parameter object)
self.decorator_arguments[key] = value
# Add more argument related attributes that will be useful later
self.parameters = None
self.param_args = None
self.param_varargs = None
self.param_kwargs = None
self.param_defaults = None
self.first_arg_name = None
# Add function related attributed that will be useful later
self.module_name = None
self.function_name = None
self.function_type = None
self.class_name = None
self.computing_nodes = None
# Add returns related attributes that will be useful later
self.returns = None
self.multi_return = False
        # The task won't be registered until it is called from the master for
        # the first time or it has a different signature
self.signature = None
self.registered = False
def __call__(self, user_function):
"""
This part is called in all explicit function calls.
Note that in PyCOMPSs a single function call will be transformed into
two calls, as both master and worker need to call the function.
The work to do in the master part is totally different
from the job to do in the worker part. This is why there are
some other functions like master_call, worker_call, and
sequential_call
There is also a third case that happens when the user runs a PyCOMPSs
code without PyCOMPSs. This case is straightforward: just call the
user function with the user parameters and return whatever the user
code returned. Therefore, we can just return the user function.
:param user_function: Function to decorate
:return: The function to be executed
"""
self.user_function = user_function
self.update_if_interactive()
@wraps(user_function)
def task_decorator(*args, **kwargs):
# Determine the context and decide what to do
if context.in_master():
return self.master_call(*args, **kwargs)
elif context.in_worker():
if 'compss_key' in kwargs.keys():
return self.worker_call(*args, **kwargs)
else:
# Called from another task within the worker
# Ignore the @task decorator and run it sequentially
message = "WARNING: Calling task: "
message += str(user_function.__name__)
message += " from this task.\n"
message += " It will be executed sequentially "
message += "within the caller task."
print(message, file=sys.stderr)
return self.sequential_call(*args, **kwargs)
# We are neither in master nor in the worker, or the user has
# stopped the interactive session.
# Therefore, the user code is being executed with no
# launch_compss/enqueue_compss/runcompss/interactive session
return self.sequential_call(*args, **kwargs)
return task_decorator
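    # Minimal sketch of the resulting dispatch (hypothetical user code, not
    # part of this module):
    #     @task(returns=1)
    #     def inc(x):
    #         return x + 1
    #     y = inc(3)
    # The call runs through master_call() when the runtime is active on the
    # master, through worker_call() when the persistent worker invokes it
    # with 'compss_key' in kwargs, and through sequential_call() when the
    # script is executed with plain Python.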
def update_if_interactive(self):
"""
Update the user code if in interactive mode and the session has
been started.
:return: None
"""
import inspect
mod = inspect.getmodule(self.user_function)
self.module_name = mod.__name__
if context.in_pycompss() and \
(self.module_name == '__main__' or
self.module_name == 'pycompss.runtime.launch'):
# 1.- The runtime is running.
            # 2.- The module where the function is defined was run as __main__,
            #     so we need to find out the real module name.
# Get the real module name from our launch.py APP_PATH global
# variable
# It is guaranteed that this variable will always exist because
# this code is only executed when we know we are in the master
path = getattr(mod, 'APP_PATH')
# Get the file name
file_name = os.path.splitext(os.path.basename(path))[0]
# Do any necessary pre processing action before executing any code
if file_name.startswith('InteractiveMode') and not self.registered:
                # If the file_name starts with 'InteractiveMode', it means that
                # the user is using PyCOMPSs from a Jupyter notebook.
# Convention between this file and interactive.py
# In this case it is necessary to do a pre-processing step
# that consists of putting all user code that may be executed
# in the worker on a file.
# This file has to be visible for all workers.
from pycompss.util.interactive.helpers import update_tasks_code_file # noqa
update_tasks_code_file(self.user_function, path)
else:
pass
def update_return_if_no_returns(self, f):
"""
        Checks the code looking for return statements if no returns is
        specified in the @task decorator.
        WARNING: Updates self.returns if returns are found.
:param f: Function to check
"""
from pycompss.api.parameter import Parameter
from pycompss.api.parameter import DIRECTION
from pycompss.api.parameter import TYPE
from pycompss.util.objects.properties import get_wrapped_source
import ast
source_code = get_wrapped_source(f).strip()
if self.first_arg_name == 'self' or \
source_code.startswith('@classmethod'):
# TODO: WHAT IF IS CLASSMETHOD FROM BOOLEAN?
# It is a task defined within a class (can not parse the code
# with ast since the class does not exist yet).
# Alternatively, the only way I see is to parse it manually
# line by line.
ret_mask = []
code = source_code.split('\n')
for line in code:
if 'return ' in line:
ret_mask.append(True)
else:
ret_mask.append(False)
else:
code = [node for node in ast.walk(ast.parse(source_code))]
ret_mask = [isinstance(node, ast.Return) for node in code]
if any(ret_mask):
has_multireturn = False
lines = [i for i, li in enumerate(ret_mask) if li]
max_num_returns = 0
if self.first_arg_name == 'self' or \
source_code.startswith('@classmethod'):
# Parse code as string (it is a task defined within a class)
def _has_multireturn(statement):
v = ast.parse(statement.strip())
try:
if len(v.body[0].value.elts) > 1:
return True
else:
return False
except (KeyError, AttributeError):
                        # A KeyError/AttributeError on 'elts' means that it
                        # is not a multiple return.
                        # "Ask forgiveness not permission"
return False
def _get_return_elements(statement):
v = ast.parse(statement.strip())
return len(v.body[0].value.elts)
for i in lines:
if _has_multireturn(code[i]):
has_multireturn = True
num_returns = _get_return_elements(code[i])
if num_returns > max_num_returns:
max_num_returns = num_returns
else:
# Parse code AST (it is not a task defined within a class)
for i in lines:
try:
if 'elts' in code[i].value.__dict__:
has_multireturn = True
num_returns = len(code[i].value.__dict__['elts'])
if num_returns > max_num_returns:
max_num_returns = num_returns
except (KeyError, AttributeError):
                    # A KeyError/AttributeError on 'elts' means that it is not
                    # a multiple return.
                    # "Ask forgiveness not permission"
pass
if has_multireturn:
for i in range(max_num_returns):
param = Parameter(p_type=TYPE.FILE,
p_direction=DIRECTION.OUT)
param.object = object()
self.returns[parameter.get_return_name(i)] = param
else:
param = Parameter(p_type=TYPE.FILE,
p_direction=DIRECTION.OUT)
param.object = object()
self.returns[parameter.get_return_name(0)] = param
else:
# Return not found
pass
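    # Illustration of the detection above (hypothetical task, not part of
    # this module): a task without a 'returns' argument whose body ends in
    #     return a, b
    # yields two hidden FILE/OUT return parameters here, whereas a body
    # ending in 'return a' yields exactly one.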
def prepare_core_element_information(self, f):
"""
This function is used to prepare the core element.
The information is needed in order to compare the implementation
signature, so that if it has been registered with a different
signature, it can be re-registered with the new one (enable
inheritance).
:param f: Function to be registered
"""
def _get_top_decorator(code, dec_keys):
"""
Retrieves the decorator which is on top of the current task
decorators stack.
:param code: Tuple which contains the task code to analyse and
the number of lines of the code.
            :param dec_keys: Tuple which contains the available decorator keys
:return: the decorator name in the form "pycompss.api.__name__"
"""
# Code has two fields:
# code[0] = the entire function code.
# code[1] = the number of lines of the function code.
dec_func_code = code[0]
decorators = [l.strip() for l in
dec_func_code if l.strip().startswith('@')]
# Could be improved if it stops when the first line without @ is
# found, but we have to be careful if a decorator is commented
# (# before @).
# The strip is due to the spaces that appear before functions
# definitions, such as class methods.
for dk in dec_keys:
for d in decorators:
if d.startswith('@' + dk):
# check each decorator's __name__ to lower
return "pycompss.api." + dk.lower()
# If no decorator is found, then the current decorator is the one
# to register
return __name__
def _get_task_type(code, dec_filter, default_values):
"""
Retrieves the type of the task based on the decorators stack.
:param code: Tuple which contains the task code to analyse and the
number of lines of the code.
:param dec_filter: Tuple which contains the filtering decorators.
The one used determines the type of the task.
If none, then it is a normal task.
:param default_values: Default values
:return: the type of the task
"""
# Code has two fields:
# code[0] = the entire function code.
# code[1] = the number of lines of the function code.
dec_func_code = code[0]
full_decorators = [l.strip() for l in
dec_func_code if l.strip().startswith('@')]
# Get only the decorators used. Remove @ and parameters.
decorators = [l[1:].split('(')[0] for l in full_decorators]
            # Look for the decorator used from the filter list and return it
            # when found. If @mpi is used without binary, then this is a
            # PYTHON_MPI task.
index = 0
for filt in dec_filter:
if filt in decorators:
if filt == "mpi":
if "binary" not in full_decorators[index]:
filt = "PYTHON_MPI"
return filt
index += 1
# The decorator stack did not contain any of the filtering keys,
# then return the default key.
return default_values
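        # Illustrative decorator stack (hypothetical user code; the exact
        # @mpi/@constraint arguments are assumptions):
        #     @constraint(computing_units="2")
        #     @mpi(runner="mpirun")
        #     @task(returns=1)
        #     def sim(): ...
        # _get_top_decorator() would return 'pycompss.api.constraint' and
        # _get_task_type() would return 'PYTHON_MPI', since @mpi appears
        # without a 'binary' argument.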
# Look for the decorator that has to do the registration
# Since the __init__ of the decorators is independent, there is no way
# to pass information through them.
# However, the __call__ method of the decorators can be used.
# The way that they are called is from bottom to top. So, the first one
# to call its __call__ method will always be @task. Consequently, the
# @task decorator __call__ method can detect the top decorator and pass
        # a hint to the decorator that has to do the registration (and not to
        # the others).
func_code = ''
got_func_code = False
func = f
while not got_func_code:
try:
from pycompss.util.objects.properties import get_wrapped_sourcelines # noqa
func_code = get_wrapped_sourcelines(func)
got_func_code = True
except IOError:
# There is one or more decorators below the @task -> undecorate
# until possible to get the func code.
# Example of this case: test 19: @timeit decorator below the
# @task decorator.
func = func.__wrapped__
decorator_keys = ("implement",
"constraint",
"task",
"binary",
"mpi",
"compss",
"decaf",
"ompss",
"opencl")
top_decorator = _get_top_decorator(func_code, decorator_keys)
if __debug__:
logger.debug(
"[@TASK] Top decorator of function %s in module %s: %s" %
(f.__name__, self.module_name, str(top_decorator))
)
f.__who_registers__ = top_decorator
        # Non-usual tasks - handled by the runtime without invoking the
        # PyCOMPSs worker. Needed to filter in order not to code the strings
        # when using them in these types of tasks
decorator_filter = ("binary",
"mpi",
"compss",
"decaf",
"ompss",
"opencl")
default = 'task'
task_type = _get_task_type(func_code, decorator_filter, default)
if __debug__:
logger.debug("[@TASK] Task type of function %s in module %s: %s" %
(f.__name__, self.module_name, str(task_type)))
f.__task_type__ = task_type
if task_type == default:
f.__code_strings__ = True
else:
if task_type == "PYTHON_MPI":
for line in func_code[0]:
if "@mpi" in line:
f.__code_strings__ = "binary" not in line
else:
f.__code_strings__ = False
# Get the task signature
# To do this, we will check the frames
import inspect
frames = inspect.getouterframes(inspect.currentframe())
# Pop the __register_task and __call__ functions from the frame
frames = frames[2:]
# Get the application frames
app_frames = []
for frame in frames:
if frame[3] == 'compss_main':
break
else:
app_frames.append(frame)
        # Analyse the frames
if len(app_frames) == 1:
# The task is defined within the main app file.
# This case is never reached with Python 3 since it includes
# frames that are not present with Python 2.
ce_signature = self.module_name + "." + f.__name__
impl_type_args = [self.module_name, f.__name__]
else:
if self.class_name:
# Within class or subclass
ce_signature = self.module_name + '.' + \
self.class_name + '.' + \
f.__name__
impl_type_args = [self.module_name + '.' + self.class_name,
f.__name__]
else:
# Not in a class or subclass
                # This case can be reached in Python 3, where particular
                # frames are included but no class names are found.
ce_signature = self.module_name + "." + f.__name__
impl_type_args = [self.module_name, f.__name__]
# Include the registering info related to @task
impl_signature = ce_signature
impl_constraints = {}
impl_type = "METHOD"
# Maybe some top decorator has already added some parameters
        # These if statements prevent us from overwriting these already
        # existing attributes
# For example, the constraint decorator adds things in the
# impl_constraints field, so it would be nice to not overwrite it!
if current_core_element.get_ce_signature() is None:
current_core_element.set_ce_signature(ce_signature)
else:
# If we are here that means that we come from an implements
# decorator, which means that this core element has already
# a signature
current_core_element.set_impl_signature(ce_signature)
if current_core_element.get_impl_signature() is None:
current_core_element.set_impl_signature(impl_signature)
if current_core_element.get_impl_constraints() is None:
current_core_element.set_impl_constraints(impl_constraints)
if current_core_element.get_impl_type() is None:
current_core_element.set_impl_type(impl_type)
if current_core_element.get_impl_type_args() is None:
current_core_element.set_impl_type_args(impl_type_args)
if current_core_element.get_impl_type() == "PYTHON_MPI":
current_core_element.set_impl_signature("MPI." + impl_signature)
current_core_element.set_impl_type_args(
impl_type_args+current_core_element.get_impl_type_args()[1:])
return impl_signature
def register_task(self, f):
"""
This function is used to register the task in the runtime.
This registration must be done only once on the task decorator
initialization
:param f: Function to be registered
"""
import pycompss.runtime.binding as binding
if __debug__:
logger.debug(
"[@TASK] I have to register the function %s in module %s" %
(f.__name__, self.module_name)
)
logger.debug("[@TASK] %s" % str(f))
binding.register_ce(current_core_element)
@staticmethod
def _getargspec(function):
if IS_PYTHON3:
full_argspec = inspect.getfullargspec(function)
as_args = full_argspec.args
as_varargs = full_argspec.varargs
as_keywords = full_argspec.varkw
as_defaults = full_argspec.defaults
return as_args, as_varargs, as_keywords, as_defaults
else:
return inspect.getargspec(function)
def inspect_user_function_arguments(self):
"""
Inspect the arguments of the user function and store them.
Read the names of the arguments and remember their order.
We will also learn things like if the user function contained
variadic arguments, named arguments and so on.
This will be useful when pairing arguments with the direction
the user has specified for them in the decorator
:return: None, it just adds attributes
"""
try:
arguments = self._getargspec(self.user_function)
self.param_args, self.param_varargs, self.param_kwargs, self.param_defaults = arguments # noqa
except TypeError:
# This is a numba jit declared task
arguments = self._getargspec(self.user_function.py_func)
self.param_args, self.param_varargs, self.param_kwargs, self.param_defaults = arguments # noqa
# It will be easier to deal with functions if we pretend that all have
# the signature f(positionals, *variadic, **named). This is why we are
# substituting
# Nones with default stuff
        # As long as we remember what the user's original intention with the
        # parameters was, we can internally mess with the signature as much as
        # we want. There is no need to add self-imposed constraints here.
        # Also, the very nature of decorators is a huge hint about how we
        # should treat user functions, as most wrappers return a function
# f(*a, **k)
if self.param_varargs is None:
self.param_varargs = 'varargs_type'
if self.param_defaults is None:
self.param_defaults = ()
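    # Assumed illustration of the inspection above (not part of the original
    # code): for
    #     def f(a, b=1, *args, **kwargs): ...
    # param_args == ['a', 'b'], param_varargs == 'args',
    # param_kwargs == 'kwargs' and param_defaults == (1,); when the function
    # has no *varargs, param_varargs falls back to 'varargs_type'.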
def compute_module_name(self):
"""
Compute the user's function module name.
There are various cases:
1) The user function is defined in some file. This is easy, just get
the module returned by inspect.getmodule
2) The user function is in the main module. Retrieve the file and
build the import name from it
3) We are in interactive mode
:return: None, it just modifies self.module_name
"""
import inspect
import os
mod = inspect.getmodule(self.user_function)
self.module_name = mod.__name__
        # If it is a task within a class, the module will be the one where the
        # class is defined, instead of the one where the task is defined.
# This avoids conflicts with task inheritance.
if self.first_arg_name == 'self':
mod = inspect.getmodule(type(self.parameters['self'].object))
self.module_name = mod.__name__
elif self.first_arg_name == 'cls':
self.module_name = self.parameters['cls'].object.__module__
if self.module_name == '__main__' or \
self.module_name == 'pycompss.runtime.launch':
            # The module where the function is defined was run as __main__,
            # so we need to find out the real module name.
# Get the real module name from our launch.py APP_PATH global
# variable
# It is guaranteed that this variable will always exist because
# this code is only executed when we know we are in the master
path = getattr(mod, 'APP_PATH')
# Get the file name
file_name = os.path.splitext(os.path.basename(path))[0]
# Get the module
from pycompss.util.objects.properties import get_module_name
self.module_name = get_module_name(path, file_name)
def compute_function_type(self):
"""
Compute some properties of the user function, as its name,
its import path, and its type (module function, instance method,
class method), etc.
:return: None, just updates self.class_name and self.function_type
"""
from pycompss.runtime.binding import FunctionType
# Check the type of the function called.
# inspect.ismethod(f) does not work here,
# for methods python hasn't wrapped the function as a method yet
# Everything is still a function here, can't distinguish yet
# with inspect.ismethod or isfunction
self.function_type = FunctionType.FUNCTION
self.class_name = ''
if self.first_arg_name == 'self':
self.function_type = FunctionType.INSTANCE_METHOD
self.class_name = type(self.parameters['self'].object).__name__
elif self.first_arg_name == 'cls':
self.function_type = FunctionType.CLASS_METHOD
self.class_name = self.parameters['cls'].object.__name__
# Finally, check if the function type is really a module function or
# a static method.
# Static methods are ONLY supported with Python 3 due to __qualname__
# feature, which enables to know to which class they belong.
# The class name is needed in order to define properly the class_name
# for the correct registration and later invoke.
# Since these methods don't have self, nor cls, they are considered as
# FUNCTIONS to the runtime
if IS_PYTHON3:
name = self.function_name
qualified_name = self.user_function.__qualname__
if name != qualified_name:
# Then there is a class definition before the name in the
# qualified name
self.class_name = qualified_name[:-len(name) - 1]
# -1 to remove the last point
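    # Example of the __qualname__ check above (hypothetical class, not part
    # of this module): for a function defined inside a class, e.g.
    #     class Worker:
    #         def run(self): ...
    # __name__ is 'run' while __qualname__ is 'Worker.run', so class_name
    # is recovered as 'Worker' by stripping '.run' from the qualified name.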
def compute_user_function_information(self):
"""
Compute the function path p and the name n such that
"from p import n" imports self.user_function
:return: None, it just sets self.user_function_path and
self.user_function_name
"""
self.function_name = self.user_function.__name__
# Get the module name (the x part "from x import y"), except for the
# class name
self.compute_module_name()
# Get the function type (function, instance method, class method)
self.compute_function_type()
def add_return_parameters(self):
"""
Modify the return parameters accordingly to the return statement
:return: Nothing, it just creates and modifies self.returns
"""
from collections import OrderedDict
self.returns = OrderedDict()
# Note that returns is by default False
if self.decorator_arguments['returns']:
# A return statement can be the following:
# 1) A type. This means 'this task returns an object of this type'
# 2) An integer N. This means 'this task returns N objects'
# 3) A basic iterable (tuple, list...). This means 'this task
# returns an iterable with the indicated elements inside
from pycompss.util.objects.properties import is_basic_iterable
# We are returning multiple objects until otherwise proven
# It is important to know because this will determine if we will
# return a single object or [a single object] in some cases
self.multi_return = True
if isinstance(self.decorator_arguments['returns'], str):
                # Check if the returns statement contains a string with an
                # integer or a global variable.
# In such case, build a list of objects of value length and
# set it in ret_type.
# Global variable or string wrapping integer value
try:
# Return is hidden by an int as a string.
# i.e., returns="var_int"
num_rets = int(self.decorator_arguments['returns'])
except ValueError:
# Return is hidden by a global variable. i.e., LT_ARGS
try:
num_rets = self.user_function.__globals__.get(
self.decorator_arguments['returns'])
except AttributeError:
# This is a numba jit declared task
num_rets = self.user_function.py_func.__globals__.get(
self.decorator_arguments['returns'])
# Construct hidden multireturn
if num_rets > 1:
to_return = [tuple([]) for _ in range(num_rets)]
else:
to_return = tuple([])
elif is_basic_iterable(self.decorator_arguments['returns']):
# The task returns a basic iterable with some types
# already defined
to_return = self.decorator_arguments['returns']
elif isinstance(self.decorator_arguments['returns'], int):
# The task returns a list of N objects, defined by the int N
to_return = tuple([() for _ in
range(self.decorator_arguments['returns'])])
else:
# The task returns a single object of a single type
# This is also the only case when no multiple objects are
# returned but only one
self.multi_return = False
to_return = [self.decorator_arguments['returns']]
# At this point we have a list of returns
for (i, elem) in enumerate(to_return):
ret_type = parameter.get_compss_type(elem)
self.returns[parameter.get_return_name(i)] = \
parameter.Parameter(p_type=ret_type,
p_object=elem,
p_direction=parameter.OUT)
            # Hopefully, an exception has been thrown if some invalid
            # stuff has been put in the returns field
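    # Hedged summary of the 'returns' forms handled above (illustrative
    # values only, not part of this module):
    #     @task(returns=int)        -> one return object
    #     @task(returns=2)          -> two return objects
    #     @task(returns=(int, str)) -> one object per element of the iterable
    #     @task(returns="NUM_RETS") -> string hiding an int or a global name
    # Each entry becomes an OUT Parameter keyed by
    # parameter.get_return_name(i).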
def master_call(self, *args, **kwargs):
"""
This part deals with task calls in the master's side
Also, this function must return an appropriate number of
future objects that point to the appropriate objects/files.
:return: A function that does "nothing" and returns futures if needed
"""
# This lock makes this decorator able to handle various threads
# calling the same task concurrently
master_lock.acquire()
# Inspect the user function, get information about the arguments and
# their names. This defines self.param_args, self.param_varargs,
# self.param_kwargs, self.param_defaults. And gives non-None default
# values to them if necessary
self.inspect_user_function_arguments()
# Process the parameters, give them a proper direction
self.process_master_parameters(*args, **kwargs)
# Compute the function path, class (if any), and name
self.compute_user_function_information()
# Process the decorators to get the core element information
# It is necessary to decide whether to register or not (the task may
# be inherited, and in this case it has to be registered again with
# the new implementation signature).
impl_signature = self.prepare_core_element_information(
self.user_function)
if not self.registered or self.signature != impl_signature:
self.register_task(self.user_function)
self.registered = True
self.signature = impl_signature
# Reset the global core element to a full-None status, ready for the
# next task! (Note that this region is locked, so no race conditions
# will ever happen here).
current_core_element.reset()
# Did we call this function to only register the associated core
# element? (This can happen when trying)
if register_only:
master_lock.release()
return
# Deal with the return part.
self.add_return_parameters()
if not self.returns:
self.update_return_if_no_returns(self.user_function)
from pycompss.runtime.binding import process_task
# Get deprecated arguments if exist
if 'isReplicated' in self.decorator_arguments:
is_replicated = self.decorator_arguments['isReplicated']
else:
is_replicated = self.decorator_arguments['is_replicated']
if 'isDistributed' in self.decorator_arguments:
is_distributed = self.decorator_arguments['isDistributed']
else:
is_distributed = self.decorator_arguments['is_distributed']
# Process the task
ret = process_task(
self.user_function,
self.module_name,
self.class_name,
self.function_type,
self.parameters,
self.returns,
self.decorator_arguments,
self.computing_nodes,
is_replicated,
is_distributed,
self.decorator_arguments['on_failure'],
self.decorator_arguments['time_out']
)
master_lock.release()
return ret
def get_varargs_direction(self):
"""
Returns the direction of the varargs arguments.
Can be defined in the decorator in two ways:
args = dir, where args is the name of the variadic args tuple, or
varargs_type = dir (for legacy reasons)
:return: Direction of the varargs arguments.
"""
if self.param_varargs not in self.decorator_arguments:
if 'varargsType' in self.decorator_arguments:
self.param_varargs = 'varargsType'
return self.decorator_arguments['varargsType']
else:
return self.decorator_arguments['varargs_type']
return self.decorator_arguments[self.param_varargs]
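    # Assumed usage example (not part of this module): with
    #     @task(args=IN)
    #     def f(*args): ...
    # the variadic tuple is named 'args', so its direction is taken from the
    # decorator entry 'args'; otherwise the legacy 'varargsType' or the
    # default 'varargs_type' entry is used, as coded above.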
def get_default_direction(self, var_name):
"""
Returns the default direction for a given parameter
:return: An identifier of the direction
"""
# We are the 'self' or 'cls' in an instance or classmethod that
# modifies the given class, so we are an INOUT, CONCURRENT or
# COMMUTATIVE
self_dirs = [parameter.DIRECTION.INOUT,
parameter.DIRECTION.CONCURRENT,
parameter.DIRECTION.COMMUTATIVE]
if 'targetDirection' in self.decorator_arguments:
target_label = 'targetDirection'
else:
target_label = 'target_direction'
if self.decorator_arguments[target_label].direction in self_dirs and \
var_name in ['self', 'cls'] and \
self.param_args and \
self.param_args[0] == var_name:
return self.decorator_arguments[target_label]
return parameter.get_new_parameter('IN')
def process_master_parameters(self, *args, **kwargs):
"""
Process all the input parameters.
Basically, processing means "build a dictionary of <name, parameter>,
where each parameter has an associated Parameter object".
This function also assigns default directions to parameters.
:return: None, it only modifies self.parameters
"""
from collections import OrderedDict
parameter_values = OrderedDict()
        # If we have an MPI, COMPSs or MultiNode decorator above us, we should
        # have computing_nodes as a kwarg; detect it and remove it.
        # Otherwise we set it to 1.
self.computing_nodes = kwargs.pop('computing_nodes', 1)
# It is important to know the name of the first argument to determine
# if we are dealing with a class or instance method (i.e: first
# argument is named self)
self.first_arg_name = None
# Process the positional arguments
        # Some of these positional arguments may not have been
        # explicitly defined
num_positionals = min(len(self.param_args), len(args))
for (var_name, var_value) in zip(self.param_args[:num_positionals],
args[:num_positionals]):
if self.first_arg_name is None:
self.first_arg_name = var_name
parameter_values[var_name] = var_value
num_defaults = len(self.param_defaults)
# Give default values to all the parameters that have a
# default value and are not already set
# As an important observation, defaults are matched as follows:
# defaults[-1] goes with positionals[-1]
# defaults[-2] goes with positionals[-2]
# ...
# Also, |defaults| <= |positionals|
for (var_name, default_value) in reversed(list(zip(list(reversed(self.param_args))[:num_defaults], # noqa
list(reversed(self.param_defaults))))): # noqa
if var_name not in parameter_values:
real_var_name = parameter.get_kwarg_name(var_name)
parameter_values[real_var_name] = default_value
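        # Tiny illustration of the matching above (hypothetical signature):
        # for def f(a, b, c=3, d=4) called as f(1, 2), the defaults (3, 4)
        # are paired right-to-left with (c, d), so c and d are stored here
        # under their kwarg names with values 3 and 4.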
# Process variadic and keyword arguments
# Note that they are stored with custom names
# This will allow us to determine the class of each parameter
# and their order in the case of the variadic ones
# Process the variadic arguments
for (i, var_arg) in enumerate(args[num_positionals:]):
parameter_values[parameter.get_vararg_name(self.param_varargs, i)] = var_arg # noqa
# Process keyword arguments
for (name, value) in kwargs.items():
parameter_values[parameter.get_kwarg_name(name)] = value
# Build a dictionary of parameters
self.parameters = OrderedDict()
# Assign directions to parameters
for var_name in parameter_values.keys():
# Is the argument a vararg? or a kwarg? Then check the direction
# for varargs or kwargs
if parameter.is_vararg(var_name):
self.parameters[var_name] = parameter.get_parameter_copy(self.get_varargs_direction()) # noqa
elif parameter.is_kwarg(var_name):
real_name = parameter.get_name_from_kwarg(var_name)
self.parameters[var_name] = self.decorator_arguments.get(real_name, # noqa
self.get_default_direction(real_name)) # noqa
else:
# The argument is named, check its direction
# Default value = IN if not class or instance method and
# isModifier, INOUT otherwise
# see self.get_default_direction
# Note that if we have something like @task(self = IN) it
# will have priority over the default
# direction resolution, even if this implies a contradiction
# with the target_direction flag
self.parameters[var_name] = self.decorator_arguments.get(var_name, # noqa
self.get_default_direction(var_name)) # noqa
# If the parameter is a FILE then its type will already be defined,
            # and get_compss_type will mislabel it as a TYPE.STRING
if self.parameters[var_name].type is None:
self.parameters[var_name].type = parameter.get_compss_type(parameter_values[var_name]) # noqa
if self.parameters[var_name].type == parameter.TYPE.FILE:
self.parameters[var_name].file_name = parameter_values[var_name] # noqa
else:
self.parameters[var_name].object = parameter_values[var_name]
# Check the arguments - Look for mandatory and unexpected arguments
supported_args = SUPPORTED_ARGUMENTS + DEPRECATED_ARGUMENTS + self.param_args # noqa
check_arguments(MANDATORY_ARGUMENTS,
DEPRECATED_ARGUMENTS,
supported_args,
list(self.decorator_arguments.keys()),
"@task")
def get_parameter_direction(self, name):
"""
Returns the direction of any parameter
:param name: Name of the parameter
:return: Its direction inside this task
"""
if parameter.is_vararg(name):
return self.get_varargs_direction()
elif parameter.is_return(name):
return parameter.get_new_parameter('OUT')
orig_name = parameter.get_name_from_kwarg(name)
if orig_name in self.decorator_arguments:
return self.decorator_arguments[orig_name]
return self.get_default_direction(orig_name)
def update_direction_of_worker_parameters(self, args):
"""
        Update worker parameter directions; this will be useful to determine
        if files should be written later.
:param args: List of arguments
:return: None. Modifies args
"""
for arg in args:
arg.direction = self.get_parameter_direction(arg.name)
def is_parameter_object(self, name):
"""
Given the name of a parameter, determine if it is an object or not
:param name: Name of the parameter
:return: True if the parameter is a (serializable) object
"""
original_name = parameter.get_original_name(name)
# Get the args parameter object
if parameter.is_vararg(original_name):
return self.get_varargs_direction().type is None
# Is this parameter annotated in the decorator?
if original_name in self.decorator_arguments:
annotated = [parameter.TYPE.COLLECTION,
parameter.TYPE.EXTERNAL_STREAM,
None]
return self.decorator_arguments[original_name].type in annotated
# The parameter is not annotated in the decorator, so (by default)
# return True
return True
    def is_parameter_file_collection(self, name):
        """
        Given the name of a parameter, determine if it is a file collection
        or not
:param name: Name of the parameter
:return: True if the parameter is a file collection
"""
original_name = parameter.get_original_name(name)
# Get the args parameter object
if parameter.is_vararg(original_name):
return self.get_varargs_direction().is_file_collection
# Is this parameter annotated in the decorator?
if original_name in self.decorator_arguments:
return self.decorator_arguments[original_name].is_file_collection
# The parameter is not annotated in the decorator, so (by default)
# return False
return False
def reveal_objects(self, args):
"""
This function takes the arguments passed from the persistent worker
and treats them to get the proper parameters for the user function.
:param args: Arguments
:return: None
"""
def storage_supports_pipelining():
# Some storage implementations use pipelining
# Pipelining means "accumulate the getByID queries and perform them
# in a single megaquery".
# If this feature is not available (storage does not support it)
# getByID operations will be performed one after the other
try:
import storage.api
return storage.api.__pipelining__
except Exception:
return False
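        # Sketch of the difference (based on the storage.api usage below):
        # without pipelining, each EXTERNAL_PSCO argument triggers its own
        # getByID(key) call inside retrieve_content(); with pipelining, all
        # keys are collected and resolved with a single getByID(key1, key2,
        # ...) call before the per-argument loop.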
def retrieve_content(arg, name_prefix):
# This case is special, as a FILE can actually mean a FILE or an
# object that is serialized in a file
if parameter.is_vararg(arg.name):
self.param_varargs = arg.name
if arg.type == parameter.TYPE.FILE:
if self.is_parameter_object(arg.name):
# The object is stored in some file, load and deserialize
arg.content = deserialize_from_file(
arg.file_name.split(':')[-1]
)
else:
# The object is a FILE, just forward the path of the file
# as a string parameter
arg.content = arg.file_name.split(':')[-1]
elif arg.type == parameter.TYPE.EXTERNAL_STREAM:
arg.content = deserialize_from_file(arg.file_name)
elif arg.type == parameter.TYPE.COLLECTION:
arg.content = []
# This field is exclusive for COLLECTION_T parameters, so make
# sure you have checked this parameter is a collection before
# consulting it
arg.collection_content = []
col_f_name = arg.file_name.split(':')[-1]
if not os.path.exists(col_f_name):
col_f_name = "../" + col_f_name
for (i, line) in enumerate(open(col_f_name, 'r')):
content_type, content_file = line.strip().split(' ')
# Same naming convention as in COMPSsRuntimeImpl.java
sub_name = "%s.%d" % (arg.name, i)
if name_prefix:
sub_name = "%s.%s" % (name_prefix, arg.name)
else:
sub_name = "@%s" % sub_name
if not self.is_parameter_file_collection(arg.name):
sub_arg, _ = build_task_parameter(int(content_type),
None,
"",
sub_name,
content_file)
# Recursively call the retrieve method, fill the content
# field in our new taskParameter object
retrieve_content(sub_arg, sub_name)
arg.content.append(sub_arg.content)
arg.collection_content.append(sub_arg)
else:
arg.content.append(content_file)
arg.collection_content.append(content_file)
elif not storage_supports_pipelining() and \
arg.type == parameter.TYPE.EXTERNAL_PSCO:
# The object is a PSCO and the storage does not support
# pipelining, do a single getByID of the PSCO
from storage.api import getByID
arg.content = getByID(arg.key)
            # If we have not entered any of these cases, we will assume
            # that the object was a basic type and the content is already
            # available and properly cast by the python worker
if storage_supports_pipelining():
# Perform the pipelined getByID operation
pscos = [x for x in args if x.type == parameter.TYPE.EXTERNAL_PSCO]
identifiers = [x.key for x in pscos]
from storage.api import getByID
objects = getByID(*identifiers)
            # Just update each TaskParameter object with its retrieved content
            for (content, task_param) in zip(objects, pscos):
                task_param.content = content
# Deal with all the parameters that are NOT returns
for arg in [x for x in args if isinstance(x, parameter.TaskParameter) and not parameter.is_return(x.name)]: # noqa
retrieve_content(arg, "")
def worker_call(self, *args, **kwargs):
"""
This part deals with task calls in the worker's side
Note that the call to the user function is made by the worker,
not by the user code.
:return: A function that calls the user function with the given
parameters and does the proper serializations and updates
the affected objects.
"""
# Self definition (only used when defined in the task)
self_type = None
self_value = None
# All parameters are in the same args list. At the moment we only know
# the type, the name and the "value" of the parameter. This value may
# be treated to get the actual object (e.g: deserialize it, query the
# database in case of persistent objects, etc.)
self.reveal_objects(args)
        # After this line all the objects in args have a "content" field; now
        # we will segregate them into user positional and variadic args
user_args = []
# User named args (kwargs)
user_kwargs = {}
# Return parameters, save them apart to match the user returns with
# the internal parameters
ret_params = []
for arg in args:
# Just fill the three data structures declared above
# Deal with the self parameter (if any)
if not isinstance(arg, parameter.TaskParameter):
user_args.append(arg)
# All these other cases are all about regular parameters
elif parameter.is_return(arg.name):
ret_params.append(arg)
elif parameter.is_kwarg(arg.name):
user_kwargs[parameter.get_name_from_kwarg(arg.name)] = \
arg.content
else:
if parameter.is_vararg(arg.name):
self.param_varargs = parameter.get_varargs_name(arg.name)
# Apart from the names we preserve the original order, so it
# is guaranteed that named positional arguments will never be
# swapped with variadic ones or anything similar
user_args.append(arg.content)
num_returns = len(ret_params)
# Save the self object type and value before executing the task
        # (it could be persisted inside if it's a persistent object)
has_self = False
if args and not isinstance(args[0], parameter.TaskParameter):
# Then the first arg is self
has_self = True
self_type = parameter.get_compss_type(args[0])
if self_type == parameter.TYPE.EXTERNAL_PSCO:
self_value = args[0].getID()
else:
                # Since we are checking the type of the deserialized self
                # parameter, get_compss_type will report its type as
                # parameter.TYPE.OBJECT. However, although self is an object,
                # it is always a file for the runtime. So we must force its
                # type to avoid the return message notifying a new type
                # "object", which is not supported for python objects in
                # the runtime.
self_type = parameter.TYPE.FILE
self_value = 'null'
# Tracing hook is disabled by default during the user code of the task.
# The user can enable it with tracing_hook=True in @task decorator for
# specific tasks or globally with the COMPSS_TRACING_HOOK=true
# environment variable.
restore_hook = False
pro_f = None
if kwargs['compss_tracing']:
global_tracing_hook = False
if TRACING_HOOK_ENV_VAR in os.environ:
hook_enabled = os.environ[TRACING_HOOK_ENV_VAR] == "true"
global_tracing_hook = hook_enabled
if self.decorator_arguments['tracing_hook'] or global_tracing_hook:
# The user wants to keep the tracing hook
pass
else:
# When Extrae library implements the function to disable,
# use it, as:
# import pyextrae
# pro_f = pyextrae.shutdown()
# Since it is not available yet, we manage the tracing hook
# by ourselves
pro_f = sys.getprofile()
sys.setprofile(None)
restore_hook = True
# Call the user function with all the reconstructed parameters and
# get the return values
if self.decorator_arguments['numba']:
# Import all supported functionalities
from numba import jit
from numba import njit
from numba import generated_jit
from numba import vectorize
from numba import guvectorize
from numba import stencil
from numba import cfunc
numba_mode = self.decorator_arguments['numba']
numba_flags = self.decorator_arguments['numba_flags']
if type(numba_mode) is dict:
# Use the flags defined by the user
numba_flags['cache'] = True # Always force cache
user_returns = \
jit(self.user_function,
**numba_flags)(*user_args, **user_kwargs)
elif numba_mode is True or numba_mode == 'jit':
numba_flags['cache'] = True # Always force cache
user_returns = jit(self.user_function,
**numba_flags)(*user_args,
**user_kwargs)
# Alternative way of calling:
# user_returns = jit(cache=True)(self.user_function) \
# (*user_args, **user_kwargs)
elif numba_mode == 'generated_jit':
user_returns = generated_jit(self.user_function,
**numba_flags)(*user_args,
**user_kwargs)
elif numba_mode == 'njit':
numba_flags['cache'] = True # Always force cache
user_returns = njit(self.user_function,
**numba_flags)(*user_args, **user_kwargs)
elif numba_mode == 'vectorize':
numba_signature = self.decorator_arguments['numba_signature']
user_returns = vectorize(
numba_signature,
**numba_flags
)(self.user_function)(*user_args, **user_kwargs)
elif numba_mode == 'guvectorize':
numba_signature = self.decorator_arguments['numba_signature']
numba_decl = self.decorator_arguments['numba_declaration']
user_returns = guvectorize(
numba_signature,
numba_decl,
**numba_flags
)(self.user_function)(*user_args, **user_kwargs)
elif numba_mode == 'stencil':
user_returns = stencil(
**numba_flags
)(self.user_function)(*user_args, **user_kwargs)
elif numba_mode == 'cfunc':
numba_signature = self.decorator_arguments['numba_signature']
user_returns = cfunc(
numba_signature
)(self.user_function).ctypes(*user_args,
**user_kwargs)
else:
raise Exception("Unsupported numba mode.")
else:
# Normal task execution
user_returns = self.user_function(*user_args, **user_kwargs)
# Reestablish the hook if it was disabled
if restore_hook:
sys.setprofile(pro_f)
        # Manage all the possible outputs of the task and build the new return
        # types and values
def get_file_name(file_path):
return file_path.split(':')[-1]
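        # Assumed file_name layout (inferred from the split above): values
        # such as '<prefix>:/path/to/serialized_object' keep only the
        # trailing path component, which is then used for (de)serialization.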
python_mpi = False
if kwargs["python_MPI"]:
python_mpi = True
# Deal with INOUTs
for arg in [x for x in args if isinstance(x, parameter.TaskParameter) and self.is_parameter_object(x.name)]: # noqa
original_name = parameter.get_original_name(arg.name)
param = self.decorator_arguments.get(original_name,
self.get_default_direction(original_name)) # noqa
if (param.direction == parameter.DIRECTION.INOUT or
param.direction == parameter.DIRECTION.COMMUTATIVE) and \
not (arg.type == parameter.TYPE.EXTERNAL_PSCO or
is_psco(arg.content)):
                # If it is INOUT and not a PSCO, serialize to file
# We can not use here:
# param.type != parameter.TYPE.EXTERNAL_PSCO
# since param.type has the old type
if arg.type == parameter.TYPE.COLLECTION:
if not self.is_parameter_file_collection(arg.name):
def get_collection_objects(content, arg):
if arg.type == parameter.TYPE.COLLECTION:
for (new_content, elem) in zip(arg.content, arg.collection_content): # noqa
for sub_elem in get_collection_objects(new_content, elem): # noqa
yield sub_elem
else:
yield (content, arg)
for (content, elem) in get_collection_objects(arg.content, arg): # noqa
f_name = get_file_name(elem.file_name)
if python_mpi:
serialize_to_file_mpienv(content, f_name, False)
else:
serialize_to_file(content, f_name)
else:
f_name = get_file_name(arg.file_name)
if python_mpi:
serialize_to_file_mpienv(arg.content, f_name, False)
else:
serialize_to_file(arg.content, f_name)
# Deal with returns (if any)
if num_returns > 0:
if num_returns == 1:
# Generalize the return case to multi-return to simplify the
# code
user_returns = [user_returns]
elif num_returns > 1 and python_mpi:
def get_ret_rank(ret_params):
from mpi4py import MPI
return [ret_params[MPI.COMM_WORLD.rank]]
user_returns = [user_returns]
ret_params = get_ret_rank(ret_params)
# Note that we are implicitly assuming that the length of the user
# returns matches the number of return parameters
for (obj, param) in zip(user_returns, ret_params):
# If the object is a PSCO, do not serialize to file
if param.type == parameter.TYPE.EXTERNAL_PSCO or is_psco(obj):
continue
# Serialize the object
# Note that there is no "command line optimization" in the
# returns, as we always pass them as files.
# This is due to the asymmetry in worker-master communications
# and because it also makes it easier for us to deal with
# returns in that format
f_name = get_file_name(param.file_name)
if python_mpi:
                    rank_zero_reduce = num_returns == 1
serialize_to_file_mpienv(obj, f_name, rank_zero_reduce)
else:
serialize_to_file(obj, f_name)
# We must notify COMPSs when types are updated
# Potential update candidates are returns and INOUTs
# But the whole types and values list must be returned
# new_types and new_values correspond to "parameters self returns"
new_types, new_values = [], []
# Add parameter types and value
params_start = 1 if has_self else 0
params_end = len(args) - num_returns + 1
# Update new_types and new_values with the args list
# The results parameter is a boolean to distinguish the error message.
for arg in args[params_start:params_end - 1]:
# Loop through the arguments and update new_types and new_values
if not isinstance(arg, parameter.TaskParameter):
                raise Exception('ERROR: A task parameter arrived as an' +
                                ' object instead of as a TaskParameter' +
                                ' when building the task result message.')
else:
original_name = parameter.get_original_name(arg.name)
param = self.decorator_arguments.get(original_name,
self.get_default_direction(original_name)) # noqa
if arg.type == parameter.TYPE.EXTERNAL_PSCO:
# It was originally a persistent object
new_types.append(parameter.TYPE.EXTERNAL_PSCO)
new_values.append(arg.key)
elif is_psco(arg.content) and \
param.direction != parameter.DIRECTION.IN:
# It was persisted in the task
new_types.append(parameter.TYPE.EXTERNAL_PSCO)
new_values.append(arg.content.getID())
else:
# Any other return object: same type and null value
new_types.append(arg.type)
new_values.append('null')
# Check old targetDirection
if 'targetDirection' in self.decorator_arguments:
target_label = 'targetDirection'
else:
target_label = 'target_direction'
# Add self type and value if exist
if has_self:
if self.decorator_arguments[target_label].direction == parameter.DIRECTION.INOUT: # noqa
# Check if self is a PSCO that has been persisted inside the
# task and target_direction.
# Update self type and value
self_type = parameter.get_compss_type(args[0])
if self_type == parameter.TYPE.EXTERNAL_PSCO:
self_value = args[0].getID()
else:
# Self can only be of type FILE, so avoid the last update
# of self_type
self_type = parameter.TYPE.FILE
self_value = 'null'
new_types.append(self_type)
new_values.append(self_value)
# Add return types and values
# Loop through the rest of the arguments and update new_types and
# new_values.
# assert len(args[params_end - 1:]) == len(user_returns)
# add_parameter_new_types_and_values(args[params_end - 1:], True)
if num_returns > 0:
for ret in user_returns:
ret_type = parameter.get_compss_type(ret)
if ret_type == parameter.TYPE.EXTERNAL_PSCO:
ret_value = ret.getID()
else:
# Returns can only be of type FILE, so avoid the last
# update of ret_type
ret_type = parameter.TYPE.FILE
ret_value = 'null'
new_types.append(ret_type)
new_values.append(ret_value)
return new_types, new_values, self.decorator_arguments[target_label]
def sequential_call(self, *args, **kwargs):
"""
The easiest case: just call the user function and return whatever it
returns.
        :return: The result of the user function call
"""
# Inspect the user function, get information about the arguments and
# their names
# This defines self.param_args, self.param_varargs,
# self.param_kwargs and self.param_defaults
# And gives non-None default values to them if necessary
from pycompss.api.dummy.task import task as dummy_task
d_t = dummy_task(args, kwargs)
return d_t.__call__(self.user_function)(*args, **kwargs)
# task can also be typed as Task
task = Task
| mF2C/COMPSs | compss/programming_model/bindings/python/src/pycompss/api/task.py | Python | apache-2.0 | 71,968 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to extract MSIE zone information."""
import argparse
import logging
import sys
from dfvfs.helpers import volume_scanner as dfvfs_volume_scanner
from winregrc import msie_zone_info
from winregrc import output_writers
from winregrc import volume_scanner
class StdoutWriter(output_writers.StdoutOutputWriter):
"""Stdout output writer."""
_DEFAULT_ZONE_NAMES = {
'0': 'My Computer',
'1': 'Local Intranet',
'2': 'Trusted sites',
'3': 'Internet',
'4': 'Restricted sites'}
# Sources:
# http://support.microsoft.com/kb/182569
# http://technet.microsoft.com/en-us/library/cc783259(v=ws.10).aspx
_CONTROL_DESCRIPTIONS = {
'1001': 'Download signed ActiveX controls',
'1004': 'Download unsigned ActiveX controls',
'1200': 'Run ActiveX controls and plug-ins',
'1201': ('Initialize and script ActiveX controls not marked as safe for '
'scripting'),
'1206': 'Allow scripting of Internet Explorer Web browser control',
'1207': 'Reserved',
'1208': 'Allow previously unused ActiveX controls to run without prompt',
'1209': 'Allow Scriptlets',
'120A': 'Override Per-Site (domain-based) ActiveX restrictions',
'120B': 'Override Per-Site (domain-based) ActiveX restrictions',
'1400': 'Active scripting',
'1402': 'Scripting of Java applets',
'1405': 'Script ActiveX controls marked as safe for scripting',
'1406': 'Access data sources across domains',
'1407': 'Allow Programmatic clipboard access',
'1408': 'Reserved',
'1601': 'Submit non-encrypted form data',
'1604': 'Font download',
'1605': 'Run Java',
'1606': 'Userdata persistence',
'1607': 'Navigate sub-frames across different domains',
'1608': 'Allow META REFRESH',
'1609': 'Display mixed content',
'160A': 'Include local directory path when uploading files to a server',
'1800': 'Installation of desktop items',
'1802': 'Drag and drop or copy and paste files',
'1803': 'File Download',
'1804': 'Launching programs and files in an IFRAME',
'1805': 'Launching programs and files in webview',
'1806': 'Launching applications and unsafe files',
'1807': 'Reserved',
'1808': 'Reserved',
'1809': 'Use Pop-up Blocker',
'180A': 'Reserved',
'180B': 'Reserved',
'180C': 'Reserved',
'180D': 'Reserved',
'1A00': 'Logon',
'1A02': 'Allow persistent cookies that are stored on your computer',
'1A03': 'Allow per-session cookies (not stored)',
'1A04': ('Don\'t prompt for client certificate selection when no '
'certificates or only one certificate exists'),
'1A05': 'Allow 3rd party persistent cookies',
'1A06': 'Allow 3rd party session cookies',
'1A10': 'Privacy Settings',
'1C00': 'Java permissions',
'1E05': 'Software channel permissions',
'1F00': 'Reserved',
'2000': 'Binary and script behaviors',
'2001': 'Run components signed with Authenticode',
'2004': 'Run components not signed with Authenticode',
'2100': 'Open files based on content, not file extension',
'2101': ('Web sites in less privileged web content zone can navigate '
'into this zone'),
'2102': ('Allow script initiated windows without size or position '
'constraints'),
'2103': 'Allow status bar updates via script',
'2104': 'Allow websites to open windows without address or status bars',
'2105': 'Allow websites to prompt for information using scripted windows',
'2200': 'Automatic prompting for file downloads',
'2201': 'Automatic prompting for ActiveX controls',
'2300': 'Allow web pages to use restricted protocols for active content',
'2301': 'Use Phishing Filter',
'2400': '.NET Framework: XAML browser applications',
'2401': '.NET Framework: XPS documents',
'2402': '.NET Framework: Loose XAML',
'2500': 'Turn on Protected Mode [Vista only setting]',
'2600': 'Enable .NET Framework setup'}
_CONTROL_VALUES_COMMON_ENABLE = {
0x00000000: 'Enable',
0x00000001: 'Prompt',
0x00000003: 'Disable',
0x00010000: 'Administrator approved'}
_CONTROL_VALUES_COMMON_SAFETY = {
0x00010000: 'High safety',
0x00020000: 'Medium safety',
0x00030000: 'Low safety'}
_CONTROL_VALUES_1A00 = {
0x00000000: 'Automatic logon with current user name and password',
0x00010000: 'Prompt for user name and password',
0x00020000: 'Automatic logon only in Intranet zone',
0x00030000: 'Anonymous logon'}
_CONTROL_VALUES_1C00 = {
0x00000000: 'Disable Java',
0x00010000: 'High safety',
0x00020000: 'Medium safety',
0x00030000: 'Low safety',
0x00800000: 'Custom'}
def _GetControlValueDescription(self, control, control_value):
"""Retrieves the description of a specific control value.
Args:
control (str): control.
control_value (str): value to which the control is set.
Returns:
str: description of the control value or None if not available.
"""
if control in (
'1001', '1004', '1200', '1201', '1400', '1402', '1405', '1406', '1407',
'1601', '1604', '1606', '1607', '1608', '1609', '1800', '1802', '1803',
'1804', '1809', '1A04', '2000', '2001', '2004', '2100', '2101', '2102',
'2200', '2201', '2300'):
return self._CONTROL_VALUES_COMMON_ENABLE.get(control_value, None)
if control == '1A00':
return self._CONTROL_VALUES_1A00.get(control_value, None)
if control == '1C00':
return self._CONTROL_VALUES_1C00.get(control_value, None)
if control == '1E05':
return self._CONTROL_VALUES_COMMON_SAFETY.get(control_value, None)
return None
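  # Illustrative example (values taken from the tables above):
  # _GetControlValueDescription('1A00', 0x00020000) returns
  # 'Automatic logon only in Intranet zone', while an unknown control or
  # value returns None and the caller falls back to printing the raw number.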
def WriteZoneInformation(self, zone_information):
"""Writes MSIE zone information to the output.
Args:
zone_information (MSIEZoneInformation): MSIE zone information.
"""
zone_name = zone_information.zone_name
if not zone_name:
zone_name = self._DEFAULT_ZONE_NAMES.get(zone_information.zone, None)
if zone_name:
text = 'Zone\t\t\t: {0:s} ({1:s})\n'.format(
zone_information.zone, zone_name)
else:
text = 'Zone\t\t\t: {0:s}\n'.format(zone_information.zone)
self.WriteText(text)
control_description = self._CONTROL_DESCRIPTIONS.get(
zone_information.control, None)
if control_description:
text = 'Control\t\t\t: {0:s} ({1:s})\n'.format(
zone_information.control, control_description)
else:
text = 'Control\t\t\t: {0:s}\n'.format(zone_information.control)
self.WriteText(text)
control_value_description = self._GetControlValueDescription(
zone_information.control, zone_information.control_value)
if control_value_description:
text = 'Value\t\t\t: {0!s} ({1:s})\n'.format(
zone_information.control_value, control_value_description)
else:
text = 'Value\t\t\t: {0!s}\n'.format(zone_information.control_value)
self.WriteText(text)
self.WriteText('\n')
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts the MSIE zone information from the Windows Registry.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH', default=None,
help=(
'path of the volume containing C:\\Windows, the filename of '
'a storage media image containing the C:\\Windows directory, '
'or the path of a NTUSER.DAT or SOFTWARE Registry file.'))
options = argument_parser.parse_args()
if not options.source:
print('Source value is missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
mediator = volume_scanner.WindowsRegistryVolumeScannerMediator()
scanner = volume_scanner.WindowsRegistryVolumeScanner(mediator=mediator)
volume_scanner_options = dfvfs_volume_scanner.VolumeScannerOptions()
volume_scanner_options.partitions = ['all']
volume_scanner_options.snapshots = ['none']
volume_scanner_options.volumes = ['none']
if not scanner.ScanForWindowsVolume(
options.source, options=volume_scanner_options):
print(('Unable to retrieve the volume with the Windows directory from: '
'{0:s}.').format(options.source))
print('')
return False
collector_object = msie_zone_info.MSIEZoneInformationCollector(
debug=options.debug)
output_writer_object = StdoutWriter()
if not output_writer_object.Open():
print('Unable to open output writer.')
print('')
return False
try:
has_results = False
for zone_information in collector_object.Collect(scanner.registry):
output_writer_object.WriteZoneInformation(zone_information)
has_results = True
finally:
output_writer_object.Close()
if not has_results:
print('No MSIE zone information found.')
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
| libyal/winreg-kb | scripts/msie_zone_info.py | Python | apache-2.0 | 9,437 |
"""
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import signal
import subprocess
import traceback
import re
import shutil
import urwid
from subprocess import CalledProcessError
from bzt.modules.moves import FancyURLopener, iteritems
from bzt.engine import ScenarioExecutor, Scenario, FileLister
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.utils import shell_exec
from bzt.utils import unzip, download_progress_hook, humanize_time
from bzt.modules.console import WidgetProvider
class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister):
"""
Grinder executor module
"""
# OLD_DOWNLOAD_LINK = "http://switch.dl.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \
# "/grinder-{version}-binary.zip"
DOWNLOAD_LINK = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}" \
"/grinder-{version}-binary.zip/download"
VERSION = "3.11"
def __init__(self):
super(GrinderExecutor, self).__init__()
self.script = None
self.properties_file = None
self.kpi_file = None
self.process = None
self.start_time = None
self.end_time = None
self.retcode = None
self.reader = None
self.stdout_file = None
self.stderr_file = None
self.widget = None
def __write_base_props(self, fds):
"""
        Write base properties and base properties file contents to fds
        :param fds: open file object of the generated .properties file
:return:
"""
base_props_file = self.settings.get("properties-file", "")
if base_props_file:
fds.write("# Base Properies File Start: %s\n" % base_props_file)
with open(base_props_file) as bpf:
fds.write(bpf.read())
fds.write("# Base Properies File End: %s\n\n" % base_props_file)
# base props
base_props = self.settings.get("properties")
if base_props:
fds.write("# Base Properies Start\n")
for key, val in iteritems(base_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Base Properies End\n\n")
def __write_scenario_props(self, fds, scenario):
"""
Write scenario props and scenario file props to fds
        :param fds: open file object of the generated .properties file
        :param scenario: scenario dict
:return:
"""
script_props_file = scenario.get("properties-file", "")
if script_props_file:
            fds.write(
                "# Script Properties File Start: %s\n" % script_props_file)
            with open(script_props_file) as spf:
                fds.write(spf.read())
            fds.write(
                "# Script Properties File End: %s\n\n" % script_props_file)
# scenario props
local_props = scenario.get("properties")
if local_props:
fds.write("# Scenario Properies Start\n")
for key, val in iteritems(local_props):
fds.write("%s=%s\n" % (key, val))
fds.write("# Scenario Properies End\n\n")
def __write_bzt_props(self, fds):
"""
Write bzt properties to fds
        :param fds: open file object of the generated .properties file
:return:
"""
fds.write("# BZT Properies Start\n")
fds.write("grinder.hostID=grinder-bzt\n")
fds.write("grinder.script=%s\n" % os.path.realpath(self.script))
dirname = os.path.realpath(self.engine.artifacts_dir)
fds.write("grinder.logDirectory=%s\n" % dirname)
load = self.get_load()
if load.concurrency:
if load.ramp_up:
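                # Start one worker process per interval so the ramp-up time is spread evenly across all processes.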
interval = int(1000 * load.ramp_up / load.concurrency)
fds.write("grinder.processIncrementInterval=%s\n" % interval)
fds.write("grinder.processes=%s\n" % int(load.concurrency))
fds.write("grinder.runs=%s\n" % load.iterations)
fds.write("grinder.processIncrement=1\n")
if load.duration:
fds.write("grinder.duration=%s\n" % int(load.duration * 1000))
fds.write("# BZT Properies End\n")
def prepare(self):
"""
:return:
"""
scenario = self.get_scenario()
self.__check_grinder()
if Scenario.SCRIPT in scenario:
self.script = self.engine.find_file(scenario[Scenario.SCRIPT])
self.engine.existing_artifact(self.script)
elif "requests" in scenario:
self.script = self.__scenario_from_requests()
else:
raise ValueError("There must be a scenario file to run Grinder")
self.properties_file = self.engine.create_artifact("grinder", ".properties")
with open(self.properties_file, 'w') as fds:
self.__write_base_props(fds)
self.__write_scenario_props(fds, scenario)
self.__write_bzt_props(fds)
# modify file path in script
with open(self.properties_file, 'rt') as fds:
prop_contents = fds.read()
resource_files, modified_contents = self.__get_res_files_from_script(prop_contents)
if resource_files:
with open(self.properties_file, 'wt') as fds:
fds.write(modified_contents)
# FIXME: multi-grinder executions have different names
self.kpi_file = os.path.join(self.engine.artifacts_dir, "grinder-bzt-kpi.log")
self.reader = DataLogReader(self.kpi_file, self.log)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
def startup(self):
"""
Should start the tool as fast as possible.
"""
cmdline = ["java", "-classpath",
os.path.dirname(__file__) + os.path.pathsep + os.path.realpath(self.settings.get("path"))]
cmdline += ["net.grinder.Grinder", self.properties_file]
self.start_time = time.time()
out = self.engine.create_artifact("grinder-stdout", ".log")
err = self.engine.create_artifact("grinder-stderr", ".log")
self.stdout_file = open(out, "w")
self.stderr_file = open(err, "w")
self.process = shell_exec(cmdline, cwd=self.engine.artifacts_dir,
stdout=self.stdout_file,
stderr=self.stderr_file)
def check(self):
"""
        Checks if the tool is still running. Also checks that the resulting log
        contains data and raises an exception otherwise.
:return: bool
:raise RuntimeWarning:
"""
if self.widget:
self.widget.update()
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
self.log.info("Grinder exit code: %s", self.retcode)
raise RuntimeError("Grinder exited with non-zero code")
if self.kpi_file:
if not os.path.exists(self.kpi_file) \
or not os.path.getsize(self.kpi_file):
msg = "Empty results log, most likely the tool failed: %s"
raise RuntimeWarning(msg % self.kpi_file)
return True
return False
def post_process(self):
"""
Collect data file artifact
"""
if self.kpi_file:
self.engine.existing_artifact(self.kpi_file)
def shutdown(self):
"""
        If the tool is still running, stop it.
"""
while self.process and self.process.poll() is None:
self.log.info("Terminating Grinder PID: %s", self.process.pid)
time.sleep(1)
try:
os.killpg(self.process.pid, signal.SIGTERM)
except OSError as exc:
self.log.debug("Failed to terminate: %s", exc)
if self.stdout_file:
self.stdout_file.close()
if self.stderr_file:
self.stderr_file.close()
if self.start_time:
self.end_time = time.time()
self.log.debug("Grinder worked for %s seconds",
self.end_time - self.start_time)
def __scenario_from_requests(self):
"""
Generate grinder scenario from requests
:return: script
"""
script = self.engine.create_artifact("requests", ".py")
tpl = os.path.join(os.path.dirname(__file__), "grinder-requests.tpl")
self.log.debug("Generating grinder scenario: %s", tpl)
with open(script, 'w') as fds:
with open(tpl) as tds:
fds.write(tds.read())
for request in self.get_scenario().get_requests():
line = '\t\trequest.%s("%s")\n' % (request.method, request.url)
fds.write(line)
return script
def __grinder(self, grinder_full_path):
"""Check if grinder installed"""
# java -classpath /home/user/Downloads/grinder-3.11/lib/grinder.jar net.grinder.Grinder --help
# CHECK ERRORLEVEL TO BE SURE
self.log.debug("Trying grinder: %s", grinder_full_path)
grinder_launch_command = ["java", "-classpath", grinder_full_path, "net.grinder.Grinder"]
# print "grinder_launch command:", grinder_launch_command
grinder_subprocess = subprocess.Popen(grinder_launch_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
_process_output = grinder_subprocess.communicate()[0]
if grinder_subprocess.returncode != 0:
raise CalledProcessError(grinder_subprocess.returncode, " ".join(grinder_launch_command))
# grinder_subprocess = subprocess.check_output(["java -classpath " + grinder_full_path + "grinder.jar net.grinder.Grinder", '--help'], stderr=subprocess.STDOUT)
self.log.debug("grinder check: %s", _process_output)
def __check_grinder(self):
"""
Checks if Grinder is available, otherwise download and install it.
"""
grinder_path = self.settings.get("path", "~/.bzt/grinder-taurus/lib/grinder.jar")
grinder_path = os.path.abspath(os.path.expanduser(grinder_path))
self.settings['path'] = grinder_path
try:
self.__grinder(grinder_path)
return
except (OSError, CalledProcessError):
self.log.debug("Failed to run grinder: %s", traceback.format_exc())
try:
jout = subprocess.check_output(["java", '-version'], stderr=subprocess.STDOUT)
self.log.debug("Java check: %s", jout)
except BaseException:
self.log.warning("Failed to run java: %s", traceback.format_exc())
raise RuntimeError("The 'java' is not operable or not available. Consider installing it")
self.settings['path'] = self.__install_grinder(grinder_path)
self.__grinder(self.settings['path'])
def __install_grinder(self, grinder_path):
"""
Installs Grinder.
Grinder version and download link may be set in config:
"download-link":"http://domain/resource-{version}.zip"
"version":"1.2.3"
"""
dest = os.path.dirname(os.path.dirname(os.path.expanduser(grinder_path)))
if not dest:
dest = os.path.expanduser("~/.bzt/grinder-taurus")
dest = os.path.abspath(dest)
grinder_full_path = os.path.join(dest, "lib", "grinder.jar")
try:
self.__grinder(grinder_full_path)
return grinder_full_path
except CalledProcessError:
self.log.info("Will try to install grinder into %s", dest)
downloader = FancyURLopener()
grinder_zip_path = self.engine.create_artifact("grinder-dist", ".zip")
version = self.settings.get("version", GrinderExecutor.VERSION)
download_link = self.settings.get("download-link", GrinderExecutor.DOWNLOAD_LINK)
download_link = download_link.format(version=version)
self.log.info("Downloading %s", download_link)
try:
downloader.retrieve(download_link, grinder_zip_path, download_progress_hook)
except BaseException as exc:
self.log.error("Error while downloading %s", download_link)
raise exc
self.log.info("Unzipping %s", grinder_zip_path)
unzip(grinder_zip_path, dest, 'grinder-' + version)
os.remove(grinder_zip_path)
self.log.info("Installed grinder successfully")
return grinder_full_path
def get_widget(self):
if not self.widget:
self.widget = GrinderWidget(self)
return self.widget
def resource_files(self):
"""
:return:
"""
resource_files = []
prop_file = self.get_scenario().get("properties-file")
if prop_file:
prop_file_contents = open(prop_file, 'rt').read()
resource_files, modified_contents = self.__get_res_files_from_script(prop_file_contents)
if resource_files:
self.__cp_res_files_to_artifacts_dir(resource_files)
script_name, script_ext = os.path.splitext(prop_file)
script_name = os.path.basename(script_name)
modified_script = self.engine.create_artifact(script_name, script_ext)
with open(modified_script, 'wt') as _fds:
_fds.write(modified_contents)
resource_files.append(modified_script)
else:
shutil.copy2(prop_file, self.engine.artifacts_dir)
resource_files.append(prop_file)
return [os.path.basename(x) for x in resource_files]
def __cp_res_files_to_artifacts_dir(self, resource_files_list):
"""
        :param resource_files_list: paths of resource files to copy
:return:
"""
for resource_file in resource_files_list:
if os.path.exists(resource_file):
try:
shutil.copy(resource_file, self.engine.artifacts_dir)
except BaseException:
self.log.warning("Cannot copy file: %s", resource_file)
else:
self.log.warning("File not found: %s", resource_file)
def __get_res_files_from_script(self, prop_file_contents):
"""
if "script" in scenario:
add script file to resources and override script name in .prop file
else:
take script name from .prop file and add it to resources
        :param prop_file_contents: text contents of the .properties file
:return: list of resource files and contents of .prop file
"""
resource_files = []
script_file_path = self.get_scenario().get("script")
search_pattern = re.compile(r"grinder\.script.*")
found_pattern = search_pattern.findall(prop_file_contents)[-1] # take last
file_path_in_prop = found_pattern.split("=")[-1].strip()
if script_file_path:
resource_files.append(script_file_path)
prop_file_contents = prop_file_contents.replace(file_path_in_prop, os.path.basename(script_file_path))
else:
resource_files.append(file_path_in_prop)
return resource_files, prop_file_contents
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
def __init__(self, filename, parent_logger):
super(DataLogReader, self).__init__()
self.log = parent_logger.getChild(self.__class__.__name__)
self.filename = filename
self.fds = None
self.idx = {}
self.partial_buffer = ""
self.delimiter = ","
self.offset = 0
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
while not self.fds and not self.__open_fds():
self.log.debug("No data to start reading yet")
yield None
self.log.debug("Reading grinder results")
self.fds.seek(self.offset) # without this we have a stuck reads on Mac
if last_pass:
lines = self.fds.readlines() # unlimited
else:
lines = self.fds.readlines(1024 * 1024) # 1MB limit to read
self.offset = self.fds.tell()
for line in lines:
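            # Buffer an incomplete trailing line and prepend it to the data read on the next pass.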
if not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
fields = line.split(self.delimiter)
if not fields[1].strip().isdigit():
self.log.debug("Skipping line: %s", line)
continue
t_stamp = int(fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0
label = ""
r_time = int(fields[self.idx["Test time"]]) / 1000.0
latency = int(fields[self.idx["Time to first byte"]]) / 1000.0
r_code = fields[self.idx["HTTP response code"]].strip()
con_time = int(fields[self.idx["Time to resolve host"]]) / 1000.0
con_time += int(fields[self.idx["Time to establish connection"]]) / 1000.0
if int(fields[self.idx["Errors"]]):
error = "There were some errors in Grinder test"
else:
error = None
concur = None # TODO: how to get this for grinder
yield int(t_stamp), label, concur, r_time, con_time, latency, r_code, error, ''
def __open_fds(self):
"""
opens grinder-bzt-kpi.log
"""
if not os.path.isfile(self.filename):
self.log.debug("File not appeared yet")
return False
if not os.path.getsize(self.filename):
self.log.debug("File is empty: %s", self.filename)
return False
self.fds = open(self.filename)
header = self.fds.readline().strip().split(self.delimiter)
for _ix, field in enumerate(header):
self.idx[field.strip()] = _ix
return True
class GrinderWidget(urwid.Pile):
"""
Progress sidebar widget
:type executor: bzt.modules.grinder.GrinderExecutor
"""
def __init__(self, executor):
self.executor = executor
self.dur = executor.get_load().duration
widgets = []
if self.executor.script:
self.script_name = urwid.Text("Script: %s" % os.path.basename(self.executor.script))
widgets.append(self.script_name)
if self.dur:
self.progress = urwid.ProgressBar('pb-en', 'pb-dis', done=self.dur)
else:
self.progress = urwid.Text("Running...")
widgets.append(self.progress)
self.elapsed = urwid.Text("Elapsed: N/A")
self.eta = urwid.Text("ETA: N/A", align=urwid.RIGHT)
widgets.append(urwid.Columns([self.elapsed, self.eta]))
super(GrinderWidget, self).__init__(widgets)
def update(self):
"""
Refresh widget values
"""
if self.executor.start_time:
elapsed = time.time() - self.executor.start_time
self.elapsed.set_text("Elapsed: %s" % humanize_time(elapsed))
if self.dur:
eta = self.dur - elapsed
if eta >= 0:
self.eta.set_text("ETA: %s" % humanize_time(eta))
else:
over = elapsed - self.dur
self.eta.set_text("Overtime: %s" % humanize_time(over))
else:
self.eta.set_text("")
if isinstance(self.progress, urwid.ProgressBar):
self.progress.set_completion(elapsed)
self._invalidate()
| Nefry/taurus | bzt/modules/grinder.py | Python | apache-2.0 | 20,175 |
#! /usr/bin/env python
# A script for cernet login at XJTU
import urllib
import urllib.parse
import urllib.request
import sys
username = 'your_username'
password = 'your_password'
url = 'http://10.6.8.2/cgi-bin/srun_portal'
login_data = {
'action': 'login',
'username': username,
'password': password,
'ac_id': 1,
'type': 1
}
logout_data = {
'action': 'logout',
'ac_id': 1
}
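# Send the logout payload when the script is invoked with a single "logout" argument; otherwise log in.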
raw_data = logout_data if len(sys.argv) == 2 and sys.argv[1] == "logout" else login_data
data = urllib.parse.urlencode(raw_data).encode('utf-8')
request = urllib.request.Request(url, data)
response = urllib.request.urlopen(request)
print(response.read().decode('utf-8'))
| JingkaiTang/XJTU_CERNET | cernet.py | Python | apache-2.0 | 684 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common utilities module.
"""
import random
import re
import string
from jsonpath_rw import parse
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import timeutils
import requests
import urllib
from senlin.common import consts
from senlin.common import exception
from senlin.common.i18n import _
from senlin.objects import service as service_obj
cfg.CONF.import_opt('max_response_size', 'senlin.conf')
cfg.CONF.import_opt('periodic_interval', 'senlin.conf')
LOG = logging.getLogger(__name__)
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
class URLFetchError(exception.Error, IOError):
pass
def get_positive_int(v):
"""Util function converting/checking a value of positive integer.
:param v: A value to be checked.
:returns: (b, v) where v is (converted) value if bool is True.
b is False if the value fails validation.
"""
if strutils.is_int_like(v):
count = int(v)
if count > 0:
return True, count
return False, 0
def parse_level_values(values):
"""Parse a given list of level values to numbers.
:param values: A list of event level values.
:return: A list of translated values.
"""
if not isinstance(values, list):
values = [values]
result = []
for v in values:
if v in consts.EVENT_LEVELS:
result.append(consts.EVENT_LEVELS[v])
elif isinstance(v, int):
result.append(v)
if result == []:
return None
return result
def level_from_number(value):
"""Parse a given level value(from number to string).
:param value: event level number.
:return: A translated value.
"""
n = int(value)
levels = {value: key for key, value in consts.EVENT_LEVELS.items()}
return levels.get(n, None)
def url_fetch(url, timeout=1, allowed_schemes=('http', 'https'), verify=True):
"""Get the data at the specified URL.
The URL must use the http: or https: schemes.
The file: scheme is also supported if you override
the allowed_schemes argument.
Raise an IOError if getting the data fails.
"""
components = urllib.parse.urlparse(url)
if components.scheme not in allowed_schemes:
raise URLFetchError(_('Invalid URL scheme %s') % components.scheme)
if components.scheme == 'file':
try:
return urllib.request.urlopen(url, timeout=timeout).read()
except urllib.error.URLError as uex:
raise URLFetchError(_('Failed to retrieve data: %s') % uex)
try:
resp = requests.get(url, stream=True, verify=verify, timeout=timeout)
resp.raise_for_status()
# We cannot use resp.text here because it would download the entire
# file, and a large enough file would bring down the engine. The
# 'Content-Length' header could be faked, so it's necessary to
        # download the content in chunks until max_response_size is reached.
# The chunk_size we use needs to balance CPU-intensive string
# concatenation with accuracy (eg. it's possible to fetch 1000 bytes
# greater than max_response_size with a chunk_size of 1000).
reader = resp.iter_content(chunk_size=1000)
result = ""
for chunk in reader:
if isinstance(chunk, bytes):
chunk = chunk.decode('utf-8')
result += chunk
if len(result) > cfg.CONF.max_response_size:
raise URLFetchError("Data exceeds maximum allowed size (%s"
" bytes)" % cfg.CONF.max_response_size)
return result
except requests.exceptions.RequestException as ex:
raise URLFetchError(_('Failed to retrieve data: %s') % ex)
def random_name(length=8):
if length <= 0:
return ''
lead = random.choice(string.ascii_letters)
tail = ''.join(random.choice(string.ascii_letters + string.digits)
for _ in range(length - 1))
return lead + tail
def format_node_name(fmt, cluster, index):
"""Generates a node name using the given format.
:param fmt: A string containing format directives. Currently we only
support the following keys:
- "$nR": a random string with at most 'n' characters where
'n' defaults to 8.
- "$nI": a string representation of the node index where 'n'
instructs the number of digits generated with 0s
padded to the left.
:param cluster: The DB object for the cluster to which the node belongs.
This parameter is provided for future extension.
:param index: The index for the node in the target cluster.
:returns: A string containing the generated node name.
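    For example, a format of "node-$3I" with index 7 yields "node-007",
    while "node-$4R" appends four random characters after "node-".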
"""
# for backward compatibility
if not fmt:
fmt = "node-$8R"
result = ""
last = 0
    pattern = re.compile(r"(\$\d{0,8}[rRI])")
for m in pattern.finditer(fmt):
group = m.group()
t = group[-1]
width = group[1:-1]
if t == "R" or t == "r": # random string
if width != "":
sub = random_name(int(width))
else:
sub = random_name(8)
if t == "r":
sub = sub.lower()
elif t == "I": # node index
if width != "":
str_index = str(index)
sub = str_index.zfill(int(width))
else:
sub = str(index)
result += fmt[last:m.start()] + sub
last = m.end()
result += fmt[last:]
return result
def isotime(at):
"""Stringify time in ISO 8601 format.
oslo.versionedobject is using this function for datetime formatting.
"""
if at is None:
return None
st = at.strftime(_ISO8601_TIME_FORMAT)
tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
st += ('Z' if tz == 'UTC' or tz == "UTC+00:00" else tz)
return st
def get_path_parser(path):
"""Get a JsonPath parser based on a path string.
:param path: A string containing a JsonPath.
:returns: A parser used for path matching.
:raises: An exception of `BadRequest` if the path fails validation.
"""
try:
expr = parse(path)
except Exception as ex:
error_text = str(ex)
error_msg = error_text.split(':', 1)[1]
raise exception.BadRequest(
msg=_("Invalid attribute path - %s") % error_msg.strip())
return expr
def is_engine_dead(ctx, engine_id, duration=None):
"""Check if an engine is dead.
If engine hasn't reported its status for the given duration, it is treated
as a dead engine.
:param ctx: A request context.
:param engine_id: The ID of the engine to test.
:param duration: The time duration in seconds.
"""
if not duration:
duration = 2 * cfg.CONF.periodic_interval
eng = service_obj.Service.get(ctx, engine_id)
if not eng:
return True
if timeutils.is_older_than(eng.updated_at, duration):
return True
return False
| openstack/senlin | senlin/common/utils.py | Python | apache-2.0 | 7,679 |
#!/usr/bin/env python
"""A more advanced Reducer, using Python iterators and generators."""
import sys
print_format = "{0}{1}{2}"
def main(separator='\t'):
cantidad_total = 0
suma_total = 0
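    # Each mapper record is "partial_sum<separator>count"; accumulate both and print the totals.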
for line in sys.stdin:
suma, cantidad = line.rstrip().split(separator, 1)
cantidad_total += int(cantidad)
suma_total += float(suma)
print print_format.format(suma_total, separator, cantidad_total)
if __name__ == "__main__":
main()
| santiavenda2/hadoop-with-python | src/variance/mean_reducer.py | Python | apache-2.0 | 471 |
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Embl (GFF3, GTF) File Parser for DataFile Scraper package."""
from dataclasses import dataclass
import re
from typing import Optional
from ..utils import get_group
from .base import BaseOptMetadata, FileParser, _FILE_COMPRESSIONS_MAP
@dataclass
class EMBLOptMetadata(BaseOptMetadata):
compression: Optional[str]
content_type: Optional[str]
sorting: Optional[str]
class EMBLFileParser(FileParser):
FILENAMES_RE = re.compile(
(
r"^(?P<species>\w+)\.(?P<assembly>[\w\-\.]+?)\.(5|1)\d{1,2}\."
r"(?P<content_type>abinitio|chr|chr_patch_hapl_scaff|nonchromosomal|(chromosome|plasmid|scaffold)\.[\w\-\.]+?|primary_assembly[\w\-\.]*?|(chromosome_)?group\.\w+?)?.*?"
)
)
FILE_EXT_RE = re.compile(
(
r".*?\.(?P<file_extension>gff3|gtf|dat)"
r"(\.(?P<sorted>sorted))?(\.(?P<compression>gz))?$"
)
)
def get_optional_metadata(self, metadata: dict) -> EMBLOptMetadata:
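        """Derive compression, content type, sorting and file extension from the
        file name, preferring any values supplied in metadata["extras"]."""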
match = self.FILENAMES_RE.match(metadata["file_name"])
matched_content_type = get_group("content_type", match)
match = self.FILE_EXT_RE.match(metadata["file_name"])
matched_compression = _FILE_COMPRESSIONS_MAP.get(get_group("compression", match))
file_extension = get_group("file_extension", match)
matched_sorting = get_group("sorted", match)
compression = (
metadata.get("extras", {}).get("compression") or matched_compression
)
content_type = (
metadata.get("extras", {}).get("content_type") or matched_content_type
)
sorting = metadata.get("extras", {}).get("sorting") or matched_sorting
optional_data = EMBLOptMetadata(
compression=compression,
file_extension=file_extension,
content_type=content_type,
sorting=sorting,
)
return optional_data | Ensembl/ensembl-production | src/python/ensembl/production/datafile/scraper/parsers/embl.py | Python | apache-2.0 | 2,568 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.classification import (
ClassificationPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_object_detection import (
ImageObjectDetectionPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.image_segmentation import (
ImageSegmentationPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_classification import (
TabularClassificationPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.tabular_regression import (
TabularRegressionPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_extraction import (
TextExtractionPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.text_sentiment import (
TextSentimentPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_action_recognition import (
VideoActionRecognitionPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_classification import (
VideoClassificationPredictionResult,
)
from google.cloud.aiplatform.v1.schema.predict.prediction_v1.types.video_object_tracking import (
VideoObjectTrackingPredictionResult,
)
__all__ = (
"ClassificationPredictionResult",
"ImageObjectDetectionPredictionResult",
"ImageSegmentationPredictionResult",
"TabularClassificationPredictionResult",
"TabularRegressionPredictionResult",
"TextExtractionPredictionResult",
"TextSentimentPredictionResult",
"VideoActionRecognitionPredictionResult",
"VideoClassificationPredictionResult",
"VideoObjectTrackingPredictionResult",
)
| sasha-gitg/python-aiplatform | google/cloud/aiplatform/v1/schema/predict/prediction/__init__.py | Python | apache-2.0 | 2,407 |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.ports \
import forms as project_forms
LOG = logging.getLogger(__name__)
class CreatePort(forms.SelfHandlingForm):
network_name = forms.CharField(label=_("Network Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
network_id = forms.CharField(label=_("Network ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
name = forms.CharField(max_length=255,
label=_("Name"),
required=False)
admin_state = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
device_id = forms.CharField(max_length=100, label=_("Device ID"),
help_text=_("Device ID attached to the port"),
required=False)
device_owner = forms.CharField(max_length=100, label=_("Device Owner"),
help_text=_("Device owner attached to the "
"port"),
required=False)
def handle(self, request, data):
try:
            # We must specify the tenant_id of the network the port is
            # created on if the admin user does not belong to the tenant.
network = api.neutron.network_get(request, data['network_id'])
data['tenant_id'] = network.tenant_id
data['admin_state_up'] = data['admin_state']
del data['network_name']
del data['admin_state']
port = api.neutron.port_create(request, **data)
msg = _('Port %s was successfully created.') % port['id']
LOG.debug(msg)
messages.success(request, msg)
return port
except Exception:
msg = _('Failed to create a port for network %s') \
% data['network_id']
LOG.info(msg)
redirect = reverse('horizon:admin:networks:detail',
args=(data['network_id'],))
exceptions.handle(request, msg, redirect=redirect)
class UpdatePort(project_forms.UpdatePort):
#tenant_id = forms.CharField(widget=forms.HiddenInput())
device_id = forms.CharField(max_length=100, label=_("Device ID"),
help_text=_("Device ID attached to the port"),
required=False)
device_owner = forms.CharField(max_length=100, label=_("Device Owner"),
help_text=_("Device owner attached to the "
"port"),
required=False)
failure_url = 'horizon:admin:networks:detail'
def handle(self, request, data):
try:
LOG.debug('params = %s' % data)
port = api.neutron.port_update(request, data['port_id'],
name=data['name'],
admin_state_up=data['admin_state'],
device_id=data['device_id'],
device_owner=data['device_owner'])
msg = _('Port %s was successfully updated.') % data['port_id']
LOG.debug(msg)
messages.success(request, msg)
return port
except Exception:
msg = _('Failed to update port %s') % data['port_id']
LOG.info(msg)
redirect = reverse(self.failure_url,
args=[data['network_id']])
exceptions.handle(request, msg, redirect=redirect)
| spandanb/horizon | openstack_dashboard/dashboards/admin/networks/ports/forms.py | Python | apache-2.0 | 4,657 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import copy
import logging
import os
import pickle
import six
import time
import tempfile
import ray
from ray.exceptions import RayError
from ray.rllib.models import MODEL_DEFAULTS
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.evaluation.metrics import collect_metrics
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from ray.rllib.evaluation.worker_set import WorkerSet
from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI
from ray.rllib.utils import FilterManager, deep_update, merge_dicts
from ray.rllib.utils.memory import ray_get_and_free
from ray.rllib.utils import try_import_tf
from ray.tune.registry import ENV_CREATOR, register_env, _global_registry
from ray.tune.trainable import Trainable
from ray.tune.trial import ExportFormat
from ray.tune.resources import Resources
from ray.tune.logger import UnifiedLogger
from ray.tune.result import DEFAULT_RESULTS_DIR
tf = try_import_tf()
logger = logging.getLogger(__name__)
# Max number of times to retry a worker failure. We shouldn't try too many
# times in a row since that would indicate a persistent cluster issue.
MAX_WORKER_FAILURE_RETRIES = 3
# yapf: disable
# __sphinx_doc_begin__
COMMON_CONFIG = {
# === Debugging ===
# Whether to write episode stats and videos to the agent log dir
"monitor": False,
# Set the ray.rllib.* log level for the agent process and its workers.
# Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG level will also
# periodically print out summaries of relevant internal dataflow (this is
# also printed out once at startup at the INFO level).
"log_level": "INFO",
# Callbacks that will be run during various phases of training. These all
# take a single "info" dict as an argument. For episode callbacks, custom
# metrics can be attached to the episode by updating the episode object's
# custom metrics dict (see examples/custom_metrics_and_callbacks.py). You
# may also mutate the passed in batch data in your callback.
"callbacks": {
"on_episode_start": None, # arg: {"env": .., "episode": ...}
"on_episode_step": None, # arg: {"env": .., "episode": ...}
"on_episode_end": None, # arg: {"env": .., "episode": ...}
"on_sample_end": None, # arg: {"samples": .., "worker": ...}
"on_train_result": None, # arg: {"trainer": ..., "result": ...}
"on_postprocess_traj": None, # arg: {
# "agent_id": ..., "episode": ...,
# "pre_batch": (before processing),
# "post_batch": (after processing),
# "all_pre_batches": (other agent ids),
# }
},
# Whether to attempt to continue training if a worker crashes.
"ignore_worker_failures": False,
# Log system resource metrics to results.
"log_sys_usage": True,
# Enable TF eager execution (TF policies only).
"eager": False,
# Enable tracing in eager mode. This greatly improves performance, but
# makes it slightly harder to debug since Python code won't be evaluated
# after the initial eager pass.
"eager_tracing": False,
# Disable eager execution on workers (but allow it on the driver). This
    # only has an effect if eager is enabled.
"no_eager_on_workers": False,
# === Policy ===
# Arguments to pass to model. See models/catalog.py for a full list of the
# available model options.
"model": MODEL_DEFAULTS,
# Arguments to pass to the policy optimizer. These vary by optimizer.
"optimizer": {},
# === Environment ===
# Discount factor of the MDP
"gamma": 0.99,
# Number of steps after which the episode is forced to terminate. Defaults
# to `env.spec.max_episode_steps` (if present) for Gym envs.
"horizon": None,
# Calculate rewards but don't reset the environment when the horizon is
# hit. This allows value estimation and RNN state to span across logical
# episodes denoted by horizon. This only has an effect if horizon != inf.
"soft_horizon": False,
# Don't set 'done' at the end of the episode. Note that you still need to
# set this if soft_horizon=True, unless your env is actually running
# forever without returning done=True.
"no_done_at_end": False,
# Arguments to pass to the env creator
"env_config": {},
# Environment name can also be passed via config
"env": None,
# Whether to clip rewards prior to experience postprocessing. Setting to
# None means clip for Atari only.
"clip_rewards": None,
# Whether to np.clip() actions to the action space low/high range spec.
"clip_actions": True,
# Whether to use rllib or deepmind preprocessors by default
"preprocessor_pref": "deepmind",
# The default learning rate
"lr": 0.0001,
# === Evaluation ===
# Evaluate with every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
# Note that evaluation is currently not parallelized, and that for Ape-X
# metrics are already only reported for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# Extra arguments to pass to evaluation workers.
# Typical usage is to pass extra args to evaluation env creator
# and to disable exploration by computing deterministic actions
# TODO(kismuz): implement determ. actions and include relevant keys hints
"evaluation_config": {},
# === Resources ===
# Number of actors used for parallelism
"num_workers": 2,
# Number of GPUs to allocate to the trainer process. Note that not all
# algorithms can take advantage of trainer GPUs. This can be fractional
# (e.g., 0.3 GPUs).
"num_gpus": 0,
# Number of CPUs to allocate per worker.
"num_cpus_per_worker": 1,
# Number of GPUs to allocate per worker. This can be fractional.
"num_gpus_per_worker": 0,
# Any custom resources to allocate per worker.
"custom_resources_per_worker": {},
# Number of CPUs to allocate for the trainer. Note: this only takes effect
# when running in Tune.
"num_cpus_for_driver": 1,
# === Memory quota ===
# You can set these memory quotas to tell Ray to reserve memory for your
    # training run. This guarantees predictable execution, but the tradeoff is
    # that if your workload exceeds the memory quota it will fail.
# Heap memory to reserve for the trainer process (0 for unlimited). This
# can be large if your are using large train batches, replay buffers, etc.
"memory": 0,
# Object store memory to reserve for the trainer process. Being large
# enough to fit a few copies of the model weights should be sufficient.
# This is enabled by default since models are typically quite small.
"object_store_memory": 0,
# Heap memory to reserve for each worker. Should generally be small unless
# your environment is very heavyweight.
"memory_per_worker": 0,
# Object store memory to reserve for each worker. This only needs to be
# large enough to fit a few sample batches at a time. This is enabled
# by default since it almost never needs to be larger than ~200MB.
"object_store_memory_per_worker": 0,
# === Execution ===
# Number of environments to evaluate vectorwise per worker.
"num_envs_per_worker": 1,
# Default sample batch size (unroll length). Batches of this size are
# collected from workers until train_batch_size is met. When using
# multiple envs per worker, this is multiplied by num_envs_per_worker.
"sample_batch_size": 200,
# Training batch size, if applicable. Should be >= sample_batch_size.
# Samples batches will be concatenated together to this size for training.
"train_batch_size": 200,
# Whether to rollout "complete_episodes" or "truncate_episodes"
"batch_mode": "truncate_episodes",
# Use a background thread for sampling (slightly off-policy, usually not
# advisable to turn on unless your env specifically requires it)
"sample_async": False,
# Element-wise observation filter, either "NoFilter" or "MeanStdFilter"
"observation_filter": "NoFilter",
# Whether to synchronize the statistics of remote filters.
"synchronize_filters": True,
# Configure TF for single-process operation by default
"tf_session_args": {
        # note: overridden by `local_tf_session_args`
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {
"CPU": 1
},
"allow_soft_placement": True, # required by PPO multi-gpu
},
# Override the following tf session args on the local worker
"local_tf_session_args": {
# Allow a higher level of parallelism by default, but not unlimited
# since that can cause crashes with many concurrent drivers.
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
},
# Whether to LZ4 compress individual observations
"compress_observations": False,
# Wait for metric batches for at most this many seconds. Those that
# have not returned in time will be collected in the next iteration.
"collect_metrics_timeout": 180,
# Smooth metrics over this many episodes.
"metrics_smoothing_episodes": 100,
# If using num_envs_per_worker > 1, whether to create those new envs in
# remote processes instead of in the same worker. This adds overheads, but
    # can make sense if your envs take a long time to step / reset
# (e.g., for StarCraft). Use this cautiously; overheads are significant.
"remote_worker_envs": False,
# Timeout that remote workers are waiting when polling environments.
# 0 (continue when at least one env is ready) is a reasonable default,
# but optimal value could be obtained by measuring your environment
# step / reset and model inference perf.
"remote_env_batch_wait_ms": 0,
# Minimum time per iteration
"min_iter_time_s": 0,
# Minimum env steps to optimize for per train call. This value does
# not affect learning, only the length of iterations.
"timesteps_per_iteration": 0,
# This argument, in conjunction with worker_index, sets the random seed of
# each worker, so that identically configured trials will have identical
# results. This makes experiments reproducible.
"seed": None,
# === Offline Datasets ===
# Specify how to generate experiences:
# - "sampler": generate experiences via online simulation (default)
# - a local directory or file glob expression (e.g., "/tmp/*.json")
# - a list of individual file paths/URIs (e.g., ["/tmp/1.json",
# "s3://bucket/2.json"])
# - a dict with string keys and sampling probabilities as values (e.g.,
# {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
# - a function that returns a rllib.offline.InputReader
"input": "sampler",
# Specify how to evaluate the current policy. This only has an effect when
# reading offline experiences. Available options:
# - "wis": the weighted step-wise importance sampling estimator.
# - "is": the step-wise importance sampling estimator.
# - "simulation": run the environment in the background, but use
# this data for evaluation only and not for learning.
"input_evaluation": ["is", "wis"],
# Whether to run postprocess_trajectory() on the trajectory fragments from
# offline inputs. Note that postprocessing will be done using the *current*
# policy, not the *behaviour* policy, which is typically undesirable for
# on-policy algorithms.
"postprocess_inputs": False,
# If positive, input batches will be shuffled via a sliding window buffer
# of this number of batches. Use this if the input data is not in random
# enough order. Input is delayed until the shuffle buffer is filled.
"shuffle_buffer_size": 0,
# Specify where experiences should be saved:
# - None: don't save any experiences
# - "logdir" to save to the agent log dir
# - a path/URI to save to a custom output directory (e.g., "s3://bucket/")
# - a function that returns a rllib.offline.OutputWriter
"output": None,
# What sample batch columns to LZ4 compress in the output data.
"output_compress_columns": ["obs", "new_obs"],
# Max output file size before rolling over to a new file.
"output_max_file_size": 64 * 1024 * 1024,
# === Multiagent ===
"multiagent": {
# Map from policy ids to tuples of (policy_cls, obs_space,
# act_space, config). See rollout_worker.py for more info.
"policies": {},
# Function mapping agent ids to policy ids.
"policy_mapping_fn": None,
# Optional whitelist of policies to train, or None for all policies.
"policies_to_train": None,
},
}
# __sphinx_doc_end__
# yapf: enable
@DeveloperAPI
def with_common_config(extra_config):
"""Returns the given config dict merged with common agent confs."""
return with_base_config(COMMON_CONFIG, extra_config)
def with_base_config(base_config, extra_config):
"""Returns the given config dict merged with a base agent conf."""
config = copy.deepcopy(base_config)
config.update(extra_config)
return config
@PublicAPI
class Trainer(Trainable):
"""A trainer coordinates the optimization of one or more RL policies.
All RLlib trainers extend this base class, e.g., the A3CTrainer implements
the A3C algorithm for single and multi-agent training.
Trainer objects retain internal model state between calls to train(), so
you should create a new trainer instance for each training session.
Attributes:
env_creator (func): Function that creates a new training env.
config (obj): Algorithm-specific configuration data.
logdir (str): Directory in which training outputs should be placed.
"""
_allow_unknown_configs = False
_allow_unknown_subkeys = [
"tf_session_args", "local_tf_session_args", "env_config", "model",
"optimizer", "multiagent", "custom_resources_per_worker",
"evaluation_config"
]
@PublicAPI
def __init__(self, config=None, env=None, logger_creator=None):
"""Initialize an RLLib trainer.
Args:
config (dict): Algorithm-specific configuration data.
env (str): Name of the environment to use. Note that this can also
be specified as the `env` key in config.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
config = config or {}
if tf and config.get("eager"):
tf.enable_eager_execution()
logger.info("Executing eagerly, with eager_tracing={}".format(
"True" if config.get("eager_tracing") else "False"))
if tf and not tf.executing_eagerly():
logger.info("Tip: set 'eager': true or the --eager flag to enable "
"TensorFlow eager execution")
# Vars to synchronize to workers on each train call
self.global_vars = {"timestep": 0}
# Trainers allow env ids to be passed directly to the constructor.
self._env_id = self._register_if_needed(env or config.get("env"))
# Create a default logger creator if no logger_creator is specified
if logger_creator is None:
timestr = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
logdir_prefix = "{}_{}_{}".format(self._name, self._env_id,
timestr)
def default_logger_creator(config):
"""Creates a Unified logger with a default logdir prefix
containing the agent name and the env id
"""
if not os.path.exists(DEFAULT_RESULTS_DIR):
os.makedirs(DEFAULT_RESULTS_DIR)
logdir = tempfile.mkdtemp(
prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
return UnifiedLogger(config, logdir, loggers=None)
logger_creator = default_logger_creator
Trainable.__init__(self, config, logger_creator)
@classmethod
@override(Trainable)
def default_resource_request(cls, config):
cf = dict(cls._default_config, **config)
Trainer._validate_config(cf)
# TODO(ekl): add custom resources here once tune supports them
return Resources(
cpu=cf["num_cpus_for_driver"],
gpu=cf["num_gpus"],
memory=cf["memory"],
object_store_memory=cf["object_store_memory"],
extra_cpu=cf["num_cpus_per_worker"] * cf["num_workers"],
extra_gpu=cf["num_gpus_per_worker"] * cf["num_workers"],
extra_memory=cf["memory_per_worker"] * cf["num_workers"],
extra_object_store_memory=cf["object_store_memory_per_worker"] *
cf["num_workers"])
@override(Trainable)
@PublicAPI
def train(self):
"""Overrides super.train to synchronize global vars."""
if self._has_policy_optimizer():
self.global_vars["timestep"] = self.optimizer.num_steps_sampled
self.optimizer.workers.local_worker().set_global_vars(
self.global_vars)
for w in self.optimizer.workers.remote_workers():
w.set_global_vars.remote(self.global_vars)
logger.debug("updated global vars: {}".format(self.global_vars))
result = None
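        # Retry the training step a bounded number of times, attempting worker recovery when failures are tolerated.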
for _ in range(1 + MAX_WORKER_FAILURE_RETRIES):
try:
result = Trainable.train(self)
except RayError as e:
if self.config["ignore_worker_failures"]:
logger.exception(
"Error in train call, attempting to recover")
self._try_recover()
else:
logger.info(
"Worker crashed during call to train(). To attempt to "
"continue training without the failed worker, set "
"`'ignore_worker_failures': True`.")
raise e
except Exception as e:
time.sleep(0.5) # allow logs messages to propagate
raise e
else:
break
if result is None:
raise RuntimeError("Failed to recover from worker crash")
if (self.config.get("observation_filter", "NoFilter") != "NoFilter"
and hasattr(self, "workers")
and isinstance(self.workers, WorkerSet)):
FilterManager.synchronize(
self.workers.local_worker().filters,
self.workers.remote_workers(),
update_remote=self.config["synchronize_filters"])
logger.debug("synchronized filters: {}".format(
self.workers.local_worker().filters))
if self._has_policy_optimizer():
result["num_healthy_workers"] = len(
self.optimizer.workers.remote_workers())
if self.config["evaluation_interval"]:
if self._iteration % self.config["evaluation_interval"] == 0:
evaluation_metrics = self._evaluate()
assert isinstance(evaluation_metrics, dict), \
"_evaluate() needs to return a dict."
result.update(evaluation_metrics)
return result
@override(Trainable)
def _log_result(self, result):
if self.config["callbacks"].get("on_train_result"):
self.config["callbacks"]["on_train_result"]({
"trainer": self,
"result": result,
})
# log after the callback is invoked, so that the user has a chance
# to mutate the result
Trainable._log_result(self, result)
@override(Trainable)
def _setup(self, config):
env = self._env_id
if env:
config["env"] = env
if _global_registry.contains(ENV_CREATOR, env):
self.env_creator = _global_registry.get(ENV_CREATOR, env)
else:
import gym # soft dependency
self.env_creator = lambda env_config: gym.make(env)
else:
self.env_creator = lambda env_config: None
# Merge the supplied config with the class default
merged_config = copy.deepcopy(self._default_config)
merged_config = deep_update(merged_config, config,
self._allow_unknown_configs,
self._allow_unknown_subkeys)
self.raw_user_config = config
self.config = merged_config
Trainer._validate_config(self.config)
if self.config.get("log_level"):
logging.getLogger("ray.rllib").setLevel(self.config["log_level"])
def get_scope():
if tf and not tf.executing_eagerly():
return tf.Graph().as_default()
else:
return open("/dev/null") # fake a no-op scope
with get_scope():
self._init(self.config, self.env_creator)
# Evaluation related
if self.config.get("evaluation_interval"):
# Update env_config with evaluation settings:
extra_config = copy.deepcopy(self.config["evaluation_config"])
extra_config.update({
"batch_mode": "complete_episodes",
"batch_steps": 1,
})
logger.debug(
"using evaluation_config: {}".format(extra_config))
self.evaluation_workers = self._make_workers(
self.env_creator,
self._policy,
merge_dicts(self.config, extra_config),
num_workers=0)
self.evaluation_metrics = self._evaluate()
@override(Trainable)
def _stop(self):
if hasattr(self, "workers"):
self.workers.stop()
if hasattr(self, "optimizer"):
self.optimizer.stop()
@override(Trainable)
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir,
"checkpoint-{}".format(self.iteration))
pickle.dump(self.__getstate__(), open(checkpoint_path, "wb"))
return checkpoint_path
@override(Trainable)
def _restore(self, checkpoint_path):
extra_data = pickle.load(open(checkpoint_path, "rb"))
self.__setstate__(extra_data)
@DeveloperAPI
def _make_workers(self, env_creator, policy, config, num_workers):
return WorkerSet(
env_creator,
policy,
config,
num_workers=num_workers,
logdir=self.logdir)
@DeveloperAPI
def _init(self, config, env_creator):
"""Subclasses should override this for custom initialization."""
raise NotImplementedError
@DeveloperAPI
def _evaluate(self):
"""Evaluates current policy under `evaluation_config` settings.
Note that this default implementation does not do anything beyond
merging evaluation_config with the normal trainer config.
"""
if not self.config["evaluation_config"]:
raise ValueError(
"No evaluation_config specified. It doesn't make sense "
"to enable evaluation without specifying any config "
"overrides, since the results will be the "
"same as reported during normal policy evaluation.")
logger.info("Evaluating current policy for {} episodes".format(
self.config["evaluation_num_episodes"]))
self._before_evaluate()
self.evaluation_workers.local_worker().restore(
self.workers.local_worker().save())
for _ in range(self.config["evaluation_num_episodes"]):
self.evaluation_workers.local_worker().sample()
metrics = collect_metrics(self.evaluation_workers.local_worker())
return {"evaluation": metrics}
@DeveloperAPI
def _before_evaluate(self):
"""Pre-evaluation callback."""
pass
@PublicAPI
def compute_action(self,
observation,
state=None,
prev_action=None,
prev_reward=None,
info=None,
policy_id=DEFAULT_POLICY_ID,
full_fetch=False):
"""Computes an action for the specified policy.
Note that you can also access the policy object through
self.get_policy(policy_id) and call compute_actions() on it directly.
Arguments:
observation (obj): observation from the environment.
state (list): RNN hidden state, if any. If state is not None,
then all of compute_single_action(...) is returned
(computed action, rnn state, logits dictionary).
Otherwise compute_single_action(...)[0] is
returned (computed action).
prev_action (obj): previous action value, if any
prev_reward (int): previous reward, if any
info (dict): info object, if any
policy_id (str): policy to query (only applies to multi-agent).
full_fetch (bool): whether to return extra action fetch results.
This is always set to true if RNN state is specified.
Returns:
Just the computed action if full_fetch=False, or the full output
of policy.compute_actions() otherwise.
"""
if state is None:
state = []
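        # Run the observation through the policy's preprocessor and filter (without updating filter statistics).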
preprocessed = self.workers.local_worker().preprocessors[
policy_id].transform(observation)
filtered_obs = self.workers.local_worker().filters[policy_id](
preprocessed, update=False)
if state:
return self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"])
res = self.get_policy(policy_id).compute_single_action(
filtered_obs,
state,
prev_action,
prev_reward,
info,
clip_actions=self.config["clip_actions"])
if full_fetch:
return res
else:
return res[0] # backwards compatibility
@property
def _name(self):
"""Subclasses should override this to declare their name."""
raise NotImplementedError
@property
def _default_config(self):
"""Subclasses should override this to declare their default config."""
raise NotImplementedError
@PublicAPI
def get_policy(self, policy_id=DEFAULT_POLICY_ID):
"""Return policy for the specified id, or None.
Arguments:
policy_id (str): id of policy to return.
"""
return self.workers.local_worker().get_policy(policy_id)
@PublicAPI
def get_weights(self, policies=None):
"""Return a dictionary of policy ids to weights.
Arguments:
policies (list): Optional list of policies to return weights for,
or None for all policies.
"""
return self.workers.local_worker().get_weights(policies)
@PublicAPI
def set_weights(self, weights):
"""Set policy weights by policy id.
Arguments:
weights (dict): Map of policy ids to weights to set.
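        Example:
            >>> # Illustrative: mirror weights from another, hypothetical trainer.
            >>> trainer.set_weights(other_trainer.get_weights())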
"""
self.workers.local_worker().set_weights(weights)
@DeveloperAPI
def export_policy_model(self, export_dir, policy_id=DEFAULT_POLICY_ID):
"""Export policy model with given policy_id to local directory.
Arguments:
export_dir (string): Writable local directory.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_model("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_model(export_dir, policy_id)
@DeveloperAPI
def export_policy_checkpoint(self,
export_dir,
filename_prefix="model",
policy_id=DEFAULT_POLICY_ID):
"""Export tensorflow policy model checkpoint to local directory.
Arguments:
export_dir (string): Writable local directory.
filename_prefix (string): file name prefix of checkpoint files.
policy_id (string): Optional policy id to export.
Example:
>>> trainer = MyTrainer()
>>> for _ in range(10):
>>> trainer.train()
>>> trainer.export_policy_checkpoint("/tmp/export_dir")
"""
self.workers.local_worker().export_policy_checkpoint(
export_dir, filename_prefix, policy_id)
@DeveloperAPI
def collect_metrics(self, selected_workers=None):
"""Collects metrics from the remote workers of this agent.
This is the same data as returned by a call to train().
"""
return self.optimizer.collect_metrics(
self.config["collect_metrics_timeout"],
min_history=self.config["metrics_smoothing_episodes"],
selected_workers=selected_workers)
@classmethod
def resource_help(cls, config):
return ("\n\nYou can adjust the resource requests of RLlib agents by "
"setting `num_workers`, `num_gpus`, and other configs. See "
"the DEFAULT_CONFIG defined by each agent for more info.\n\n"
"The config of this agent is: {}".format(config))
@staticmethod
def _validate_config(config):
if "policy_graphs" in config["multiagent"]:
logger.warning(
"The `policy_graphs` config has been renamed to `policies`.")
# Backwards compatibility
config["multiagent"]["policies"] = config["multiagent"][
"policy_graphs"]
del config["multiagent"]["policy_graphs"]
if "gpu" in config:
raise ValueError(
"The `gpu` config is deprecated, please use `num_gpus=0|1` "
"instead.")
if "gpu_fraction" in config:
raise ValueError(
"The `gpu_fraction` config is deprecated, please use "
"`num_gpus=<fraction>` instead.")
if "use_gpu_for_workers" in config:
raise ValueError(
"The `use_gpu_for_workers` config is deprecated, please use "
"`num_gpus_per_worker=1` instead.")
if type(config["input_evaluation"]) != list:
raise ValueError(
"`input_evaluation` must be a list of strings, got {}".format(
config["input_evaluation"]))
def _try_recover(self):
"""Try to identify and blacklist any unhealthy workers.
This method is called after an unexpected remote error is encountered
from a worker. It issues check requests to all current workers and
blacklists any that respond with error. If no healthy workers remain,
an error is raised.
"""
if not self._has_policy_optimizer():
raise NotImplementedError(
"Recovery is not supported for this algorithm")
logger.info("Health checking all workers...")
checks = []
for ev in self.optimizer.workers.remote_workers():
_, obj_id = ev.sample_with_count.remote()
checks.append(obj_id)
healthy_workers = []
for i, obj_id in enumerate(checks):
w = self.optimizer.workers.remote_workers()[i]
try:
ray_get_and_free(obj_id)
healthy_workers.append(w)
logger.info("Worker {} looks healthy".format(i + 1))
except RayError:
logger.exception("Blacklisting worker {}".format(i + 1))
try:
w.__ray_terminate__.remote()
except Exception:
logger.exception("Error terminating unhealthy worker")
if len(healthy_workers) < 1:
raise RuntimeError(
"Not enough healthy workers remain to continue.")
self.optimizer.reset(healthy_workers)
def _has_policy_optimizer(self):
return hasattr(self, "optimizer") and isinstance(
self.optimizer, PolicyOptimizer)
@override(Trainable)
def _export_model(self, export_formats, export_dir):
ExportFormat.validate(export_formats)
exported = {}
if ExportFormat.CHECKPOINT in export_formats:
path = os.path.join(export_dir, ExportFormat.CHECKPOINT)
self.export_policy_checkpoint(path)
exported[ExportFormat.CHECKPOINT] = path
if ExportFormat.MODEL in export_formats:
path = os.path.join(export_dir, ExportFormat.MODEL)
self.export_policy_model(path)
exported[ExportFormat.MODEL] = path
return exported
def __getstate__(self):
state = {}
if hasattr(self, "workers"):
state["worker"] = self.workers.local_worker().save()
if hasattr(self, "optimizer") and hasattr(self.optimizer, "save"):
state["optimizer"] = self.optimizer.save()
return state
def __setstate__(self, state):
if "worker" in state:
self.workers.local_worker().restore(state["worker"])
remote_state = ray.put(state["worker"])
for r in self.workers.remote_workers():
r.restore.remote(remote_state)
if "optimizer" in state:
self.optimizer.restore(state["optimizer"])
def _register_if_needed(self, env_object):
if isinstance(env_object, six.string_types):
return env_object
elif isinstance(env_object, type):
name = env_object.__name__
register_env(name, lambda config: env_object(config))
return name
raise ValueError(
"{} is an invalid env specification. ".format(env_object) +
"You can specify a custom env as either a class "
"(e.g., YourEnvCls) or a registered env id (e.g., \"your_env\").")
| ujvl/ray-ng | rllib/agents/trainer.py | Python | apache-2.0 | 35,232 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import
""" Data iterators for common data formats and utility functions."""
from __future__ import absolute_import
from . import io
from .io import CSVIter, DataBatch, DataDesc, DataIter, ImageDetRecordIter, ImageRecordInt8Iter, ImageRecordIter,\
ImageRecordIter_v1, ImageRecordUInt8Iter, ImageRecordUInt8Iter_v1, LibSVMIter, MNISTIter, MXDataIter, NDArrayIter,\
PrefetchingIter, ResizeIter
from . import utils
from .utils import _init_data, _getdata_by_idx, _has_instance
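# Illustrative use of one of the re-exported iterators; the shapes and batch
# size below are arbitrary placeholders:
#   import mxnet as mx
#   data = mx.nd.ones((100, 10))
#   label = mx.nd.zeros((100,))
#   data_iter = mx.io.NDArrayIter(data, label, batch_size=20)
#   for batch in data_iter:
#       pass  # batch.data / batch.label hold the current mini-batch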
| mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/python/mxnet/io/__init__.py | Python | apache-2.0 | 1,335 |
from datetime import datetime, timedelta
from django.db import connection
from core.libs.exlib import dictfetchall
from core.iDDS.useconstants import SubstitleValue
from core.settings.config import DB_SCHEMA_IDDS
from core.settings.local import defaultDatetimeFormat
subtitleValue = SubstitleValue()
def getTransforms(requestid):
sqlpar = {"requestid": requestid}
sql = """
select r.request_id, wt.transform_id
from atlas_idds.requests r
full outer join (
select request_id, workprogress_id from atlas_idds.workprogresses
) wp on (r.request_id=wp.request_id)
full outer join atlas_idds.wp2transforms wt on (wp.workprogress_id=wt.workprogress_id)
where r.request_id=:requestid
"""
cur = connection.cursor()
cur.execute(sql, sqlpar)
rows = dictfetchall(cur)
cur.close()
return rows
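# Illustrative usage (the request id and returned values are hypothetical):
#   rows = getTransforms(12345)
#   # -> [{'request_id': 12345, 'transform_id': 67890}, ...]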
def getRequests(query_params):
condition = '(1=1)'
sqlpar = {}
if query_params and len(query_params) > 0:
query_params = subtitleValue.replaceInverseKeys('requests', query_params)
if 'reqstatus' in query_params:
sqlpar['rstatus'] = query_params['reqstatus']
condition = 'r.status = :rstatus'
sql = f"""
select r.request_id, r.scope, r.name, r.status, tr.transform_id, tr.transform_status, tr.in_status, tr.in_total_files, tr.in_processed_files, tr.out_status, tr.out_total_files, tr.out_processed_files
from {DB_SCHEMA_IDDS}.requests r
full outer join (
select t.request_id, t.transform_id, t.status transform_status, in_coll.status in_status, in_coll.total_files in_total_files,
in_coll.processed_files in_processed_files, out_coll.status out_status, out_coll.total_files out_total_files,
out_coll.processed_files out_processed_files
from {DB_SCHEMA_IDDS}.transforms t
full outer join (select coll_id , transform_id, status, total_files, processed_files from {DB_SCHEMA_IDDS}.collections where relation_type = 0) in_coll on (t.transform_id = in_coll.transform_id)
full outer join (select coll_id , transform_id, status, total_files, processed_files from {DB_SCHEMA_IDDS}.collections where relation_type = 1) out_coll on (t.transform_id = out_coll.transform_id)
) tr on (r.request_id=tr.request_id)
where {condition}
"""
cur = connection.cursor()
cur.execute(sql, sqlpar)
rows = dictfetchall(cur)
cur.close()
return rows
def prepareSQLQueryParameters(request_params):
sqlpar, condition = {}, " (1=1) "
request_params = {key: value for key,value in request_params.items() if key in ['requestid', 'username', 'status']}
query_fields_for_subst = ['status']
dict_for_subst = {key:request_params.get(key) for key in query_fields_for_subst if key in request_params}
query_params_substituted = subtitleValue.replaceInverseKeys('requests', dict_for_subst)
sqlpar['starttime'] = (datetime.utcnow()-timedelta(hours=24*90)).strftime(defaultDatetimeFormat)
condition += 'AND r.CREATED_AT > :starttime '
for key in query_params_substituted.keys():
request_params[key] = query_params_substituted[key]
if request_params and len(request_params) > 0:
if 'requestid' in request_params:
sqlpar['requestid'] = request_params['requestid']
            condition += 'AND r.REQUEST_ID = :requestid '
if 'username' in request_params:
if request_params['username'] == 'Not set':
condition += 'AND r.USERNAME is NULL '
else:
sqlpar['username'] = request_params['username'].lower()
                condition += 'AND lower(r.USERNAME) = :username '
if 'status' in request_params:
sqlpar['status'] = query_params_substituted.get('status')
            condition += 'AND r.STATUS = :status '
return sqlpar, condition
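# Illustrative call (parameter values are hypothetical):
#   sqlpar, condition = prepareSQLQueryParameters({'requestid': 123})
#   # sqlpar    -> {'starttime': '<90 days ago>', 'requestid': 123}
#   # condition -> " (1=1) AND r.CREATED_AT > :starttime AND r.REQUEST_ID = :requestid "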
def getWorkFlowProgressItemized(request_params):
sqlpar, condition = prepareSQLQueryParameters(request_params)
sql = f"""
    SELECT r.REQUEST_ID, r.NAME as r_NAME, r.STATUS as r_STATUS, r.CREATED_AT as r_CREATED_AT, c.total_files,
c.processed_files, c.processing_files, c.transform_id, t.workload_id, p.status as p_status, r.USERNAME FROM {DB_SCHEMA_IDDS}.requests r LEFT JOIN {DB_SCHEMA_IDDS}.collections c ON r.REQUEST_ID=c.REQUEST_ID
LEFT JOIN {DB_SCHEMA_IDDS}.transforms t ON t.transform_id = c.transform_id
LEFT JOIN {DB_SCHEMA_IDDS}.processings p on p.transform_id=t.transform_id
where c.relation_type=0 and {condition} order by r.request_id desc
"""
cur = connection.cursor()
cur.execute(sql, sqlpar)
rows = dictfetchall(cur)
cur.close()
return rows
| PanDAWMS/panda-bigmon-core | core/iDDS/rawsqlquery.py | Python | apache-2.0 | 4,650 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from jacket import context
from jacket.tests.storage.unit.targets import targets_fixture as tf
from jacket.storage import utils
from jacket.storage.volume.targets import scst
from jacket.storage.volume import utils as vutils
class TestSCSTAdmDriver(tf.TargetDriverFixture):
def setUp(self):
super(TestSCSTAdmDriver, self).setUp()
self.target = scst.SCSTAdm(root_helper=utils.get_root_helper(),
configuration=self.configuration)
self.fake_iscsi_scan = \
('Collecting current configuration: done.\n'
'Driver Target\n'
'----------------------------------------------\n'
'iscsi iqn.2010-10.org.openstack:'
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba\n'
'All done.\n')
self.fake_iscsi_attribute_scan = \
('Collecting current configuration: done.\n'
'Attribute Value Writable KEY\n'
'------------------------------------------\n'
'rel_tgt_id 1 Yes Yes\n'
'Dynamic attributes available\n'
'----------------------------\n'
'IncomingUser\n'
'OutgoingUser\n'
'allowed_portal\n'
'LUN CREATE attributes available\n'
'-------------------------------\n'
'read_only\n'
'All done.\n')
self.fake_list_group = \
('org.openstack:volume-vedams\n'
'Collecting current configuration: done.\n'
'Driver: iscsi\n'
'Target: iqn.2010-10.org.openstack:volume-vedams\n'
'Driver/target \'iscsi/iqn.2010-10.org.openstack:volume-vedams\''
'has no associated LUNs.\n'
'Group: iqn.1993-08.org.debian:01:626bf14ebdc\n'
'Assigned LUNs:\n'
'LUN Device\n'
'------------------\n'
'1 1b67387810256\n'
'2 2a0f1cc9cd595\n'
'Assigned Initiators:\n'
'Initiator\n'
'-------------------------------------\n'
'iqn.1993-08.org.debian:01:626bf14ebdc\n'
'All done.\n')
self.target.db = mock.MagicMock(
volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
@mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_target_attribute')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_get_target(self, mock_execute,
mock_target_attribute,
mock_scst_execute):
mock_target_attribute.return_value = 1
mock_execute.return_value = (self.fake_iscsi_scan, None)
expected = 1
self.assertEqual(expected, self.target._get_target(
'iqn.2010-10.org.openstack:'
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba'))
@mock.patch.object(utils, 'execute')
def test_target_attribute(self, mock_execute):
mock_execute.return_value = (self.fake_iscsi_attribute_scan, None)
self.assertEqual(str(1), self.target._target_attribute(
'iqn.2010-10.org.openstack:'
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba'))
def test_single_lun_get_target_and_lun(self):
ctxt = context.get_admin_context()
self.assertEqual((0, 1), self.target._get_target_and_lun(
ctxt, self.testvol))
@mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_get_group')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_multi_lun_get_target_and_lun(self, mock_execute, mock_get_group,
mock_scst_execute):
mock_execute.return_value = (self.fake_list_group, None)
mock_get_group.return_value = self.fake_list_group
ctxt = context.get_admin_context()
with mock.patch.object(self.target, 'target_name',
return_value='iqn.2010-10.org.openstack:'
'volume-vedams'):
self.assertEqual((0, 3), self.target._get_target_and_lun(
ctxt, self.testvol))
@mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_get_target')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_create_iscsi_target(self, mock_execute, mock_get_target,
mock_scst_execute):
mock_execute.return_value = (None, None)
mock_get_target.return_value = 1
self.assertEqual(1,
self.target.create_iscsi_target(
'iqn.2010-10.org.openstack:'
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba',
'vol1',
0, 1, self.fake_volumes_dir))
@mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_get_target')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_create_export(self, mock_execute,
mock_get_target,
mock_scst_execute):
mock_execute.return_value = (None, None)
mock_scst_execute.return_value = (None, None)
mock_get_target.return_value = 1
def _fake_get_target_and_lun(*args, **kwargs):
return 0, 1
def _fake_iscsi_location(*args, **kwargs):
return '10.9.8.7:3260,1 iqn.2010-10.org.openstack:' \
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1'
def _fake_get_target_chap_auth(*args, **kwargs):
return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ')
ctxt = context.get_admin_context()
expected_result = {'location': '10.9.8.7:3260,1 '
'iqn.2010-10.org.openstack:'
'volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba 1',
'auth': 'CHAP '
'QZJbisGmn9AL954FNF4D P68eE7u9eFqDGexd28DQ'}
with mock.patch.object(self.target, '_get_target_and_lun',
side_effect=_fake_get_target_and_lun),\
mock.patch.object(self.target, '_get_target_chap_auth',
side_effect=_fake_get_target_chap_auth),\
mock.patch.object(self.target, 'initiator_iqn',
return_value='iqn.1993-08.org.debian:'
'01:626bf14ebdc'),\
mock.patch.object(self.target, '_iscsi_location',
side_effect=_fake_iscsi_location),\
mock.patch.object(self.target, 'target_driver',
return_value='iscsi'),\
mock.patch.object(vutils, 'generate_username',
side_effect=lambda: 'QZJbisGmn9AL954FNF4D'),\
mock.patch.object(vutils, 'generate_password',
side_effect=lambda: 'P68eE7u9eFqDGexd28DQ'):
self.assertEqual(expected_result,
self.target.create_export(ctxt,
self.testvol,
self.fake_volumes_dir))
    @mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_get_target')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_ensure_export(self, mock_execute,
mock_get_target,
mock_scst_execute):
mock_execute.return_value = (None, None)
mock_scst_execute.return_value = (None, None)
mock_get_target.return_value = 1
ctxt = context.get_admin_context()
def _fake_get_target_and_lun(*args, **kwargs):
return 0, 1
def _fake_get_target_chap_auth(*args, **kwargs):
return ('QZJbisGmn9AL954FNF4D', 'P68eE7u9eFqDGexd28DQ')
with mock.patch.object(self.target, 'create_iscsi_target'),\
mock.patch.object(self.target, '_get_target_chap_auth',
side_effect=_fake_get_target_chap_auth),\
mock.patch.object(self.target, '_get_target_and_lun',
side_effect=_fake_get_target_and_lun):
self.target.ensure_export(ctxt,
self.testvol,
self.fake_volumes_dir)
self.target.create_iscsi_target.assert_called_once_with(
'iqn.2010-10.org.openstack:testvol',
'ed2c2222-5fc0-11e4-aa15-123b93f75cba',
0, 1, self.fake_volumes_dir, _fake_get_target_chap_auth())
    @mock.patch.object(utils, 'execute')
@mock.patch.object(scst.SCSTAdm, '_get_target')
@mock.patch.object(scst.SCSTAdm, 'scst_execute')
def test_ensure_export_chap(self, mock_execute,
mock_get_target,
mock_scst_execute):
mock_execute.return_value = (None, None)
mock_scst_execute.return_value = (None, None)
mock_get_target.return_value = 1
ctxt = context.get_admin_context()
def _fake_get_target_and_lun(*args, **kwargs):
return 0, 1
def _fake_get_target_chap_auth(*args, **kwargs):
return None
with mock.patch.object(self.target, 'create_iscsi_target'),\
mock.patch.object(self.target, '_get_target_chap_auth',
side_effect=_fake_get_target_chap_auth),\
mock.patch.object(self.target, '_get_target_and_lun',
side_effect=_fake_get_target_and_lun):
self.target.ensure_export(ctxt,
self.testvol,
self.fake_volumes_dir)
self.target.create_iscsi_target.assert_called_once_with(
'iqn.2010-10.org.openstack:testvol',
'ed2c2222-5fc0-11e4-aa15-123b93f75cba',
0, 1, self.fake_volumes_dir, None)
| HybridF5/jacket | jacket/tests/storage/unit/targets/test_scst_driver.py | Python | apache-2.0 | 10,762 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volume types manage extension."""
import webob
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.v1 import types
from cinder.api.views import types as views_types
from cinder import exception
from cinder.volume import volume_types
authorize = extensions.extension_authorizer('volume', 'types_manage')
class VolumeTypesManageController(wsgi.Controller):
"""The volume types API controller for the OpenStack API."""
_view_builder_class = views_types.ViewBuilder
@wsgi.action("create")
@wsgi.serializers(xml=types.VolumeTypeTemplate)
def _create(self, req, body):
"""Creates a new volume type."""
context = req.environ['cinder.context']
authorize(context)
if not self.is_valid_body(body, 'volume_type'):
raise webob.exc.HTTPBadRequest()
vol_type = body['volume_type']
name = vol_type.get('name', None)
specs = vol_type.get('extra_specs', {})
if name is None or name == "":
raise webob.exc.HTTPBadRequest()
try:
volume_types.create(context, name, specs)
vol_type = volume_types.get_volume_type_by_name(context, name)
except exception.VolumeTypeExists as err:
raise webob.exc.HTTPConflict(explanation=str(err))
except exception.NotFound:
raise webob.exc.HTTPNotFound()
return self._view_builder.show(req, vol_type)
@wsgi.action("delete")
def _delete(self, req, id):
"""Deletes an existing volume type."""
context = req.environ['cinder.context']
authorize(context)
try:
vol_type = volume_types.get_volume_type(context, id)
volume_types.destroy(context, vol_type['id'])
except exception.NotFound:
raise webob.exc.HTTPNotFound()
return webob.Response(status_int=202)
class Types_manage(extensions.ExtensionDescriptor):
"""Types manage support."""
name = "TypesManage"
alias = "os-types-manage"
namespace = "http://docs.openstack.org/volume/ext/types-manage/api/v1"
updated = "2011-08-24T00:00:00+00:00"
def get_controller_extensions(self):
controller = VolumeTypesManageController()
extension = extensions.ControllerExtension(self, 'types', controller)
return [extension]
| tomasdubec/openstack-cinder | cinder/api/contrib/types_manage.py | Python | apache-2.0 | 3,054 |
from optparse import OptionParser
import os
import re
import sys
import codecs
from .sentiment_tree import SentimentTree
from .binary_tree import BinaryTree
def main():
usage = "%prog data_dir"
parser = OptionParser(usage=usage)
#parser.add_option('--keyword', dest='key', default=None,
# help='Keyword argument: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
data_dir = args[0]
#phrases = load_dictionary(data_dir)
# load all the data
    train_tree_dict, train_vocab = load_ptb_trees(data_dir, "train")
#return get_binary_dataset(train_tree_dict)
def load_ptb_trees(data_dir, dataset="train", binary=False, root_only=True):
"""
Load the train/dev/test data from the PTB-formatted files
:param data_dir: data directory
:param dataset: "train", "dev", or "test"
:param binary: if True, sentiment values are binary [0/1]; otherwise they are [0/1/2/3/4]
:param root_only: if True, only return the data for the whole tree (not subtrees)
    :return: tuple (tree_dict, vocab); tree_dict maps index -> node data, where index is (sentence_index).(subtree_index)
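    Example (illustrative; the key and field contents are hypothetical)::
        tree_dict, vocab = load_ptb_trees('sst_data', 'train', binary=True)
        tree_dict['0.0']
        # {'words': [...], 'left_mask': [...], 'right_mask': [...], 'value': 1}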
"""
filename = os.path.join(data_dir, 'trees', dataset + '.txt')
tree_dict = {}
vocab = set()
# read in all the strings, convert them to trees, and store them in a dict
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line_index, line in enumerate(input_file):
tree = convert_ptb_to_tree(line)
seqs_and_masks = tree.get_all_sequences_and_masks(root_only=root_only)
for node_tuple_index, node_tuple in enumerate(seqs_and_masks):
key = str(line_index) + '.' + str(node_tuple_index)
words, left_mask, right_mask, value = node_tuple
if binary:
if value > 2:
tree_dict[key] = {'words': words, 'left_mask': left_mask, 'right_mask': right_mask, 'value': 1}
vocab.update(set(words))
elif value < 2:
tree_dict[key] = {'words': words, 'left_mask': left_mask, 'right_mask': right_mask, 'value': 0}
vocab.update(set(words))
else:
tree_dict[key] = {'words': words, 'left_mask': left_mask, 'right_mask': right_mask, 'value': value}
vocab.update(set(words))
return tree_dict, vocab
def load_ptb_bitrees(data_dir, dataset="train", root_only=True):
"""
Load the train/dev/test data from the PTB-formatted files [as binary trees]
:param data_dir: data directory
    :param dataset: "train", "dev", or "test"
    :param root_only: if True, only return the data for the whole tree (not subtrees)
    :return: tuple (tree_dict, vocab, edge_vocab); tree_dict maps index -> node data, where index is (sentence_index).(subtree_index)
"""
filename = os.path.join(data_dir, 'trees', dataset + '.txt')
tree_dict = {}
vocab = set()
edge_vocab = set()
# read in all the strings, convert them to trees, and store them in a dict
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line_index, line in enumerate(input_file):
key = str(line_index)
tree = convert_ptb_to_tree(line)
sequences_and_masks = tree.get_all_bidirectional(root_only=root_only)
for node_tuple_index, node_tuple in enumerate(sequences_and_masks):
key = str(line_index) + '.' + str(node_tuple_index)
words, input_idx, edge_mask, output_mask, values, edges = node_tuple
#words, input_idx, edge_mask, output_mask, values, edges = tree.get_bidirectional_sequence_and_mask()
tree_dict[key] = {'words': words, 'input_idx': input_idx, 'edge_mask': edge_mask,
'output_mask': output_mask, 'values': values, 'edges': edges, 'tree': tree}
vocab.update(set(words))
edge_vocab.update(set(edges))
return tree_dict, vocab, edge_vocab
def convert_ptb_to_tree(line):
index = 0
tree = None
line = line.rstrip()
while '((' in line:
        line = re.sub(r'\(\(', '( (', line)
while '))' in line:
        line = re.sub(r'\)\)', ') )', line)
stack = []
parts = line.split()
for p_i, p in enumerate(parts):
# opening of a bracket, create a new node, take parent from top of stack
if p[0] == '(':
tag = p[1:]
if tree is None:
tree = BinaryTree(index, tag)
else:
add_descendant(tree, index, tag, stack[-1])
# add the newly created node to the stack and increment the index
stack.append(index)
index += 1
# otherwise, update the word of the node on top of the stack, and pop it
elif p[-1] == ')':
tag = p[:-1]
tag = re.sub('\\\\', '', tag)
if tag != '':
tree.set_word(index-1, tag)
stack.pop(-1)
else:
# deal with a couple of edge cases
parts[p_i+1] = p + '_' + parts[p_i+1]
return tree
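# Illustrative input to convert_ptb_to_tree (a toy PTB-style string, not taken
# from the real treebank):
#   "(2 (2 (2 A) (2 toy)) (2 (2 example) (2 .)))"
# Each "(label word)" leaf sets the word of a node; the nesting defines the
# binary structure built up through add_descendant below.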
def add_descendant(tree, index, tag, parent_index):
# add to the left first if possible, then to the right
if tree.has_left_descendant_at_node(parent_index):
if tree.has_right_descendant_at_node(parent_index):
sys.exit("Node " + str(parent_index) + " already has two children")
else:
tree.add_right_descendant(index, tag, parent_index)
else:
tree.add_left_descendant(index, tag, parent_index)
if __name__ == '__main__':
main()
# The functions below were to load the files in the sentiment treebank .zip file, but it turns out there is
# inconsistent encoding, and all the information is in the PTB format anyway...
def load_sentences(data_dir):
filename = os.path.join(data_dir, 'datasetSentences.txt')
sentences = {}
header = True
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line in input_file:
if header:
header = False
else:
line = line.rstrip()
parts = line.split('\t')
index = int(parts[0])
sentence = parts[1]
sentences[index] = sentence
return sentences
def load_dictionary(data_dir):
phrases = {}
filename = os.path.join(data_dir, 'dictionary.txt')
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line in input_file:
line = line.rstrip()
phrase, index = line.split('|')
phrases[phrase] = int(index)
return phrases
def load_labels(data_dir, binning=True):
labels = {}
filename = os.path.join(data_dir, 'sentiment_labels.txt')
header = True
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line in input_file:
if header:
header = False
else:
line = line.rstrip()
phrase, label = line.split('|')
labels[int(phrase)] = float(label)
return labels
def load_data_splits(data_dir):
filename = os.path.join(data_dir, 'datasetSplit.txt')
train = []
dev = []
test = []
header = True
with codecs.open(filename, 'r', encoding='utf-8') as input_file:
for line in input_file:
if header:
header = False
else:
line = line.rstrip()
parts = line.split(',')
sentence_index = int(parts[0])
split = int(parts[1])
if split == 1:
train.append(sentence_index)
elif split == 2:
test.append(sentence_index)
else:
dev.append(sentence_index)
print len(train), len(dev), len(test)
return train, dev, test
| dallascard/guac | core/dataset_scripts/sst/read_data.py | Python | apache-2.0 | 7,998 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from enum import Enum
from genericpath import exists
from logging import DEBUG, getLogger
from os.path import join
import sys
from textwrap import dedent
from .index import get_reduced_index
from .link import PrefixSetup, UnlinkLinkTransaction
from .prefix_data import PrefixData
from .subdir_data import SubdirData
from .. import CondaError, __version__ as CONDA_VERSION
from .._vendor.auxlib.ish import dals
from .._vendor.boltons.setutils import IndexedSet
from ..base.constants import UNKNOWN_CHANNEL
from ..base.context import context
from ..common.compat import iteritems, itervalues, odict, string_types, text_type
from ..common.constants import NULL
from ..common.io import Spinner
from ..common.path import get_major_minor_version, paths_equal
from ..exceptions import PackagesNotFoundError
from ..gateways.logging import TRACE
from ..history import History
from ..models.channel import Channel
from ..models.dist import Dist
from ..models.enums import NoarchType
from ..models.match_spec import MatchSpec
from ..models.prefix_graph import PrefixGraph
from ..models.version import VersionOrder
from ..resolve import Resolve, dashlist
try:
from cytoolz.itertoolz import concat, concatv, groupby
except ImportError:
from .._vendor.toolz.itertoolz import concat, concatv, groupby # NOQA
log = getLogger(__name__)
class DepsModifier(Enum):
"""Flags to enable alternate handling of dependencies."""
NO_DEPS = 'no_deps'
ONLY_DEPS = 'only_deps'
UPDATE_DEPS = 'update_deps'
UPDATE_DEPS_ONLY_DEPS = 'update_deps_only_deps'
UPDATE_ALL = 'update_all'
FREEZE_INSTALLED = 'freeze_installed' # freeze is a better name for --no-update-deps
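# Illustrative: a DepsModifier flag (or its lowercase string form) is passed to
# Solver.solve_final_state() / solve_for_diff() / solve_for_transaction(), e.g.
#   solver.solve_final_state(deps_modifier=DepsModifier.UPDATE_ALL)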
class Solver(object):
"""
A high-level API to conda's solving logic. Three public methods are provided to access a
solution in various forms.
* :meth:`solve_final_state`
* :meth:`solve_for_diff`
* :meth:`solve_for_transaction`
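    Example (illustrative; the prefix, channel, and spec are hypothetical):
        >>> solver = Solver("/opt/envs/myenv", ["defaults"],
        ...                 specs_to_add=[MatchSpec("numpy")])
        >>> transaction = solver.solve_for_transaction()
        >>> transaction.execute()  # UnlinkLinkTransaction is assumed to expose execute()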
"""
def __init__(self, prefix, channels, subdirs=(), specs_to_add=(), specs_to_remove=()):
"""
Args:
prefix (str):
The conda prefix / environment location for which the :class:`Solver`
is being instantiated.
channels (Sequence[:class:`Channel`]):
A prioritized list of channels to use for the solution.
subdirs (Sequence[str]):
A prioritized list of subdirs to use for the solution.
specs_to_add (Set[:class:`MatchSpec`]):
The set of package specs to add to the prefix.
specs_to_remove (Set[:class:`MatchSpec`]):
The set of package specs to remove from the prefix.
"""
self.prefix = prefix
self.channels = IndexedSet(Channel(c) for c in channels or context.channels)
self.subdirs = tuple(s for s in subdirs or context.subdirs)
self.specs_to_add = frozenset(MatchSpec.merge(s for s in specs_to_add))
self.specs_to_remove = frozenset(MatchSpec.merge(s for s in specs_to_remove))
assert all(s in context.known_subdirs for s in self.subdirs)
self._index = None
self._r = None
self._prepared = False
def solve_final_state(self, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL,
force_remove=NULL):
"""Gives the final, solved state of the environment.
Args:
deps_modifier (DepsModifier):
An optional flag indicating special solver handling for dependencies. The
default solver behavior is to be as conservative as possible with dependency
updates (in the case the dependency already exists in the environment), while
still ensuring all dependencies are satisfied. Options include
* NO_DEPS
* ONLY_DEPS
* UPDATE_DEPS
* UPDATE_DEPS_ONLY_DEPS
* FREEZE_INSTALLED
prune (bool):
If ``True``, the solution will not contain packages that were
previously brought into the environment as dependencies but are no longer
required as dependencies and are not user-requested.
ignore_pinned (bool):
If ``True``, the solution will ignore pinned package configuration
for the prefix.
force_remove (bool):
Forces removal of a package without removing packages that depend on it.
Returns:
Tuple[PackageRef]:
In sorted dependency order from roots to leaves, the package references for
the solved state of the environment.
"""
prune = context.prune if prune is NULL else prune
ignore_pinned = context.ignore_pinned if ignore_pinned is NULL else ignore_pinned
deps_modifier = context.deps_modifier if deps_modifier is NULL else deps_modifier
if isinstance(deps_modifier, string_types):
deps_modifier = DepsModifier(deps_modifier.lower())
specs_to_remove = self.specs_to_remove
specs_to_add = self.specs_to_add
# force_remove is a special case where we return early
if specs_to_remove and force_remove:
if specs_to_add:
raise NotImplementedError()
index, r = self._prepare(specs_to_remove)
solution = tuple(Dist(rec) for rec in PrefixData(self.prefix).iter_records()
if not any(spec.match(rec) for spec in specs_to_remove))
return IndexedSet(index[d] for d in r.dependency_sort({d.name: d for d in solution}))
log.debug("solving prefix %s\n"
" specs_to_remove: %s\n"
" specs_to_add: %s\n"
" prune: %s", self.prefix, specs_to_remove, specs_to_add, prune)
# declare starting point, the initial state of the environment
# `solution` and `specs_map` are mutated throughout this method
prefix_data = PrefixData(self.prefix)
solution = tuple(Dist(d) for d in prefix_data.iter_records())
specs_from_history_map = History(self.prefix).get_requested_specs_map()
if prune: # or deps_modifier == DepsModifier.UPDATE_ALL # pending conda/constructor#138
# Users are struggling with the prune functionality in --update-all, due to
# https://github.com/conda/constructor/issues/138. Until that issue is resolved,
# and for the foreseeable future, it's best to be more conservative with --update-all.
# Start with empty specs map for UPDATE_ALL because we're optimizing the update
# only for specs the user has requested; it's ok to remove dependencies.
specs_map = odict()
# However, because of https://github.com/conda/constructor/issues/138, we need
# to hard-code keeping conda, conda-build, and anaconda, if they're already in
# the environment.
solution_pkg_names = set(d.name for d in solution)
ensure_these = (pkg_name for pkg_name in {
'anaconda', 'conda', 'conda-build',
} if pkg_name not in specs_from_history_map and pkg_name in solution_pkg_names)
for pkg_name in ensure_these:
specs_from_history_map[pkg_name] = MatchSpec(pkg_name)
else:
specs_map = odict((d.name, MatchSpec(d.name)) for d in solution)
# add in historically-requested specs
specs_map.update(specs_from_history_map)
# let's pretend for now that this is the right place to build the index
prepared_specs = set(concatv(
specs_to_remove,
specs_to_add,
itervalues(specs_from_history_map),
))
index, r = self._prepare(prepared_specs)
if specs_to_remove:
# In a previous implementation, we invoked SAT here via `r.remove()` to help with
# spec removal, and then later invoking SAT again via `r.solve()`. Rather than invoking
# SAT for spec removal determination, we can use the PrefixGraph and simple tree
# traversal if we're careful about how we handle features. We still invoke sat via
# `r.solve()` later.
_track_fts_specs = (spec for spec in specs_to_remove if 'track_features' in spec)
feature_names = set(concat(spec.get_raw_value('track_features')
for spec in _track_fts_specs))
graph = PrefixGraph((index[dist] for dist in solution), itervalues(specs_map))
removed_records = []
for spec in specs_to_remove:
# If the spec was a track_features spec, then we need to also remove every
# package with a feature that matches the track_feature. The
# `graph.remove_spec()` method handles that for us.
log.trace("using PrefixGraph to remove records for %s", spec)
removed_records.extend(graph.remove_spec(spec))
for rec in removed_records:
# We keep specs (minus the feature part) for the non provides_features packages
# if they're in the history specs. Otherwise, we pop them from the specs_map.
rec_has_a_feature = set(rec.features or ()) & feature_names
if rec_has_a_feature and rec.name in specs_from_history_map:
spec = specs_map.get(rec.name, MatchSpec(rec.name))
spec._match_components.pop('features', None)
specs_map[spec.name] = spec
else:
specs_map.pop(rec.name, None)
solution = tuple(Dist(rec) for rec in graph.records)
if not removed_records and not prune:
raise PackagesNotFoundError(tuple(spec.name for spec in specs_to_remove))
# We handle as best as possible environments in inconsistent states. To do this,
# we remove now from consideration the set of packages causing inconsistencies,
# and then we add them back in following the main SAT call.
_, inconsistent_dists = r.bad_installed(solution, ())
add_back_map = {} # name: (dist, spec)
if log.isEnabledFor(DEBUG):
log.debug("inconsistent dists: %s",
dashlist(inconsistent_dists) if inconsistent_dists else 'None')
if inconsistent_dists:
for dist in inconsistent_dists:
# pop and save matching spec in specs_map
add_back_map[dist.name] = (dist, specs_map.pop(dist.name, None))
solution = tuple(dist for dist in solution if dist not in inconsistent_dists)
# For the remaining specs in specs_map, add target to each spec. `target` is a reference
# to the package currently existing in the environment. Setting target instructs the
# solver to not disturb that package if it's not necessary.
# If the spec.name is being modified by inclusion in specs_to_add, we don't set `target`,
# since we *want* the solver to modify/update that package.
#
# TLDR: when working with MatchSpec objects,
# - to minimize the version change, set MatchSpec(name=name, target=dist.full_name)
# - to freeze the package, set all the components of MatchSpec individually
for pkg_name, spec in iteritems(specs_map):
matches_for_spec = tuple(dist for dist in solution if spec.match(index[dist]))
if matches_for_spec:
if len(matches_for_spec) != 1:
raise CondaError(dals("""
Conda encountered an error with your environment. Please report an issue
at https://github.com/conda/conda/issues/new. In your report, please include
the output of 'conda info' and 'conda list' for the active environment, along
with the command you invoked that resulted in this error.
pkg_name: %s
spec: %s
matches_for_spec: %s
""") % (pkg_name, spec,
dashlist((text_type(s) for s in matches_for_spec), indent=4)))
target_dist = matches_for_spec[0]
if deps_modifier == DepsModifier.FREEZE_INSTALLED:
new_spec = MatchSpec(index[target_dist])
else:
target = Dist(target_dist).full_name
new_spec = MatchSpec(spec, target=target)
specs_map[pkg_name] = new_spec
if log.isEnabledFor(TRACE):
log.trace("specs_map with targets: %s", specs_map)
# If we're in UPDATE_ALL mode, we need to drop all the constraints attached to specs,
# so they can all float and the solver can find the most up-to-date solution. In the case
# of UPDATE_ALL, `specs_map` wasn't initialized with packages from the current environment,
# but *only* historically-requested specs. This lets UPDATE_ALL drop dependencies if
# they're no longer needed, and their presence would otherwise prevent the updated solution
# the user most likely wants.
if deps_modifier == DepsModifier.UPDATE_ALL:
specs_map = {pkg_name: MatchSpec(spec.name, optional=spec.optional)
for pkg_name, spec in iteritems(specs_map)}
# As a business rule, we never want to update python beyond the current minor version,
# unless that's requested explicitly by the user (which we actively discourage).
if 'python' in specs_map:
python_prefix_rec = prefix_data.get('python')
if python_prefix_rec:
python_spec = specs_map['python']
if not python_spec.get('version'):
pinned_version = get_major_minor_version(python_prefix_rec.version) + '.*'
specs_map['python'] = MatchSpec(python_spec, version=pinned_version)
# For the aggressive_update_packages configuration parameter, we strip any target
# that's been set.
if not context.offline:
for spec in context.aggressive_update_packages:
if spec.name in specs_map:
old_spec = specs_map[spec.name]
specs_map[spec.name] = MatchSpec(old_spec, target=None)
# add in explicitly requested specs from specs_to_add
# this overrides any name-matching spec already in the spec map
specs_map.update((s.name, s) for s in specs_to_add)
# collect additional specs to add to the solution
track_features_specs = pinned_specs = ()
if context.track_features:
track_features_specs = tuple(MatchSpec(x + '@') for x in context.track_features)
if not ignore_pinned:
pinned_specs = get_pinned_specs(self.prefix)
# As a business rule, we never want to downgrade conda below the current version,
# unless that's requested explicitly by the user (which we actively discourage).
if 'conda' in specs_map and paths_equal(self.prefix, context.conda_prefix):
conda_prefix_rec = prefix_data.get('conda')
if conda_prefix_rec:
conda_spec = specs_map['conda']
conda_in_specs_to_add_version = next(
(spec.get('version') for spec in specs_to_add if spec.name == "conda"), None
)
if not conda_in_specs_to_add_version:
conda_spec = MatchSpec(conda_spec, version=">=%s" % conda_prefix_rec.version)
if context.auto_update_conda:
conda_spec = MatchSpec(conda_spec, target=None)
specs_map['conda'] = conda_spec
final_environment_specs = IndexedSet(concatv(
itervalues(specs_map),
track_features_specs,
pinned_specs,
))
# We've previously checked `solution` for consistency (which at that point was the
# pre-solve state of the environment). Now we check our compiled set of
# `final_environment_specs` for the possibility of a solution. If there are conflicts,
# we can often avoid them by neutering specs that have a target (e.g. removing version
# constraint) and also making them optional. The result here will be less cases of
# `UnsatisfiableError` handed to users, at the cost of more packages being modified
# or removed from the environment.
conflicting_specs = r.get_conflicting_specs(tuple(final_environment_specs))
if log.isEnabledFor(DEBUG):
log.debug("conflicting specs: %s", dashlist(conflicting_specs))
for spec in conflicting_specs:
if spec.target:
final_environment_specs.remove(spec)
neutered_spec = MatchSpec(spec.name, target=spec.target, optional=True)
final_environment_specs.add(neutered_spec)
# Finally! We get to call SAT.
if log.isEnabledFor(DEBUG):
log.debug("final specs to add: %s",
dashlist(sorted(text_type(s) for s in final_environment_specs)))
solution = r.solve(tuple(final_environment_specs)) # return value is List[dist]
# add back inconsistent packages to solution
if add_back_map:
for name, (dist, spec) in iteritems(add_back_map):
if not any(d.name == name for d in solution):
solution.append(dist)
if spec:
final_environment_specs.add(spec)
# Special case handling for various DepsModifer flags. Maybe this block could be pulled
# out into its own non-public helper method?
if deps_modifier == DepsModifier.NO_DEPS:
# In the NO_DEPS case, we need to start with the original list of packages in the
# environment, and then only modify packages that match specs_to_add or
# specs_to_remove.
_no_deps_solution = IndexedSet(Dist(rec) for rec in prefix_data.iter_records())
only_remove_these = set(dist
for spec in specs_to_remove
for dist in _no_deps_solution
if spec.match(index[dist]))
_no_deps_solution -= only_remove_these
only_add_these = set(dist
for spec in specs_to_add
for dist in solution
if spec.match(index[dist]))
remove_before_adding_back = set(dist.name for dist in only_add_these)
_no_deps_solution = IndexedSet(dist for dist in _no_deps_solution
if dist.name not in remove_before_adding_back)
_no_deps_solution |= only_add_these
solution = _no_deps_solution
elif deps_modifier == DepsModifier.ONLY_DEPS:
# Using a special instance of PrefixGraph to remove youngest child nodes that match
# the original specs_to_add. It's important to remove only the *youngest* child nodes,
# because a typical use might be `conda install --only-deps python=2 flask`, and in
# that case we'd want to keep python.
graph = PrefixGraph((index[d] for d in solution), specs_to_add)
graph.remove_youngest_descendant_nodes_with_specs()
solution = tuple(Dist(rec) for rec in graph.records)
elif deps_modifier in (DepsModifier.UPDATE_DEPS, DepsModifier.UPDATE_DEPS_ONLY_DEPS):
# Here we have to SAT solve again :( It's only now that we know the dependency
# chain of specs_to_add.
specs_to_add_names = set(spec.name for spec in specs_to_add)
update_names = set()
graph = PrefixGraph((index[d] for d in solution), final_environment_specs)
for spec in specs_to_add:
node = graph.get_node_by_name(spec.name)
for ancestor_record in graph.all_ancestors(node):
ancestor_name = ancestor_record.name
if ancestor_name not in specs_to_add_names:
update_names.add(ancestor_name)
grouped_specs = groupby(lambda s: s.name in update_names, final_environment_specs)
new_final_environment_specs = set(grouped_specs.get(False, ()))
update_specs = set(MatchSpec(spec.name, optional=spec.optional)
for spec in grouped_specs.get(True, ()))
final_environment_specs = new_final_environment_specs | update_specs
solution = r.solve(final_environment_specs)
if deps_modifier == DepsModifier.UPDATE_DEPS_ONLY_DEPS:
# duplicated from DepsModifier.ONLY_DEPS
graph = PrefixGraph((index[d] for d in solution), specs_to_add)
graph.remove_youngest_descendant_nodes_with_specs()
solution = tuple(Dist(rec) for rec in graph.records)
if prune:
graph = PrefixGraph((index[d] for d in solution), final_environment_specs)
graph.prune()
solution = tuple(Dist(rec) for rec in graph.records)
self._check_solution(solution, pinned_specs)
solution = IndexedSet(r.dependency_sort({d.name: d for d in solution}))
log.debug("solved prefix %s\n"
" solved_linked_dists:\n"
" %s\n",
self.prefix, "\n ".join(text_type(d) for d in solution))
return IndexedSet(index[d] for d in solution)
def solve_for_diff(self, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL,
force_remove=NULL, force_reinstall=False):
"""Gives the package references to remove from an environment, followed by
the package references to add to an environment.
Args:
deps_modifier (DepsModifier):
See :meth:`solve_final_state`.
prune (bool):
See :meth:`solve_final_state`.
ignore_pinned (bool):
See :meth:`solve_final_state`.
force_remove (bool):
See :meth:`solve_final_state`.
force_reinstall (bool):
For requested specs_to_add that are already satisfied in the environment,
instructs the solver to remove the package and spec from the environment,
and then add it back--possibly with the exact package instance modified,
depending on the spec exactness.
Returns:
Tuple[PackageRef], Tuple[PackageRef]:
A two-tuple of PackageRef sequences. The first is the group of packages to
remove from the environment, in sorted dependency order from leaves to roots.
The second is the group of packages to add to the environment, in sorted
dependency order from roots to leaves.
"""
final_precs = self.solve_final_state(deps_modifier, prune, ignore_pinned, force_remove)
previous_records = IndexedSet(self._index[d] for d in self._r.dependency_sort(
{prefix_rec.name: Dist(prefix_rec)
for prefix_rec in PrefixData(self.prefix).iter_records()}
))
unlink_precs = previous_records - final_precs
link_precs = final_precs - previous_records
def _add_to_unlink_and_link(rec):
link_precs.add(rec)
            if rec in previous_records:
unlink_precs.add(rec)
# If force_reinstall is enabled, make sure any package in specs_to_add is unlinked then
# re-linked
if force_reinstall:
for spec in self.specs_to_add:
prec = next((rec for rec in final_precs if spec.match(rec)), None)
assert prec
_add_to_unlink_and_link(prec)
# add back 'noarch: python' packages to unlink and link if python version changes
python_spec = MatchSpec('python')
prev_python = next((rec for rec in previous_records if python_spec.match(rec)), None)
curr_python = next((rec for rec in final_precs if python_spec.match(rec)), None)
gmm = get_major_minor_version
if prev_python and curr_python and gmm(prev_python.version) != gmm(curr_python.version):
noarch_python_precs = (p for p in final_precs if p.noarch == NoarchType.python)
for prec in noarch_python_precs:
_add_to_unlink_and_link(prec)
unlink_precs = IndexedSet(reversed(sorted(unlink_precs,
key=lambda x: previous_records.index(x))))
link_precs = IndexedSet(sorted(link_precs, key=lambda x: final_precs.index(x)))
return unlink_precs, link_precs
def solve_for_transaction(self, deps_modifier=NULL, prune=NULL, ignore_pinned=NULL,
force_remove=NULL, force_reinstall=False):
"""Gives an UnlinkLinkTransaction instance that can be used to execute the solution
on an environment.
Args:
deps_modifier (DepsModifier):
See :meth:`solve_final_state`.
prune (bool):
See :meth:`solve_final_state`.
ignore_pinned (bool):
See :meth:`solve_final_state`.
force_remove (bool):
See :meth:`solve_final_state`.
force_reinstall (bool):
See :meth:`solve_for_diff`.
Returns:
UnlinkLinkTransaction:
"""
if self.prefix == context.root_prefix and context.enable_private_envs:
# This path has the ability to generate a multi-prefix transaction. The basic logic
# is in the commented out get_install_transaction() function below. Exercised at
# the integration level in the PrivateEnvIntegrationTests in test_create.py.
raise NotImplementedError()
else:
with Spinner("Solving environment", not context.verbosity and not context.quiet,
context.json):
unlink_precs, link_precs = self.solve_for_diff(deps_modifier, prune, ignore_pinned,
force_remove, force_reinstall)
stp = PrefixSetup(self.prefix, unlink_precs, link_precs,
self.specs_to_remove, self.specs_to_add)
# TODO: Only explicitly requested remove and update specs are being included in
# History right now. Do we need to include other categories from the solve?
self._notify_conda_outdated(link_precs)
return UnlinkLinkTransaction(stp)
def _notify_conda_outdated(self, link_precs):
if not context.notify_outdated_conda or context.quiet:
return
current_conda_prefix_rec = PrefixData(context.conda_prefix).get('conda', None)
if current_conda_prefix_rec:
channel_name = current_conda_prefix_rec.channel.canonical_name
if channel_name == UNKNOWN_CHANNEL:
channel_name = "defaults"
# only look for a newer conda in the channel conda is currently installed from
conda_newer_spec = MatchSpec('%s::conda>%s' % (channel_name, CONDA_VERSION))
if paths_equal(self.prefix, context.conda_prefix):
if any(conda_newer_spec.match(prec) for prec in link_precs):
return
conda_newer_precs = sorted(
SubdirData.query_all(conda_newer_spec, self.channels, self.subdirs),
key=lambda x: VersionOrder(x.version)
# VersionOrder is fine here rather than r.version_key because all precs
# should come from the same channel
)
if conda_newer_precs:
latest_version = conda_newer_precs[-1].version
# If conda comes from defaults, ensure we're giving instructions to users
# that should resolve release timing issues between defaults and conda-forge.
add_channel = "-c defaults " if channel_name == "defaults" else ""
print(dedent("""
==> WARNING: A newer version of conda exists. <==
current version: %s
latest version: %s
Please update conda by running
$ conda update -n base %sconda
""") % (CONDA_VERSION, latest_version, add_channel), file=sys.stderr)
def _prepare(self, prepared_specs):
# All of this _prepare() method is hidden away down here. Someday we may want to further
# abstract away the use of `index` or the Resolve object.
if self._prepared and prepared_specs == self._prepared_specs:
return self._index, self._r
if hasattr(self, '_index') and self._index:
# added in install_actions for conda-build back-compat
self._prepared_specs = prepared_specs
self._r = Resolve(self._index, channels=self.channels)
else:
# add in required channels that aren't explicitly given in the channels list
# For correctness, we should probably add to additional_channels any channel that
# is given by PrefixData(self.prefix).all_subdir_urls(). However that causes
# usability problems with bad / expired tokens.
additional_channels = set()
for spec in self.specs_to_add:
# TODO: correct handling for subdir isn't yet done
channel = spec.get_exact_value('channel')
if channel:
additional_channels.add(Channel(channel))
self.channels.update(additional_channels)
reduced_index = get_reduced_index(self.prefix, self.channels,
self.subdirs, prepared_specs)
self._prepared_specs = prepared_specs
self._index = reduced_index
self._r = Resolve(reduced_index, channels=self.channels)
self._prepared = True
return self._index, self._r
def _check_solution(self, solution, pinned_specs):
# Ensure that solution is consistent with pinned specs.
for spec in pinned_specs:
spec = MatchSpec(spec, optional=False)
if not any(spec.match(d) for d in solution):
# if the spec doesn't match outright, make sure there's no package by that
# name in the solution
assert not any(d.name == spec.name for d in solution)
# Let this be handled as part of txn.verify()
# # Ensure conda or its dependencies aren't being uninstalled in conda's
# # own environment.
# if paths_equal(self.prefix, context.conda_prefix) and not context.force:
# conda_spec = MatchSpec("conda")
# conda_dist = next((conda_spec.match(d) for d in solution), None)
# assert conda_dist
# conda_deps_specs = self._r.ms_depends(conda_dist)
# for spec in conda_deps_specs:
# assert any(spec.match(d) for d in solution)
def get_pinned_specs(prefix):
"""Find pinned specs from file and return a tuple of MatchSpec."""
pinfile = join(prefix, 'conda-meta', 'pinned')
if exists(pinfile):
with open(pinfile) as f:
from_file = (i for i in f.read().strip().splitlines()
if i and not i.strip().startswith('#'))
else:
from_file = ()
return tuple(MatchSpec(s, optional=True) for s in
concatv(context.pinned_packages, from_file))
# NOTE: The remaining code in this module is being left for development reference until
# the context.enable_private_envs portion is implemented in :meth:`solve_for_transaction`.
# def solve_prefix(prefix, r, specs_to_remove=(), specs_to_add=(), prune=False):
# # this function gives a "final state" for an existing prefix given just these simple inputs
# prune = context.prune or prune
# log.debug("solving prefix %s\n"
# " specs_to_remove: %s\n"
# " specs_to_add: %s\n"
# " prune: %s", prefix, specs_to_remove, specs_to_add, prune)
#
# # declare starting point
# solved_linked_dists = () if prune else tuple(iterkeys(linked_data(prefix)))
# # TODO: to change this whole function from working with dists to working with records, just
# # change iterkeys to itervalues
#
# if solved_linked_dists and specs_to_remove:
# solved_linked_dists = r.remove(tuple(text_type(s) for s in specs_to_remove),
# solved_linked_dists)
#
# specs_from_history = _get_relevant_specs_from_history(prefix, specs_to_remove, specs_to_add)
# augmented_specs_to_add = augment_specs(prefix, concatv(specs_from_history, specs_to_add))
#
# log.debug("final specs to add:\n %s\n",
# "\n ".join(text_type(s) for s in augmented_specs_to_add))
# solved_linked_dists = r.install(augmented_specs_to_add,
# solved_linked_dists,
# update_deps=context.update_dependencies)
#
# if not context.ignore_pinned:
# # TODO: assert all pinned specs are compatible with what's in solved_linked_dists
# pass
#
# # TODO: don't uninstall conda or its dependencies, probably need to check elsewhere
#
# solved_linked_dists = IndexedSet(r.dependency_sort({d.name: d for d in solved_linked_dists}))
#
# log.debug("solved prefix %s\n"
# " solved_linked_dists:\n"
# " %s\n",
# prefix, "\n ".join(text_type(d) for d in solved_linked_dists))
#
# return solved_linked_dists, specs_to_add
# def solve_for_actions(prefix, r, specs_to_remove=(), specs_to_add=(), prune=False):
# # this is not for force-removing packages, which doesn't invoke the solver
#
# solved_dists, _specs_to_add = solve_prefix(prefix, r, specs_to_remove, specs_to_add, prune)
# # TODO: this _specs_to_add part should be refactored when we can better pin package channel
# # origin # NOQA
# dists_for_unlinking, dists_for_linking = sort_unlink_link_from_solve(prefix, solved_dists,
# _specs_to_add)
#
# def remove_non_matching_dists(dists_set, specs_to_match):
# _dists_set = IndexedSet(dists_set)
# for dist in dists_set:
# for spec in specs_to_match:
# if spec.match(dist):
# break
# else: # executed if the loop ended normally (no break)
# _dists_set.remove(dist)
# return _dists_set
#
# if context.no_dependencies:
# # for `conda create --no-deps python=3 flask`, do we install python? yes
# # the only dists we touch are the ones that match a specs_to_add
# dists_for_linking = remove_non_matching_dists(dists_for_linking, specs_to_add)
# dists_for_unlinking = remove_non_matching_dists(dists_for_unlinking, specs_to_add)
# elif context.only_dependencies:
# # for `conda create --only-deps python=3 flask`, do we install python? yes
# # remove all dists that match a specs_to_add, as long as that dist isn't a dependency
# # of other specs_to_add
# _index = r.index
# _match_any = lambda spec, dists: next((dist for dist in dists
# if spec.match(_index[dist])),
# None)
# _is_dependency = lambda spec, dist: any(r.depends_on(s, dist.name)
# for s in specs_to_add if s != spec)
# for spec in specs_to_add:
# link_matching_dist = _match_any(spec, dists_for_linking)
# if link_matching_dist:
# if not _is_dependency(spec, link_matching_dist):
# # as long as that dist isn't a dependency of other specs_to_add
# dists_for_linking.remove(link_matching_dist)
# unlink_matching_dist = _match_any(spec, dists_for_unlinking)
# if unlink_matching_dist:
# dists_for_unlinking.remove(unlink_matching_dist)
#
# if context.force:
# dists_for_unlinking, dists_for_linking = forced_reinstall_specs(prefix, solved_dists,
# dists_for_unlinking,
# dists_for_linking,
# specs_to_add)
#
# dists_for_unlinking = IndexedSet(reversed(dists_for_unlinking))
# return dists_for_unlinking, dists_for_linking
# def sort_unlink_link_from_solve(prefix, solved_dists, remove_satisfied_specs):
# # solved_dists should be the return value of solve_prefix()
# old_linked_dists = IndexedSet(iterkeys(linked_data(prefix)))
#
# dists_for_unlinking = old_linked_dists - solved_dists
# dists_for_linking = solved_dists - old_linked_dists
#
# # TODO: add back 'noarch: python' to unlink and link if python version changes
#
# # r_linked = Resolve(linked_data(prefix))
# # for spec in remove_satisfied_specs:
# # if r_linked.find_matches(spec):
# # spec_name = spec.name
# # unlink_dist = next((d for d in dists_for_unlinking if d.name == spec_name), None)
# # link_dist = next((d for d in dists_for_linking if d.name == spec_name), None)
# # if unlink_dist:
# # dists_for_unlinking.discard(unlink_dist)
# # if link_dist:
# # dists_for_linking.discard(link_dist)
#
# return dists_for_unlinking, dists_for_linking
# def get_install_transaction(prefix, index, spec_strs, force=False, only_names=None,
# always_copy=False, pinned=True, update_deps=True,
# prune=False, channel_priority_map=None, is_update=False):
# # type: (str, Dict[Dist, Record], List[str], bool, Option[List[str]], bool, bool, bool,
# # bool, bool, bool, Dict[str, Sequence[str, int]]) -> List[Dict[weird]]
#
# # split out specs into potentially multiple preferred envs if:
# # 1. the user default env (root_prefix) is the prefix being considered here
# # 2. the user has not specified the --name or --prefix command-line flags
# if (prefix == context.root_prefix
# and not context.prefix_specified
# and prefix_is_writable(prefix)
# and context.enable_private_envs):
#
# # a registered package CANNOT be installed in the root env
# # if ANY package requesting a private env is required in the root env, all packages for
# # that requested env must instead be installed in the root env
#
# root_r = get_resolve_object(index.copy(), context.root_prefix)
#
# def get_env_for_spec(spec):
# # use resolve's get_dists_for_spec() to find the "best" matching record
# record_for_spec = root_r.index[root_r.get_dists_for_spec(spec, emptyok=False)[-1]]
# return ensure_pad(record_for_spec.preferred_env)
#
# # specs grouped by target env, the 'None' key holds the specs for the root env
# env_add_map = groupby(get_env_for_spec, (MatchSpec(s) for s in spec_strs))
# requested_root_specs_to_add = {s for s in env_add_map.pop(None, ())}
#
# ed = EnvsDirectory(join(context.root_prefix, 'envs'))
# registered_packages = ed.get_registered_packages_keyed_on_env_name()
#
# if len(env_add_map) == len(registered_packages) == 0:
# # short-circuit the rest of this logic
# return get_install_transaction_single(prefix, index, spec_strs, force, only_names,
# always_copy, pinned, update_deps,
# prune, channel_priority_map, is_update)
#
# root_specs_to_remove = set(MatchSpec(s.name) for s in concat(itervalues(env_add_map)))
# required_root_dists, _ = solve_prefix(context.root_prefix, root_r,
# specs_to_remove=root_specs_to_remove,
# specs_to_add=requested_root_specs_to_add,
# prune=True)
#
# required_root_package_names = tuple(d.name for d in required_root_dists)
#
# # first handle pulling back requested specs to root
# forced_root_specs_to_add = set()
# pruned_env_add_map = defaultdict(list)
# for env_name, specs in iteritems(env_add_map):
# for spec in specs:
# spec_name = MatchSpec(spec).name
# if spec_name in required_root_package_names:
# forced_root_specs_to_add.add(spec)
# else:
# pruned_env_add_map[env_name].append(spec)
# env_add_map = pruned_env_add_map
#
# # second handle pulling back registered specs to root
# env_remove_map = defaultdict(list)
# for env_name, registered_package_entries in iteritems(registered_packages):
# for rpe in registered_package_entries:
# if rpe['package_name'] in required_root_package_names:
# # ANY registered packages in this environment need to be pulled back
# for pe in registered_package_entries:
# # add an entry in env_remove_map
# # add an entry in forced_root_specs_to_add
# pname = pe['package_name']
# env_remove_map[env_name].append(MatchSpec(pname))
# forced_root_specs_to_add.add(MatchSpec(pe['requested_spec']))
# break
#
# unlink_link_map = odict()
#
#     # solve all needed preferred_env prefixes
# for env_name in set(concatv(env_add_map, env_remove_map)):
# specs_to_add = env_add_map[env_name]
# spec_to_remove = env_remove_map[env_name]
# pfx = ed.preferred_env_to_prefix(env_name)
# unlink, link = solve_for_actions(pfx, get_resolve_object(index.copy(), pfx),
# specs_to_remove=spec_to_remove,
# specs_to_add=specs_to_add,
# prune=True)
# unlink_link_map[env_name] = unlink, link, specs_to_add
#
# # now solve root prefix
# # we have to solve root a second time in all cases, because this time we don't prune
# root_specs_to_add = set(concatv(requested_root_specs_to_add, forced_root_specs_to_add))
# root_unlink, root_link = solve_for_actions(context.root_prefix, root_r,
# specs_to_remove=root_specs_to_remove,
# specs_to_add=root_specs_to_add)
# if root_unlink or root_link:
# # this needs to be added to odict last; the private envs need to be updated first
# unlink_link_map[None] = root_unlink, root_link, root_specs_to_add
#
# def make_txn_setup(pfx, unlink, link, specs):
# # TODO: this index here is probably wrong; needs to be per-prefix
# return PrefixSetup(index, pfx, unlink, link, 'INSTALL',
# tuple(specs))
#
# txn_args = tuple(make_txn_setup(ed.to_prefix(ensure_pad(env_name)), *oink)
# for env_name, oink in iteritems(unlink_link_map))
# txn = UnlinkLinkTransaction(*txn_args)
# return txn
#
# else:
# # disregard any requested preferred env
# return get_install_transaction_single(prefix, index, spec_strs, force, only_names,
# always_copy, pinned, update_deps,
# prune, channel_priority_map, is_update)
| Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/core/solve.py | Python | apache-2.0 | 44,797 |
from django.conf import settings
USERNAME_COOKIE_NAME = getattr(settings, 'USERNAME_COOKIE_NAME', 'username')
MAX_COOKIE_SIZE = getattr(settings, 'MAX_COOKIE_SIZE', 4096)
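# Illustrative override sketch (not part of the original module): because both
# values are read with getattr(), either can be customized from the project's
# settings.py, e.g.
#     USERNAME_COOKIE_NAME = 'member_name'
#     MAX_COOKIE_SIZE = 8192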
| callowayproject/django-cookiesession | cookiesession/settings.py | Python | apache-2.0 | 173 |
from flask import Flask, render_template
import subprocess
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index(name=None):
return render_template('hello.html', name=name)
@app.route('/b', methods=['GET'])
def b_start():
subprocess.call(['python3', 'alarmclock.py'])
return 'Hello'
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| Xibuk/iot-alarm | button.py | Python | apache-2.0 | 378 |
# FiveDollarTenCents / memehk.py
# by Kobura.Nephilim
# version 2.0
# Tested on raspberry pi
#
# This python program will get the page from memehk,
# extract the youtube id of the live stream and
# pass the youtube id to livestreamer to record into mp4
import urllib2
import re
memehk_url='http://www.memehk.com/index.php?page=live&channel=1'
req = urllib2.Request(url=memehk_url)
resp = urllib2.urlopen(req)
text = resp.read()
youtubeid = re.findall('(?<=embed\/).{11}', text)
print youtubeid[1]
| kobura-nephilim/FiveDollarTenCents | memehk.py | Python | apache-2.0 | 497 |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for training including contrastive helpers."""
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
EPS = 1e-9
@tf.function
def cosine_similarity(x, y):
"""Computes cosine similarity between all pairs of vectors in x and y."""
x_expanded, y_expanded = x[:, tf.newaxis], y[tf.newaxis, :]
similarity_matrix = tf.reduce_sum(x_expanded * y_expanded, axis=-1)
similarity_matrix /= (
tf.norm(x_expanded, axis=-1) * tf.norm(y_expanded, axis=-1) + EPS)
return similarity_matrix
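# Illustrative usage sketch (not part of the original module):
#   x = tf.random.normal([4, 8])
#   y = tf.random.normal([6, 8])
#   sim = cosine_similarity(x, y)  # shape [4, 6]; each entry lies in roughly [-1, 1]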
@tf.function
def sample_indices(dim_x, size=128, sort=False):
dim_x = tf.cast(dim_x, tf.int32)
indices = tf.range(0, dim_x, dtype=tf.int32)
indices = tf.random.shuffle(indices)[:size]
if sort:
indices = tf.sort(indices)
return indices
@tf.function
def representation_alignment_loss(nn_model,
optimal_data_tuple,
use_coupling_weights=False,
coupling_temperature=0.1,
return_representation=False,
temperature=1.0):
"""PSE loss."""
obs1, obs2, metric_vals = optimal_data_tuple
if np.random.randint(2) == 1:
obs2, obs1 = obs1, obs2
metric_vals = tf.transpose(metric_vals)
indices = sample_indices(tf.shape(metric_vals)[0], sort=return_representation)
obs1 = tf.gather(obs1, indices, axis=0)
metric_vals = tf.gather(metric_vals, indices, axis=0)
representation_1 = nn_model.representation({'pixels': obs1})
representation_2 = nn_model.representation({'pixels': obs2})
similarity_matrix = cosine_similarity(representation_1, representation_2)
alignment_loss = contrastive_loss(
similarity_matrix,
metric_vals,
temperature,
coupling_temperature=coupling_temperature,
use_coupling_weights=use_coupling_weights)
if return_representation:
return alignment_loss, similarity_matrix
else:
return alignment_loss
@tf.function
def contrastive_loss(similarity_matrix,
metric_values,
temperature,
coupling_temperature=1.0,
use_coupling_weights=True):
"""Contrative Loss with soft coupling."""
logging.info('Using alternative contrastive loss.')
metric_shape = tf.shape(metric_values)
similarity_matrix /= temperature
neg_logits1 = similarity_matrix
col_indices = tf.cast(tf.argmin(metric_values, axis=1), dtype=tf.int32)
pos_indices1 = tf.stack(
(tf.range(metric_shape[0], dtype=tf.int32), col_indices), axis=1)
pos_logits1 = tf.gather_nd(similarity_matrix, pos_indices1)
if use_coupling_weights:
metric_values /= coupling_temperature
coupling = tf.exp(-metric_values)
pos_weights1 = -tf.gather_nd(metric_values, pos_indices1)
pos_logits1 += pos_weights1
negative_weights = tf.math.log((1.0 - coupling) + EPS)
neg_logits1 += tf.tensor_scatter_nd_update(negative_weights, pos_indices1,
pos_weights1)
neg_logits1 = tf.math.reduce_logsumexp(neg_logits1, axis=1)
return tf.reduce_mean(neg_logits1 - pos_logits1)
def _get_action(replay):
if isinstance(replay, list):
return np.array([x.action for x in replay])
else:
return replay.action
def _calculate_action_cost_matrix(ac1, ac2):
diff = tf.expand_dims(ac1, axis=1) - tf.expand_dims(ac2, axis=0)
return tf.cast(tf.reduce_mean(tf.abs(diff), axis=-1), dtype=tf.float32)
def metric_fixed_point_fast(cost_matrix, gamma=0.99, eps=1e-7):
"""Dynamic prograaming for calculating PSM."""
d = np.zeros_like(cost_matrix)
def operator(d_cur):
d_new = 1 * cost_matrix
discounted_d_cur = gamma * d_cur
d_new[:-1, :-1] += discounted_d_cur[1:, 1:]
d_new[:-1, -1] += discounted_d_cur[1:, -1]
d_new[-1, :-1] += discounted_d_cur[-1, 1:]
return d_new
while True:
d_new = operator(d)
if np.sum(np.abs(d - d_new)) < eps:
break
else:
d = d_new[:]
return d
def compute_metric(replay1, replay2, gamma):
actions1, actions2 = _get_action(replay1), _get_action(replay2)
action_cost = _calculate_action_cost_matrix(actions1, actions2)
return tf_metric_fixed_point(action_cost, gamma=gamma)
@tf.function
def tf_metric_fixed_point(action_cost_matrix, gamma):
return tf.numpy_function(
metric_fixed_point_fast, [action_cost_matrix, gamma], Tout=tf.float32)
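# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The Step tuples
# below are hypothetical stand-ins for real replay structures; _get_action()
# only needs an `.action` attribute on each element.
if __name__ == '__main__':
  import collections
  Step = collections.namedtuple('Step', ['action'])
  replay1 = [Step(np.array([0.1, 0.2])), Step(np.array([0.3, 0.4]))]
  replay2 = [Step(np.array([0.1, 0.2])), Step(np.array([0.5, 0.6]))]
  psm = compute_metric(replay1, replay2, gamma=0.99)
  print(psm.numpy())  # pairwise PSM values with shape (2, 2)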
| google-research/google-research | pse/dm_control/utils/helper_utils.py | Python | apache-2.0 | 5,035 |
# Copyright (c) 2016 NTT All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six
from congress.datasources import constants
from congress.datasources import datasource_driver
class DoctorDriver(datasource_driver.PushedDataSourceDriver):
"""A DataSource Driver for OPNFV Doctor project.
This driver has a table for Doctor project's Inspector. Please check
https://wiki.opnfv.org/display/doctor/Doctor+Home for the details
about OPNFV Doctor project.
To update the table, call Update row API.
PUT /v1/data-sources/<the driver id>/tables/<table id>/rows
For updating 'events' table, the request body should be following
style. The request will replace all rows in the table with the body,
which means if you update the table with [] it will clear the table.
One {} object in the list represents one row of the table.
request body:
[
{
"time": "2016-02-22T11:48:55Z",
"type": "compute.host.down",
"details": {
"hostname": "compute1",
"status": "down",
"monitor": "zabbix1",
"monitor_event_id": "111"
}
},
.....
]
"""
value_trans = {'translation-type': 'VALUE'}
def safe_id(x):
if isinstance(x, six.string_types):
return x
try:
return x['id']
except Exception:
return str(x)
def flatten_events(row_events):
flatten = []
for event in row_events:
details = event.pop('details')
for k, v in details.items():
event[k] = v
flatten.append(event)
return flatten
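    # For example, flatten_events() turns the docstring's sample row
    #     {"time": ..., "type": ..., "details": {"hostname": "compute1", ...}}
    # into the flat dict
    #     {"time": ..., "type": ..., "hostname": "compute1", "status": "down",
    #      "monitor": "zabbix1", "monitor_event_id": "111"}
    # so each detail key can feed a column of the 'events' table below.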
events_translator = {
'translation-type': 'HDICT',
'table-name': 'events',
'selector-type': 'DICT_SELECTOR',
'objects-extract-fn': flatten_events,
'field-translators':
({'fieldname': 'time', 'translator': value_trans},
{'fieldname': 'type', 'translator': value_trans},
{'fieldname': 'hostname', 'translator': value_trans},
{'fieldname': 'status', 'translator': value_trans},
{'fieldname': 'monitor', 'translator': value_trans},
{'fieldname': 'monitor_event_id', 'translator': value_trans},)
}
TRANSLATORS = [events_translator]
def __init__(self, name='', args=None):
super(DoctorDriver, self).__init__(name, args=args)
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'doctor'
result['description'] = ('Datasource driver that allows external '
'systems to push data in accordance with '
'OPNFV Doctor Inspector southbound interface '
'specification.')
result['config'] = {'persist_data': constants.OPTIONAL}
return result
| ramineni/my_congress | congress/datasources/doctor_driver.py | Python | apache-2.0 | 3,563 |
# -*- coding: utf8 -*-
# Import everything from the pylab module, which provides plotting and math functions
from pylab import *
# Begin data preparation
# normal distribution center at x=0 and y=5
# generate normal distributions centered at x = 0 and y = 5
x = randn(100000)
y = randn(100000) + 5
# 2-D histogram
# use 40 bins
H, xedges, yedges = histogram2d(x, y, bins=40)
# specify the extent over which to display the histogram,
# given by the min/max of yedges and the min/max of xedges
extent = (yedges[0], yedges[-1], xedges[-1], xedges[0])
# data preparation done
# Begin figure preparation
# display the histogram,
# rendering it as an image (bitmap)
plt.imshow(H, extent=extent, interpolation='nearest')
colorbar()
# figure preparation done
# show the figure
show()
# http://matplotlib.org/examples/pylab_examples/hist2d_log_demo.html
| kangwonlee/ECA | lab_01_intro/07_hist2d_log_demo.py | Python | apache-2.0 | 857 |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Test Module for data_fetcher.py"""
# pylint: disable=W0212
import pytest
from data_fetcher import DataFetcher
class TestDataFetcher:
"""Test Class for data_fetcher.py"""
@pytest.mark.parametrize('numbers,value,expected', [
([0, 2, 4, 6, 8, 10, 12], -1, 0),
([0, 2, 4, 6, 8, 10, 12], 0, 0),
([0, 2, 4, 6, 8, 10, 12], 1, 0),
([0, 2, 4, 6, 8, 10, 12], 2, 0),
([0, 2, 4, 6, 8, 10, 12], 3, 1),
([0, 2, 4, 6, 8, 10, 12], 4, 1),
([0, 2, 4, 6, 8, 10, 12], 5, 2),
([0, 2, 4, 6, 8, 10, 12], 6, 2),
([0, 2, 4, 6, 8, 10, 12], 12, 5),
([0, 2, 4, 6, 8, 10, 12], 13, 6),
([0, 2, 4, 6, 8, 10, 12], 100, 6),
])
def test__binary_search_increasing(self, numbers, value, expected):
"""Tests binary search with list of numbers in increasing order."""
preprocess = DataFetcher('dummy', 'dummy')
assert preprocess._binary_search(numbers, value, False) == expected
@pytest.mark.parametrize('numbers,value,expected', [
([10, 8, 6, 4, 2, 0], 100, 0),
([10, 8, 6, 4, 2, 0], 10, 0),
([10, 8, 6, 4, 2, 0], 9, 0),
([10, 8, 6, 4, 2, 0], 8, 1),
([10, 8, 6, 4, 2, 0], 7, 1),
([10, 8, 6, 4, 2, 0], 4, 3),
([10, 8, 6, 4, 2, 0], 3, 3),
([10, 8, 6, 4, 2, 0], 1, 4),
([10, 8, 6, 4, 2, 0], 0, 5),
([10, 8, 6, 4, 2, 0], -1, 5),
])
def test__binary_search_decreasing(self, numbers, value, expected):
"""Tests binary search with list of numbers in decreasing order."""
preprocess = DataFetcher('dummy', 'dummy')
assert preprocess._binary_search(numbers, value, True) == expected
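# Illustrative sketch (not part of the test suite): one implementation consistent
# with the parametrized cases above; the real DataFetcher._binary_search may differ.
# Increasing list: last index whose element is strictly less than `value`.
# Decreasing list: last index whose element is greater than or equal to `value`.
# Both results are clamped to 0.
import bisect
def _binary_search_reference(numbers, value, reverse):
    if not reverse:
        index = bisect.bisect_left(numbers, value) - 1
    else:
        index = len(numbers) - bisect.bisect_left(numbers[::-1], value) - 1
    return max(index, 0)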
| googleinterns/power-data-graphing-intern-2020 | backend/data_fetcher_test.py | Python | apache-2.0 | 2,341 |
from HARK.ConsumptionSaving.ConsPrefShockModel import (
PrefShockConsumerType,
KinkyPrefConsumerType,
)
import numpy as np
import unittest
class testPrefShockConsumerType(unittest.TestCase):
def setUp(self):
self.agent = PrefShockConsumerType()
self.agent.cycles = 0
self.agent.solve()
def test_solution(self):
self.assertEqual(self.agent.solution[0].mNrmMin, 0)
m = np.linspace(self.agent.solution[0].mNrmMin, 5, 200)
self.assertAlmostEqual(self.agent.PrefShkDstn[0].X[5], 0.69046812)
self.assertAlmostEqual(
self.agent.solution[0].cFunc(m, np.ones_like(m))[35], 0.8123891603954809
)
self.assertAlmostEqual(
self.agent.solution[0].cFunc.derivativeX(m, np.ones_like(m))[35],
0.44973706445183886,
)
def test_simulation(self):
self.agent.T_sim = 10
self.agent.track_vars = ['cNrm', "PrefShk"]
self.agent.make_shock_history() # This is optional
self.agent.initialize_sim()
self.agent.simulate()
self.assertAlmostEqual(self.agent.history['cNrm'][0][5], 0.7366020536567589)
self.assertEqual(
self.agent.shock_history["PrefShk"][0][5],
self.agent.history["PrefShk"][0][5],
)
self.assertEqual(self.agent.history["PrefShk"][0][5], 0.4909415933881665)
class testKinkyPrefConsumerType(unittest.TestCase):
def setUp(self):
self.agent = KinkyPrefConsumerType()
self.agent.cycles = 0 # Infinite horizon
self.agent.solve()
def test_solution(self):
self.assertAlmostEqual(self.agent.solution[0].mNrmMin, -0.7555156106287383)
m = np.linspace(self.agent.solution[0].mNrmMin, 5, 200)
self.assertAlmostEqual(self.agent.PrefShkDstn[0].X[5], 0.6904681186891202)
c = self.agent.solution[0].cFunc(m, np.ones_like(m))
self.assertAlmostEqual(c[5], 0.13237946)
k = self.agent.solution[0].cFunc.derivativeX(m, np.ones_like(m))
self.assertAlmostEqual(k[5], 0.91443463)
self.agent.solution[0].vFunc
self.agent.solution[0].mNrmMin
def test_simulation(self):
self.agent.T_sim = 10
self.agent.track_vars = ['cNrm', "PrefShk"]
self.agent.initialize_sim()
self.agent.simulate()
self.assertAlmostEqual(self.agent.history['cNrm'][0][5], 0.7717096928111515)
| econ-ark/HARK | HARK/ConsumptionSaving/tests/test_ConsPrefShockModel.py | Python | apache-2.0 | 2,426 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from oslo_utils import timeutils
from trove.backup import models as bkup_models
from trove.backup import state
from trove.common import exception as t_exception
from trove.common.instance import ServiceStatuses
from trove.common import utils
from trove.conductor import manager as conductor_manager
from trove.instance import models as t_models
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
# See LP bug #1255178
OLD_DBB_SAVE = bkup_models.DBBackup.save
class ConductorMethodTests(trove_testtools.TestCase):
def setUp(self):
# See LP bug #1255178
bkup_models.DBBackup.save = OLD_DBB_SAVE
super(ConductorMethodTests, self).setUp()
util.init_db()
self.cond_mgr = conductor_manager.Manager()
self.instance_id = utils.generate_uuid()
def tearDown(self):
super(ConductorMethodTests, self).tearDown()
def _create_iss(self):
new_id = utils.generate_uuid()
iss = t_models.InstanceServiceStatus(
id=new_id,
instance_id=self.instance_id,
status=ServiceStatuses.NEW)
iss.save()
return new_id
def _get_iss(self, id):
return t_models.InstanceServiceStatus.find_by(id=id)
def _create_backup(self, name='fake backup'):
new_id = utils.generate_uuid()
backup = bkup_models.DBBackup.create(
id=new_id,
name=name,
description='This is a fake backup object.',
tenant_id=utils.generate_uuid(),
state=state.BackupState.NEW,
instance_id=self.instance_id)
backup.save()
return new_id
def _get_backup(self, id):
return bkup_models.DBBackup.find_by(id=id)
# --- Tests for heartbeat ---
def test_heartbeat_instance_not_found(self):
new_id = utils.generate_uuid()
self.assertRaises(t_exception.ModelNotFoundError,
self.cond_mgr.heartbeat, None, new_id, {})
@patch('trove.conductor.manager.LOG')
def test_heartbeat_instance_no_changes(self, mock_logging):
iss_id = self._create_iss()
old_iss = self._get_iss(iss_id)
self.cond_mgr.heartbeat(None, self.instance_id, {})
new_iss = self._get_iss(iss_id)
self.assertEqual(old_iss.status_id, new_iss.status_id)
self.assertEqual(old_iss.status_description,
new_iss.status_description)
@patch('trove.conductor.manager.LOG')
def test_heartbeat_instance_status_bogus_change(self, mock_logging):
iss_id = self._create_iss()
old_iss = self._get_iss(iss_id)
new_status = 'potato salad'
payload = {
'service_status': new_status,
}
self.assertRaises(ValueError, self.cond_mgr.heartbeat,
None, self.instance_id, payload)
new_iss = self._get_iss(iss_id)
self.assertEqual(old_iss.status_id, new_iss.status_id)
self.assertEqual(old_iss.status_description,
new_iss.status_description)
@patch('trove.conductor.manager.LOG')
def test_heartbeat_instance_status_changed(self, mock_logging):
iss_id = self._create_iss()
payload = {'service_status': ServiceStatuses.BUILDING.description}
self.cond_mgr.heartbeat(None, self.instance_id, payload)
iss = self._get_iss(iss_id)
self.assertEqual(ServiceStatuses.BUILDING, iss.status)
# --- Tests for update_backup ---
def test_backup_not_found(self):
new_bkup_id = utils.generate_uuid()
self.assertRaises(t_exception.ModelNotFoundError,
self.cond_mgr.update_backup,
None, self.instance_id, new_bkup_id)
@patch('trove.conductor.manager.LOG')
def test_backup_instance_id_nomatch(self, mock_logging):
new_iid = utils.generate_uuid()
bkup_id = self._create_backup('nomatch')
old_name = self._get_backup(bkup_id).name
self.cond_mgr.update_backup(None, new_iid, bkup_id,
name="remains unchanged")
bkup = self._get_backup(bkup_id)
self.assertEqual(old_name, bkup.name)
@patch('trove.conductor.manager.LOG')
def test_backup_bogus_fields_not_changed(self, mock_logging):
bkup_id = self._create_backup('bogus')
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
not_a_valid_field="INVALID")
bkup = self._get_backup(bkup_id)
self.assertFalse(hasattr(bkup, 'not_a_valid_field'))
@patch('trove.conductor.manager.LOG')
def test_backup_real_fields_changed(self, mock_logging):
bkup_id = self._create_backup('realrenamed')
new_name = "recently renamed"
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
name=new_name)
bkup = self._get_backup(bkup_id)
self.assertEqual(new_name, bkup.name)
# --- Tests for discarding old messages ---
@patch('trove.conductor.manager.LOG')
def test_heartbeat_newer_timestamp_accepted(self, mock_logging):
new_p = {'service_status': ServiceStatuses.NEW.description}
build_p = {'service_status': ServiceStatuses.BUILDING.description}
iss_id = self._create_iss()
iss = self._get_iss(iss_id)
now = timeutils.utcnow_ts(microsecond=True)
future = now + 60
self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=now)
self.cond_mgr.heartbeat(None, self.instance_id, build_p, sent=future)
iss = self._get_iss(iss_id)
self.assertEqual(ServiceStatuses.BUILDING, iss.status)
@patch('trove.conductor.manager.LOG')
def test_heartbeat_older_timestamp_discarded(self, mock_logging):
new_p = {'service_status': ServiceStatuses.NEW.description}
build_p = {'service_status': ServiceStatuses.BUILDING.description}
iss_id = self._create_iss()
iss = self._get_iss(iss_id)
now = timeutils.utcnow_ts(microsecond=True)
past = now - 60
self.cond_mgr.heartbeat(None, self.instance_id, new_p, sent=past)
self.cond_mgr.heartbeat(None, self.instance_id, build_p, sent=past)
iss = self._get_iss(iss_id)
self.assertEqual(ServiceStatuses.NEW, iss.status)
def test_backup_newer_timestamp_accepted(self):
old_name = "oldname"
new_name = "renamed"
bkup_id = self._create_backup(old_name)
bkup = self._get_backup(bkup_id)
now = timeutils.utcnow_ts(microsecond=True)
future = now + 60
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
sent=now, name=old_name)
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
sent=future, name=new_name)
bkup = self._get_backup(bkup_id)
self.assertEqual(new_name, bkup.name)
def test_backup_older_timestamp_discarded(self):
old_name = "oldname"
new_name = "renamed"
bkup_id = self._create_backup(old_name)
bkup = self._get_backup(bkup_id)
now = timeutils.utcnow_ts(microsecond=True)
past = now - 60
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
sent=now, name=old_name)
self.cond_mgr.update_backup(None, self.instance_id, bkup_id,
sent=past, name=new_name)
bkup = self._get_backup(bkup_id)
self.assertEqual(old_name, bkup.name)
| zhangg/trove | trove/tests/unittests/conductor/test_methods.py | Python | apache-2.0 | 8,262 |
from django.contrib import admin
from models import Media
admin.site.register(Media) | jamesmfriedman/django-primer | primer/media/admin.py | Python | apache-2.0 | 85 |
from datetime import date
from django.contrib.humanize.templatetags.humanize import ordinal
from django.core import urlresolvers
from django.shortcuts import get_object_or_404
from django.views.generic import list_detail, date_based, simple
from project_utils import annotate, get_page
from site_preferences.utils import get_cached_site_prefs
from models import BlogEntry
from settings import BloggingSettings
from utils import get_cached_blog_entry_headline
@annotate(breadcrumb=get_cached_site_prefs().blog_title)
def entry_list(request):
"""A view of a blog index page"""
return list_detail.object_list(request,
queryset= BlogEntry.objects.published(),
paginate_by = get_cached_site_prefs().blog_entries_per_page,
page = get_page(request),
template_name = 'blogging/blog_entry_list.html',
template_object_name = "entry"
)
@annotate(breadcrumb=get_cached_blog_entry_headline)
def entry_detail(request, year=None, month=None, day=None, slug=None):
"""A view of a blog entry"""
entry = get_object_or_404(BlogEntry.objects.published(),
pub_date__year=year,
pub_date__month=month,
pub_date__day=day,
slug=slug
)
admin_url = urlresolvers.reverse(
'admin:blogging_blogentry_change',
args=(entry.id,)
)
return simple.direct_to_template(request,
template="blogging/blog_entry_detail.html",
extra_context={'entry': entry, 'admin_url': admin_url}
)
@annotate(breadcrumb="Archive")
def archive_index(request, blog=None):
"""
A view of the years and months on which any blog entry was published
"""
dates = BlogEntry.objects.get_entry_dates()
return simple.direct_to_template(request,
template = 'blogging/blog_archive_index.html',
extra_context = {
'dates': dates,
}
)
# Date-based archive page views:
@annotate(breadcrumb=lambda year, month, day:
ordinal(date(int(year), int(month), int(day)).strftime("%e")))
def archive_day(request, year=None, month=None, day=None):
"""A view of posts published on a given day"""
return date_based.archive_day(request,
queryset=BlogEntry.objects.published(),
template_name="blogging/blog_archive_list.html",
date_field='pub_date',
year=year,
month=month,
month_format="%m",
day=day,
extra_context={
'archive_type': 'day',
'date': date(int(year), int(month), int(day))},
template_object_name='entry'
)
@annotate(breadcrumb=lambda year, month: date(int(year), \
int(month), 1).strftime("%B"))
def archive_month(request, year=None, month=None):
"""A view of posts published on a given month"""
return date_based.archive_month(request,
queryset=BlogEntry.objects.published(),
template_name="blogging/blog_archive_list.html",
date_field='pub_date',
year=year,
month=month,
month_format="%m",
extra_context={
'archive_type': 'month',
'date': date(int(year), int(month), 1)},
template_object_name='entry'
)
@annotate(breadcrumb=lambda year: date(int(year), 1, 1).strftime("%Y"))
def archive_year(request, year=None):
"""A view of posts published in a given year"""
return date_based.archive_year(request,
queryset=BlogEntry.objects.published(),
template_name="blogging/blog_archive_list.html",
date_field='pub_date',
year=year,
make_object_list=True,
extra_context={
'archive_type': 'year',
'date': date(int(year), 1, 1)},
template_object_name='entry'
) | mazelife/django-belleville | belleville/blogging/blog_views.py | Python | apache-2.0 | 3,737 |
#!/usr/bin/python
import sys
# the pydap package provides open_url() in pydap.client
from pydap.client import open_url
def get_object(url):
    return open_url(url)
def get_variables(url):
    data = open_url(url)
    return data.keys()
def get_grids(url):
    data = open_url(url)
    # TODO: the original script left grid extraction unfinished
    return data
# dataset = open_url() -> structure
# vars = data.keys()
# var.dimensions -> structure
# var.{shape,type}
# var.attributes -> dict
# var.attrib -> value
def bbox_dap(url, bbox_list):
    # url could be a file or catalog listing of files
    # assume grids are same for all files
    pass  # TODO: the original script left this unimplemented
if __name__ == '__main__':
    if len(sys.argv) < 3:
        print "Usage:"
        print "  ", sys.argv[0], "data_file_url list_bounding_boxes"
        sys.exit()
    else:
        bbox_dap(sys.argv[1], sys.argv[2])
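# Illustrative pydap usage sketch (not part of the original script); the OPeNDAP
# endpoint and variable name below are hypothetical:
#     dataset = open_url('http://example.com/opendap/sample.nc')
#     print dataset.keys()                  # variable names
#     var = dataset['temperature']          # hypothetical variable
#     print var.shape, var.dimensions, var.attributes
#     subset = var[0, 0:10, 0:10]           # slicing fetches data from the server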
| KimberleyOpie/common-tools | shape-dap/bbox_dap.py | Python | apache-2.0 | 706 |
# Copyright 2022 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import re
import shlex
import subprocess
from google.api_core.exceptions import NotFound
import google.auth
from google.cloud import bigquery
from google.cloud import storage
from google.cloud.retail import ProductDetail, PurgeUserEventsRequest, \
UserEvent, UserEventServiceClient, WriteUserEventRequest
from google.cloud.retail_v2 import Product
from google.protobuf.timestamp_pb2 import Timestamp
project_id = google.auth.default()[1]
default_catalog = "projects/{0}/locations/global/catalogs/default_catalog".format(
project_id)
# get user event
def get_user_event(visitor_id):
timestamp = Timestamp()
timestamp.seconds = int(datetime.datetime.now().timestamp())
product = Product()
product.id = 'test_id'
product_detail = ProductDetail()
product_detail.product = product
user_event = UserEvent()
user_event.event_type = "detail-page-view"
user_event.visitor_id = visitor_id
user_event.event_time = timestamp
user_event.product_details = [product_detail]
print(user_event)
return user_event
# write user event
def write_user_event(visitor_id):
write_user_event_request = WriteUserEventRequest()
write_user_event_request.user_event = get_user_event(visitor_id)
write_user_event_request.parent = default_catalog
user_event = UserEventServiceClient().write_user_event(
write_user_event_request)
print("---the user event is written---")
print(user_event)
return user_event
# purge user event
def purge_user_event(visitor_id):
purge_user_event_request = PurgeUserEventsRequest()
purge_user_event_request.filter = 'visitorId="{}"'.format(visitor_id)
purge_user_event_request.parent = default_catalog
purge_user_event_request.force = True
purge_operation = UserEventServiceClient().purge_user_events(
purge_user_event_request)
print("---the purge operation was started:----")
print(purge_operation.operation.name)
def get_project_id():
get_project_command = "gcloud config get-value project --format json"
config = subprocess.check_output(shlex.split(get_project_command))
project_id = re.search('\"(.*?)\"', str(config)).group(1)
return project_id
def create_bucket(bucket_name: str):
"""Create a new bucket in Cloud Storage"""
print("Creating new bucket:" + bucket_name)
buckets_in_your_project = list_buckets()
if bucket_name in buckets_in_your_project:
print("Bucket {} already exists".format(bucket_name))
else:
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
bucket.storage_class = "STANDARD"
new_bucket = storage_client.create_bucket(bucket, location="us")
print(
"Created bucket {} in {} with storage class {}".format(
new_bucket.name, new_bucket.location, new_bucket.storage_class
)
)
return new_bucket
def delete_bucket(bucket_name: str):
"""Delete a bucket from Cloud Storage"""
storage_client = storage.Client()
print("Deleting bucket:" + bucket_name)
buckets_in_your_project = list_buckets()
if bucket_name in buckets_in_your_project:
blobs = storage_client.list_blobs(bucket_name)
for blob in blobs:
blob.delete()
bucket = storage_client.get_bucket(bucket_name)
bucket.delete()
print("Bucket {} is deleted".format(bucket.name))
else:
print("Bucket {} is not found".format(bucket_name))
def list_buckets():
"""Lists all buckets"""
bucket_list = []
storage_client = storage.Client()
buckets = storage_client.list_buckets()
for bucket in buckets:
bucket_list.append(bucket.name)
return bucket_list
def upload_blob(bucket_name, source_file_name):
"""Uploads a file to the bucket."""
# The path to your file to upload
# source_file_name = "local/path/to/file"
print("Uploading data form {} to the bucket {}".format(source_file_name,
bucket_name))
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
object_name = re.search('resources/(.*?)$', source_file_name).group(1)
blob = bucket.blob(object_name)
blob.upload_from_filename(source_file_name)
print(
"File {} uploaded to {}.".format(
source_file_name, object_name
)
)
def create_bq_dataset(dataset_name):
"""Create a BigQuery dataset"""
full_dataset_id = f"{project_id}.{dataset_name}"
bq = bigquery.Client()
print(f"Creating dataset {full_dataset_id}")
try:
bq.get_dataset(full_dataset_id)
print(f"dataset {full_dataset_id} already exists")
except NotFound:
# Construct a Dataset object to send to the API.
dataset = bq.Dataset(full_dataset_id)
dataset.location = "US"
bq.create_dataset(dataset)
print("dataset is created")
def create_bq_table(dataset, table_name, schema_file_path):
"""Create a BigQuery table"""
full_table_id = f"{project_id}.{dataset}.{table_name}"
bq = bigquery.Client()
print(f"Creating BigQuery table {full_table_id}")
try:
bq.get_table(full_table_id)
print(f"table {full_table_id} already exists")
except NotFound:
# Construct a Table object to send to the API.
with open(schema_file_path, "rb") as schema:
schema_dict = json.load(schema)
table = bigquery.Table(full_table_id, schema=schema_dict)
bq.create_table(table)
print("table is created")
def delete_bq_table(dataset, table_name):
full_table_id = f"{project_id}.{dataset}.{table_name}"
bq = bigquery.Client()
bq.delete_table(full_table_id, not_found_ok=True)
print("Table '{}' is deleted.".format(full_table_id))
def upload_data_to_bq_table(dataset, table_name, source, schema_file_path):
"""Upload data to the table from specified source file"""
full_table_id = f"{project_id}.{dataset}.{table_name}"
bq = bigquery.Client()
print(f"Uploading data from {source} to the table {full_table_id}")
with open(schema_file_path, "rb") as schema:
schema_dict = json.load(schema)
job_config = bigquery.LoadJobConfig(
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
schema=schema_dict)
with open(source, "rb") as source_file:
job = bq.load_table_from_file(source_file, full_table_id,
job_config=job_config)
job.result() # Waits for the job to complete.
print("data was uploaded")
| googleapis/python-retail | samples/interactive-tutorials/events/setup_events/setup_cleanup.py | Python | apache-2.0 | 7,256 |
#!/usr/bin/env python
###############################################################################
#
# Copyright 2010 Locomatix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
import sys
import locomatix
import locomatix.lql as lql
from _utils import *
def search_nearby():
"""docstring for search_nearby"""
parser = locomatix.ArgsParser()
parser.add_description("Finds all objects within a region around a given object")
  parser.add_arg('feed', 'Name of the feed of parent object')
parser.add_arg('objectid', 'Object around which to search')
parser.add_arg('radius', 'Radius of search region in meters')
parser.add_arg('from-feed','Feed to include in search')
args = parser.parse_args(sys.argv)
try:
lxclient = locomatix.Client(args['custid'], \
args['key'], \
args['secret-key'], \
args['host'], \
args['port'])
except:
print "Unable to connect to %s at port %d" % (args['host'],args['port'])
sys.exit(1)
try:
objectid = args['objectid']
feed = args['feed']
region = locomatix.Circle(float(args['radius']))
from_feed = args['from-feed']
predicate = lql.SelectObjectLocation(from_feed)
start_key = locomatix.DEFAULT_FETCH_STARTKEY
fetch_size = locomatix.DEFAULT_FETCH_SIZE
while True:
batch = lxclient._request('search_nearby', objectid, feed, region, predicate._query, start_key, fetch_size)
dprint(args, lxclient.response_body(), '\n'.join('%s' % obj for obj in batch.objlocs))
if batch.next_key == None:
break # this is the last batch
start_key = batch.next_key
except locomatix.LxException, e:
dprint(args, lxclient.response_body(), \
"error: failed to retrieve search nearby list for (%s in %s) - %s" % (objectid, feed, str(e)))
sys.exit(1)
except KeyboardInterrupt:
sys.exit(1)
if __name__ == '__main__':
search_nearby()
| locomatix/locomatix-python | locomatix/cli/search_nearby.py | Python | apache-2.0 | 2,578 |
"""
Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
import mxnet as mx
import proposal
import proposal_target
from rcnn.config import config
eps = 0.001
use_global_stats = True
#mx.symbol.softmax()
def ConvFactory(data, num_filter, kernel, stride=(1, 1), pad=(0, 0), act_type="relu", mirror_attr={}, with_act=True,name="",no_bias=True):
"""
    ConvFactory builds a conv layer followed by batch normalization and an optional ReLU
    :param data: input symbol
    :param num_filter: number of output filters of the conv layer
    :param kernel: kernel size
    :param stride: stride
    :param pad: padding
    :param act_type: activation type (e.g. "relu")
    :param mirror_attr: extra attributes for the activation
    :param with_act: whether to append the activation
    :param name: name prefix for the created layers
    :param no_bias: whether the convolution omits its bias term
    :return: output symbol of the block
"""
if name == "":
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad)
# bn = mx.symbol.BatchNorm(data=conv)
bn = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=eps, use_global_stats=use_global_stats)
if with_act:
act = mx.symbol.Activation(
data=bn, act_type=act_type, attr=mirror_attr)
return act
else:
return bn
else:
conv = mx.symbol.Convolution(
data=data, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad,name=name,no_bias=no_bias)
# bn = mx.symbol.BatchNorm(data=conv)
bn = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=eps, use_global_stats=use_global_stats,name=name+"_bn")
if with_act:
act = mx.symbol.Activation(
data=bn, act_type=act_type, attr=mirror_attr,name=name + "_relu")
return act
else:
return bn
def block35(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={},name=""):# inception resnet
tower_conv = ConvFactory(net, 32, (1, 1),name= name+"_1x1") #inception_resnet_v2_a1_1x1
tower_conv1_0 = ConvFactory(net, 32, (1, 1),name= name+"_3x3_reduce") # #inception_resnet_v2_a1_3X3_reduce
tower_conv1_1 = ConvFactory(tower_conv1_0, 32, (3, 3), pad=(1, 1),name= name+"_3x3")
tower_conv2_0 = ConvFactory(net, 32, (1, 1),name=name+"_3x3_2_reduce") # inception_resnet_v2_a1_3x3_2_reduce
tower_conv2_1 = ConvFactory(tower_conv2_0, 48, (3, 3), pad=(1, 1),name=name+"_3x3_2")#inception_resnet_v2_a1_3x3_2
tower_conv2_2 = ConvFactory(tower_conv2_1, 64, (3, 3), pad=(1, 1),name=name+"_3x3_3")
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_1, tower_conv2_2])
tower_out = mx.symbol.Convolution(
data=tower_mixed, num_filter=input_num_channels, kernel=(1,1), stride=(1,1), pad=(0,0),name=name+"_up",no_bias=False)
#tower_out = ConvFactory(
# tower_mixed, input_num_channels, (1, 1), with_act=False,no_bias=False,name=name+"_up")# "inception_resnet_v2_a1_up"
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block17(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={},name=""):
tower_conv = ConvFactory(net, 192, (1, 1),name=name+"_1x1")
tower_conv1_0 = ConvFactory(net, 128, (1, 1),name=name+"_1x7_reduce") #inception_resnet_v2_b1_1x7_reduce
tower_conv1_1 = ConvFactory(tower_conv1_0, 160, (1, 7), pad=(0, 3),name=name+"_1x7")
tower_conv1_2 = ConvFactory(tower_conv1_1, 192, (7, 1), pad=(3, 0),name=name+"_7x1")
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = mx.symbol.Convolution(
data=tower_mixed, num_filter=input_num_channels, kernel=(1,1), stride=(1,1), pad=(0,0),name=name+"_up",no_bias=False)
# tower_out = ConvFactory(
# tower_mixed, input_num_channels, (1, 1), with_act=False,name=name+"_up",no_bias=False)#inception_resnet_v2_b1_up
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def block8(net, input_num_channels, scale=1.0, with_act=True, act_type='relu', mirror_attr={},name=""):
tower_conv = ConvFactory(net, 192, (1, 1),name=name+"_1x1") #inception_resnet_v2_c1_1x1
tower_conv1_0 = ConvFactory(net, 192, (1, 1),name=name+"_1x3_reduce") #inception_resnet_v2_c1_1x3_reduce
tower_conv1_1 = ConvFactory(tower_conv1_0, 224, (1, 3), pad=(0, 1),name=name+"_1x3")#inception_resnet_v2_c1_1x3
tower_conv1_2 = ConvFactory(tower_conv1_1, 256, (3, 1), pad=(1, 0),name=name+"_3x1")
tower_mixed = mx.symbol.Concat(*[tower_conv, tower_conv1_2])
tower_out = mx.symbol.Convolution(
data=tower_mixed, num_filter=input_num_channels, kernel=(1,1), stride=(1,1), pad=(0,0),name=name+"_up",no_bias=False)
#tower_out = ConvFactory(
# tower_mixed, input_num_channels, (1, 1), with_act=False,name=name+"_up",no_bias=False)#inception_resnet_v2_c1_up
net += scale * tower_out
if with_act:
act = mx.symbol.Activation(
data=net, act_type=act_type, attr=mirror_attr)
return act
else:
return net
def repeat(inputs, repetitions, layer,name, *args, **kwargs):
outputs = inputs
for i in range(repetitions):
outputs = layer(outputs,name=name.format(i+1), *args, **kwargs)
return outputs
def get_inceptionresnet_conv(data):
# inceptionresnet 1
incption_1 = ConvFactory(data=data, num_filter=32,kernel=(3, 3),pad=(1,1), stride=(2, 2),name = "conv1_3x3_s2") #[ ,32,149,149]
# inceptionresnet 2
conv2a_3_3 = ConvFactory(incption_1, 32, (3, 3), pad=(1, 1),name="conv2_3x3_s1") # reduce the size -1
conv2b_3_3 = ConvFactory(conv2a_3_3, 64, (3, 3), pad=(1, 1),name="conv3_3x3_s1")
incption_2 = mx.symbol.Pooling(
data=conv2b_3_3, kernel=(3, 3), stride=(2, 2),pad=(1,1),pool_type='max') # [*,64,73,73]
# inceptionresnet 3
conv3a_1_1 = ConvFactory(incption_2, 80, (1, 1),name="conv4_3x3_reduce")
conv3b_3_3 = ConvFactory(conv3a_1_1, 192, (3, 3),pad=(1,1),name="conv4_3x3")
incption_3 = mx.symbol.Pooling(
data=conv3b_3_3, kernel=(3, 3), stride=(2, 2),pad=(1,1), pool_type='max') # [*,192,35,35]
# inceptionresnet 4
tower_conv = ConvFactory(incption_3, 96, (1, 1),name="conv5_1x1")
tower_conv1_0 = ConvFactory(incption_3, 48, (1, 1),name= "conv5_5x5_reduce")
tower_conv1_1 = ConvFactory(tower_conv1_0, 64, (5, 5), pad=(2, 2),name="conv5_5x5")
tower_conv2_0 = ConvFactory(incption_3, 64, (1, 1),name="conv5_3x3_reduce")
tower_conv2_1 = ConvFactory(tower_conv2_0, 96, (3, 3), pad=(1, 1),name="conv5_3x3")
tower_conv2_2 = ConvFactory(tower_conv2_1, 96, (3, 3), pad=(1, 1),name="conv5_3x3_2")
tower_pool3_0 = mx.symbol.Pooling(data=incption_3, kernel=(
3, 3), stride=(1, 1), pad=(1, 1), pool_type='avg')
tower_conv3_1 = ConvFactory(tower_pool3_0, 64, (1, 1),name="conv5_1x1_ave")
stem_inception_4 = mx.symbol.Concat(
*[tower_conv, tower_conv1_1, tower_conv2_2, tower_conv3_1]) # [*,320,35,35]
## resnet begin
res_out_4 = repeat(stem_inception_4, 10, block35, scale=0.17, input_num_channels=320,name="inception_resnet_v2_a{0}")
#upscale and pooling
tower_conv = ConvFactory(res_out_4, 384, (3, 3), stride=(2, 2),pad=(1,1),name="reduction_a_3x3")
tower_conv1_0 = ConvFactory(res_out_4, 256, (1, 1),name="reduction_a_3x3_2_reduce")
tower_conv1_1 = ConvFactory(tower_conv1_0, 256, (3, 3), pad=(1, 1),name="reduction_a_3x3_2")
tower_conv1_2 = ConvFactory(tower_conv1_1, 384, (3, 3), pad=(1, 1),stride=(2, 2),name="reduction_a_3x3_3")
tower_pool = mx.symbol.Pooling(res_out_4, kernel=(
3, 3), stride=(2, 2),pad=(1,1),pool_type='max')
incption_4 = mx.symbol.Concat(*[tower_conv, tower_conv1_2, tower_pool]) # [*,1088,17,17]
#inception_4 = incption_4
inception_4 = repeat(incption_4, 20, block17, scale=0.1, input_num_channels=1088,name="inception_resnet_v2_b{0}")
return inception_4
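# Illustrative shape check (not part of the original symbol definitions):
#     data = mx.symbol.Variable(name='data')
#     conv_feat = get_inceptionresnet_conv(data)
#     _, out_shapes, _ = conv_feat.infer_shape(data=(1, 3, 600, 600))
#     print(out_shapes)  # the shared feature map fed to the RPN and ROI pooling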
def get_inceptionresnet_train(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
gt_boxes = mx.symbol.Variable(name="gt_boxes")
rpn_label = mx.symbol.Variable(name='label')
rpn_bbox_target = mx.symbol.Variable(name='bbox_target')
rpn_bbox_weight = mx.symbol.Variable(name='bbox_weight')
# shared convolutional layers
conv_feat = get_inceptionresnet_conv(data)
#print(conv_feat)
# RPN layers
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
# classification
rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape, label=rpn_label, multi_output=True,
normalization='valid', use_ignore=True, ignore_label=-1, name="rpn_cls_prob")
# bounding box regression
rpn_bbox_loss_ = rpn_bbox_weight * mx.symbol.smooth_l1(name='rpn_bbox_loss_', scalar=3.0, data=(rpn_bbox_pred - rpn_bbox_target))
rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss', data=rpn_bbox_loss_, grad_scale=1.0 / config.TRAIN.RPN_BATCH_SIZE)
# ROI proposal
rpn_cls_act = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_act")
rpn_cls_act_reshape = mx.symbol.Reshape(
data=rpn_cls_act, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_act_reshape')
if config.TRAIN.CXX_PROPOSAL:
rois = mx.symbol.Proposal(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_act_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TRAIN.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TRAIN.RPN_POST_NMS_TOP_N,
threshold=config.TRAIN.RPN_NMS_THRESH, rpn_min_size=config.TRAIN.RPN_MIN_SIZE)
# ROI proposal target
gt_boxes_reshape = mx.symbol.Reshape(data=gt_boxes, shape=(-1, 5), name='gt_boxes_reshape')
group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes_reshape, op_type='proposal_target',
num_classes=num_classes, batch_images=config.TRAIN.BATCH_IMAGES,
batch_rois=config.TRAIN.BATCH_ROIS, fg_fraction=config.TRAIN.FG_FRACTION)
rois = group[0]
label = group[1]
bbox_target = group[2]
bbox_weight = group[3]
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(17, 17), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
#inception 5
net = roi_pool
tower_conv = ConvFactory(net, 256, (1, 1),name="reduction_b_3x3_reduce")
tower_conv0_1 = ConvFactory(tower_conv, 384, (3, 3), stride=(2, 2),name="reduction_b_3x3")
tower_conv1 = ConvFactory(net, 256, (1, 1),name="reduction_b_3x3_2_reduce")
tower_conv1_1 = ConvFactory(tower_conv1, 288, (3, 3), stride=(2, 2),name="reduction_b_3x3_2")
tower_conv2 = ConvFactory(net, 256, (1, 1),name="reduction_b_3x3_3_reduce")
tower_conv2_1 = ConvFactory(tower_conv2, 288, (3, 3), pad=(1, 1),name="reduction_b_3x3_3")
tower_conv2_2 = ConvFactory(tower_conv2_1, 320, (3, 3), stride=(2, 2),name="reduction_b_3x3_4")
tower_pool = mx.symbol.Pooling(net, kernel=(
3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(
*[tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool])
# inception 6
net = repeat(net, 9, block8, scale=0.2, input_num_channels=2080,name="inception_resnet_v2_c{0}")
net = block8(net, with_act=False, input_num_channels=2080,name="inception_resnet_v2_c10")
net = ConvFactory(net, 1536, (1, 1),name="conv6_1x1")
pool1 = mx.symbol.Pooling(net, kernel=(
8, 8), global_pool=True, pool_type='avg')
pool1 = mx.symbol.Flatten(pool1)
pool1 = mx.symbol.Dropout(data=pool1, p=0.2)
# pool1 = mx.symbol.FullyConnected(data=pool1, num_hidden=num_classes)
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score, label=label, normalization='batch')
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
bbox_loss_ = bbox_weight * mx.symbol.smooth_l1(name='bbox_loss_', scalar=1.0, data=(bbox_pred - bbox_target))
bbox_loss = mx.sym.MakeLoss(name='bbox_loss', data=bbox_loss_, grad_scale=1.0 / config.TRAIN.BATCH_ROIS)
# reshape output
label = mx.symbol.Reshape(data=label, shape=(config.TRAIN.BATCH_IMAGES, -1), name='label_reshape')
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TRAIN.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_loss = mx.symbol.Reshape(data=bbox_loss, shape=(config.TRAIN.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_loss_reshape')
group = mx.symbol.Group([rpn_cls_prob, rpn_bbox_loss, cls_prob, bbox_loss, mx.symbol.BlockGrad(label)])
return group
def get_inceptionresnet_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS):
data = mx.symbol.Variable(name="data")
im_info = mx.symbol.Variable(name="im_info")
# shared convolutional layers
conv_feat = get_inceptionresnet_conv(data)
# RPN
rpn_conv = mx.symbol.Convolution(
data=conv_feat, kernel=(3, 3), pad=(1, 1), num_filter=512, name="rpn_conv_3x3")
rpn_relu = mx.symbol.Activation(data=rpn_conv, act_type="relu", name="rpn_relu")
rpn_cls_score = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=2 * num_anchors, name="rpn_cls_score")
rpn_bbox_pred = mx.symbol.Convolution(
data=rpn_relu, kernel=(1, 1), pad=(0, 0), num_filter=4 * num_anchors, name="rpn_bbox_pred")
# ROI Proposal
rpn_cls_score_reshape = mx.symbol.Reshape(
data=rpn_cls_score, shape=(0, 2, -1, 0), name="rpn_cls_score_reshape")
rpn_cls_prob = mx.symbol.SoftmaxActivation(
data=rpn_cls_score_reshape, mode="channel", name="rpn_cls_prob")
rpn_cls_prob_reshape = mx.symbol.Reshape(
data=rpn_cls_prob, shape=(0, 2 * num_anchors, -1, 0), name='rpn_cls_prob_reshape')
if config.TEST.CXX_PROPOSAL:
rois = mx.symbol.Proposal(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
feature_stride=config.RPN_FEAT_STRIDE, scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
else:
rois = mx.symbol.Custom(
cls_prob=rpn_cls_prob_reshape, bbox_pred=rpn_bbox_pred, im_info=im_info, name='rois',
op_type='proposal', feat_stride=config.RPN_FEAT_STRIDE,
scales=tuple(config.ANCHOR_SCALES), ratios=tuple(config.ANCHOR_RATIOS),
rpn_pre_nms_top_n=config.TEST.RPN_PRE_NMS_TOP_N, rpn_post_nms_top_n=config.TEST.RPN_POST_NMS_TOP_N,
threshold=config.TEST.RPN_NMS_THRESH, rpn_min_size=config.TEST.RPN_MIN_SIZE)
# Fast R-CNN
roi_pool = mx.symbol.ROIPooling(
name='roi_pool5', data=conv_feat, rois=rois, pooled_size=(17, 17), spatial_scale=1.0 / config.RCNN_FEAT_STRIDE)
net = roi_pool
# inception 5
# net = repeat(roi_pool, 20, block17, scale=0.1, input_num_channels=1088, name="inception_resnet_v2_b{0}")
tower_conv = ConvFactory(net, 256, (1, 1), name="reduction_b_3x3_reduce")
tower_conv0_1 = ConvFactory(tower_conv, 384, (3, 3), stride=(2, 2), name="reduction_b_3x3")
tower_conv1 = ConvFactory(net, 256, (1, 1), name="reduction_b_3x3_2_reduce")
tower_conv1_1 = ConvFactory(tower_conv1, 288, (3, 3), stride=(2, 2), name="reduction_b_3x3_2")
tower_conv2 = ConvFactory(net, 256, (1, 1), name="reduction_b_3x3_3_reduce")
tower_conv2_1 = ConvFactory(tower_conv2, 288, (3, 3), pad=(1, 1), name="reduction_b_3x3_3")
tower_conv2_2 = ConvFactory(tower_conv2_1, 320, (3, 3), stride=(2, 2), name="reduction_b_3x3_4")
tower_pool = mx.symbol.Pooling(net, kernel=(
3, 3), stride=(2, 2), pool_type='max')
net = mx.symbol.Concat(
*[tower_conv0_1, tower_conv1_1, tower_conv2_2, tower_pool])
# inception 6
# net = repeat(net, 9, block8, scale=0.2, input_num_channels=2080, name="inception_resnet_v2_c{0}")
# net = block8(net, with_act=False, input_num_channels=2080, name="inception_resnet_v2_c10")
net = ConvFactory(net, 1536, (1, 1), name="conv6_1x1")
pool1 = mx.symbol.Pooling(net, kernel=(
8, 8), global_pool=True, pool_type='avg')
pool1 = mx.symbol.Flatten(pool1)
#pool1 = mx.symbol.Dropout(data=pool1, p=0.5)
# classification
cls_score = mx.symbol.FullyConnected(name='cls_score', data=pool1, num_hidden=num_classes)
cls_prob = mx.symbol.SoftmaxOutput(name='cls_prob', data=cls_score)
# bounding box regression
bbox_pred = mx.symbol.FullyConnected(name='bbox_pred', data=pool1, num_hidden=num_classes * 4)
# reshape output
cls_prob = mx.symbol.Reshape(data=cls_prob, shape=(config.TEST.BATCH_IMAGES, -1, num_classes), name='cls_prob_reshape')
bbox_pred = mx.symbol.Reshape(data=bbox_pred, shape=(config.TEST.BATCH_IMAGES, -1, 4 * num_classes), name='bbox_pred_reshape')
# group output
group = mx.symbol.Group([rois, cls_prob, bbox_pred])
return group
| likelyzhao/mxnet | example/rcnn/rcnn/symbol/symbol_inceptionresnet.py | Python | apache-2.0 | 18,850 |
def init_actions_(service, args):
"""
    This needs to return an array of actions representing the dependencies between actions.
    See ACTION_DEPS in this module for an example of what is expected.
"""
# some default logic for simple actions
return {
'test': ['install']
}
def test(job):
"""
    Tests that executing blueprints produces the expected run steps and job statuses.
"""
import sys
RESULT_OK = 'OK : %s'
RESULT_FAILED = 'FAILED : %s'
RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
model = job.service.model
model.data.result = RESULT_OK % job.service.name
failures = []
repos = []
repo1_path = j.sal.fs.joinPaths(j.dirs.codeDir, 'github/jumpscale/jumpscale_core8/tests/sample_repo1')
repo2_path = j.sal.fs.joinPaths(j.dirs.codeDir, 'github/jumpscale/jumpscale_core8/tests/sample_repo3')
try:
repo1_expected_steps = [
('datacenter.ovh_germany1.install', 'datacenter.ovh_germany2.install',
'datacenter.ovh_germany3.install', 'sshkey.main.install'),
('cockpit.cockpitv1.install', 'cockpit.cockpitv2.install')
]
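        # Discover the repos, execute every blueprint of sample_repo1, then check that
        # each step of the created run only contains the expected jobs.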
j.atyourservice.reposDiscover()
repo1 = j.atyourservice.repoGet(repo1_path)
repos.append(repo1)
for bp in repo1.blueprints:
repo1.blueprintExecute(path=bp.path)
run = repo1.runCreate(profile=False, debug=False)
for index, step in enumerate(run.steps):
expected_step_jobs = repo1_expected_steps[index]
for job in step.jobs:
job_name = '%s.%s.%s' % (job.model.dbobj.actorName, job.model.dbobj.serviceName, job.model.dbobj.actionName)
if job_name not in expected_step_jobs:
failures.append('Job [%s] is added to step #%s unexpectedly' % (job_name, index + 1))
expected_job_statuses = {
'runtime_error_service.instance.install': 'ok',
'runtime_error_service.instance.test': 'error',
'runtime_error_service.instance.test2': 'ok',
'runtime_error_service.instance.test3': 'new'
}
expected_step_statuses = ['ok', 'error', 'new']
expected_run_status = 'error'
repo2 = j.atyourservice.repoGet(repo2_path)
repos.append(repo2)
for bp in repo2.blueprints:
repo2.blueprintExecute(path=bp.path)
run = repo2.runCreate(profile=False, debug=False)
try:
run.execute()
except:
for index, step in enumerate(run.steps):
for job in step.jobs:
job_name = '%s.%s.%s' % (job.model.dbobj.actorName, job.model.dbobj.serviceName, job.model.dbobj.actionName)
if job_name not in expected_job_statuses:
failures.append('Job [%s] is unexpected in step #%s' % (job_name, index + 1))
elif expected_job_statuses[job_name] != job.model.dbobj.state:
failures.append('Job [%s] has unexpected status [%s] expected [%s]' % (job_name, job.model.dbobj.state, expected_job_statuses[job_name]))
if step.state != expected_step_statuses[index]:
failures.append('Step #%s has unexpected status [%s] expected [%s]' % (index + 1, step.state, expected_step_statuses[index]))
if str(run.state) != expected_run_status:
failures.append('Run has unexpected status [%s] expected [%s]' % (str(run.state), expected_run_status))
else:
            failures.append('Expected runtime error on repo [%s] did not happen' % repo2)
if failures:
model.data.result = RESULT_FAILED % '\n'.join(failures)
except:
model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
finally:
job.service.save()
for repo in repos:
repo.destroy()
| Jumpscale/ays_jumpscale8 | tests/test_services/test_validate_run_steps/actions.py | Python | apache-2.0 | 3,971 |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy
import warnings
from paddle import Tensor
import paddle.fluid.core as core
from ..fluid.framework import _in_eager_mode
__all__ = [ # noqa
'LRScheduler',
'NoamDecay',
'PiecewiseDecay',
'NaturalExpDecay',
'InverseTimeDecay',
'PolynomialDecay',
'LinearWarmup',
'ExponentialDecay',
'MultiStepDecay',
'StepDecay',
'LambdaDecay',
'ReduceOnPlateau',
'CosineAnnealingDecay',
'MultiplicativeDecay'
]
class LRScheduler(object):
"""
LRScheduler Base class. Define the common interface of a learning rate scheduler.
User can import it by ``from paddle.optimizer.lr import LRScheduler`` ,
then overload it for your subclass and have a custom implementation of ``get_lr()`` .
    Otherwise, a ``NotImplementedError`` exception will be thrown.
Args:
learning_rate (float): The initial learning rate. It is a python float number.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
instance to schedule learning rate.
Examples:
Here is an example of a simple ``StepDecay`` implementation.
.. code-block:: python
import paddle
from paddle.optimizer.lr import LRScheduler
class StepDecay(LRScheduler):
def __init__(self,
learning_rate,
step_size,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(step_size, int):
raise TypeError(
"The type of 'step_size' must be 'int', but received %s." %
type(step_size))
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
self.step_size = step_size
self.gamma = gamma
super(StepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
i = self.last_epoch // self.step_size
return self.base_lr * (self.gamma**i)
"""
def __init__(self, learning_rate=0.1, last_epoch=-1, verbose=False):
if not isinstance(learning_rate, (float, int)):
raise TypeError(
"The type of learning rate must be float, but received {}".
format(type(learning_rate)))
self.base_lr = float(learning_rate)
self.last_lr = float(learning_rate)
self.last_epoch = last_epoch
self.verbose = verbose
self._var_name = None
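        # Calling step() once here advances last_epoch by one (from the default -1 to 0)
        # and computes the initial learning rate.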
self.step()
def __call__(self):
"""
        Return the latest computed learning rate of the current epoch.
"""
return self.last_lr
def step(self, epoch=None):
"""
``step`` should be called after ``optimizer.step`` . It will update the learning rate in optimizer according to current ``epoch`` .
The new learning rate will take effect on next ``optimizer.step`` .
Args:
epoch (int, None): specify current epoch. Default: None. Auto-increment from last_epoch=-1.
Returns:
None
"""
if epoch is None:
self.last_epoch += 1
self.last_lr = self.get_lr()
else:
self.last_epoch = epoch
if hasattr(self, "_get_closed_form_lr"):
self.last_lr = self._get_closed_form_lr()
else:
self.last_lr = self.get_lr()
if self.verbose:
print('Epoch {}: {} set learning rate to {}.'.format(
self.last_epoch, self.__class__.__name__, self.last_lr))
def state_dict(self):
"""
Returns the state of the scheduler as a :class:`dict`.
It is a subset of ``self.__dict__`` .
"""
self.state_keys()
state_dict = {}
for key in self.keys:
if key not in self.__dict__:
continue
value = self.__dict__[key]
if isinstance(value, Tensor):
assert value.shape == [
1
                ], "shape of Tensor in state_dict must be [1], but received {}".format(
value.shape)
value = value.numpy()[0]
state_dict[key] = value
return state_dict
# For those subclass who overload LRScheduler, "last_epoch, last_lr" will be saved by default.
# (Note): you can change it for your subclass.
def state_keys(self):
"""
        For subclasses that overload ``LRScheduler`` (the base class). By default, ``last_epoch`` and ``last_lr`` will be saved by ``self.keys = ['last_epoch', 'last_lr']`` .
        ``last_epoch`` is the current epoch number, and ``last_lr`` is the current learning rate.
        If you want to change the default behavior, you should have a custom implementation of ``state_keys()`` to redefine ``self.keys`` .
"""
self.keys = ['last_epoch', 'last_lr']
def set_state_dict(self, state_dict):
"""
Loads the schedulers state.
"""
self.state_keys()
for key in self.keys:
if key in state_dict:
self.__dict__[key] = state_dict[key]
else:
raise RuntimeError(
"Please check whether state_dict is correct for optimizer. Can't find [ {} ] in state_dict".
format(key))
if len(state_dict) > len(self.keys):
            warnings.warn(
                "There are some unused values in state_dict. Maybe the optimizer has a different 'LearningRateDecay' when invoking state_dict and set_dict"
            )
# alias for set_state_dict
set_dict = set_state_dict
def get_lr(self):
"""
        Subclasses that overload ``LRScheduler`` (the base class) should provide a custom implementation of ``get_lr()`` .
        Otherwise, a ``NotImplementedError`` exception will be thrown.
"""
# calculate by python float
raise NotImplementedError
class NoamDecay(LRScheduler):
r"""
Applies Noam Decay to the initial learning rate.
The algorithm can be described as following.
.. math::
new\_learning\_rate = learning\_rate * d_{model}^{-0.5} * min(epoch^{-0.5}, epoch * warmup\_steps^{-1.5})
Please reference `attention is all you need <https://arxiv.org/pdf/1706.03762.pdf>`_
Args:
        d_model(int): The dimensionality of the input and output feature vectors of the model. It is a python int number.
        warmup_steps(int): The number of warmup steps. A hyperparameter. It is a python int number.
learning_rate (float): The initial learning rate. It is a python float number. Default: 1.0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``NoamDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.NoamDecay(d_model=0.01, warmup_steps=100, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
d_model,
warmup_steps,
learning_rate=1.0,
last_epoch=-1,
verbose=False):
self.d_model = d_model
self.warmup_steps = warmup_steps
super(NoamDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
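        # Noam schedule: the lr grows linearly while epoch * warmup_steps**-1.5 is the
        # smaller term (warmup), then decays proportionally to epoch**-0.5.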
if self.last_epoch == 0:
a = 1
else:
a = self.last_epoch**-0.5
b = self.warmup_steps**-1.5 * self.last_epoch
return self.base_lr * (self.d_model**-0.5) * min(a, b)
class PiecewiseDecay(LRScheduler):
"""
Piecewise learning rate scheduler.
The algorithm can be described as the code below:
.. code-block:: text
boundaries = [100, 200]
values = [1.0, 0.5, 0.1]
if epoch < 100:
learning_rate = 1.0
        elif 100 <= epoch < 200:
learning_rate = 0.5
else:
learning_rate = 0.1
Args:
boundaries(list|tuple): A list/tuple of steps numbers. The type of element in the list is python int.
values(list|tuple): A list/tuple of learning rate values that will be picked during different epoch boundaries.
The type of element in the list is python float.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``PiecewiseDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.PiecewiseDecay(boundaries=[3, 6, 9], values=[0.1, 0.2, 0.3, 0.4], verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, boundaries, values, last_epoch=-1, verbose=False):
self.boundaries = boundaries
self.values = values
super(PiecewiseDecay, self).__init__(
last_epoch=last_epoch, verbose=verbose)
def get_lr(self):
for i in range(len(self.boundaries)):
if self.last_epoch < self.boundaries[i]:
return self.values[i]
return self.values[len(self.values) - 1]
class NaturalExpDecay(LRScheduler):
r"""
Applies natural exponential decay to the initial learning rate.
The algorithm can be described as following:
.. math::
new\_learning\_rate = learning\_rate * e^{- gamma * epoch}
Args:
learning_rate (float): The initial learning rate. It is a python float number.
        gamma (float): A ratio used to update the learning rate; it should be greater than 0.0 so that the learning rate decays.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``NaturalExpDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.NaturalExpDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
assert gamma > 0.0, " 'gamma' must be a positive number so that the learning rate will decay."
self.gamma = gamma
super(NaturalExpDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr * math.exp(-1 * self.gamma * self.last_epoch)
class InverseTimeDecay(LRScheduler):
r"""
Applies inverse time decay to the initial learning rate.
The algorithm can be described as following:
.. math::
new\_learning\_rate = \frac{learning\_rate}{1 + gamma * epoch}
Args:
learning_rate (float): The initial learning rate. It is a python float number.
        gamma (float): The decay rate used in the denominator: ``new_lr = origin_lr / (1 + gamma * epoch)`` .
            It should be greater than 0.0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``InverseTimeDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.InverseTimeDecay(learning_rate=0.5, gamma=0.1, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
self.gamma = gamma
super(InverseTimeDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr / (1 + self.gamma * self.last_epoch)
class PolynomialDecay(LRScheduler):
r"""
Applies polynomial decay to the initial learning rate.
The algorithm can be described as following.
If cycle is set to True, then:
.. math::
decay\_steps & = decay\_steps * math.ceil(\frac{epoch}{decay\_steps})
new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\frac{epoch}{decay\_steps})^{power}+end\_lr
If cycle is set to False, then:
.. math::
epoch & = min(epoch, decay\_steps)
new\_learning\_rate & = (learning\_rate-end\_lr)*(1-\frac{epoch}{decay\_steps})^{power}+end\_lr
Args:
learning_rate (float): The initial learning rate. It is a python float number.
decay_steps(int): The decay step size. It determines the decay cycle. It must be a positive integer.
end_lr(float, optional): The minimum final learning rate. Default: 0.0001.
        power(float, optional): Power of the polynomial; it should be greater than 0.0 to get learning rate decay. Default: 1.0.
        cycle(bool, optional): Whether the learning rate rises again. If True, then the learning rate will rise when it decreases
            to ``end_lr`` . If False, the learning rate is monotonically decreasing. Default: False.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``PolynomialDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.PolynomialDecay(learning_rate=0.5, decay_steps=20, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
decay_steps,
end_lr=0.0001,
power=1.0,
cycle=False,
last_epoch=-1,
verbose=False):
assert decay_steps > 0 and isinstance(
decay_steps, int), " 'decay_steps' must be a positive integer."
self.decay_steps = decay_steps
self.end_lr = end_lr
assert power > 0.0, " 'power' must be greater than 0.0 so that the learning rate will decay."
self.power = power
self.cycle = cycle
super(PolynomialDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
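        # With cycle=True the decay horizon is extended to the next multiple of
        # decay_steps, so the lr can rise again after reaching end_lr; otherwise the
        # epoch is clipped at decay_steps and the lr settles at end_lr.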
tmp_epoch_num = self.last_epoch
tmp_decay_steps = self.decay_steps
if self.cycle:
div_res = math.ceil(
float(self.last_epoch) / float(self.decay_steps))
if self.last_epoch == 0:
div_res = 1
tmp_decay_steps = self.decay_steps * div_res
else:
tmp_epoch_num = min(self.last_epoch, self.decay_steps)
return (self.base_lr - self.end_lr) * (
(1 - float(tmp_epoch_num) / float(tmp_decay_steps)
)**self.power) + self.end_lr
class LinearWarmup(LRScheduler):
r"""
Linear learning rate warm up strategy. Update the learning rate preliminarily before the normal learning rate scheduler.
For more information, please refer to `Bag of Tricks for Image Classification with Convolutional Neural Networks <https://arxiv.org/abs/1812.01187>`_
When epoch < warmup_steps, learning rate is updated as:
.. math::
lr = start\_lr + (end\_lr - start\_lr) * \frac{epoch}{warmup\_steps}
where start_lr is the initial learning rate, and end_lr is the final learning rate;
When epoch >= warmup_steps, learning rate is updated as:
.. math::
lr = learning_rate
where ``learning_rate`` is float or any subclass of ``LRScheduler`` .
Args:
learning_rate (float|LRScheduler): The learning rate after warm-up. It is a python float number or any subclass of ``LRScheduler`` .
warmup_steps (int): total steps of warm up. It must be a positive integer.
start_lr (float): Initial learning rate of warm up.
end_lr (float): Final learning rate of warm up.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``LinearWarmup`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.LinearWarmup(
learning_rate=0.5, warmup_steps=20, start_lr=0, end_lr=0.5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
warmup_steps,
start_lr,
end_lr,
last_epoch=-1,
verbose=False):
type_check = isinstance(learning_rate, float) or isinstance(
learning_rate, int) or isinstance(learning_rate, LRScheduler)
if not type_check:
raise TypeError(
"the type of learning_rate should be [int, float or LRScheduler], the current type is {}".
format(learning_rate))
self.learning_rate = learning_rate
assert warmup_steps > 0 and isinstance(
warmup_steps, int), " 'warmup_steps' must be a positive integer."
self.warmup_steps = warmup_steps
self.start_lr = start_lr
self.end_lr = end_lr
assert end_lr > start_lr, "end_lr {} must be greater than start_lr {}".format(
end_lr, start_lr)
super(LinearWarmup, self).__init__(start_lr, last_epoch, verbose)
def state_dict(self):
"""
Returns the state of the LinearWarmup scheduler as a :class:`dict`.
It is a subset of ``self.__dict__`` .
"""
state_dict = super(LinearWarmup, self).state_dict()
if isinstance(self.learning_rate, LRScheduler):
state_dict["LinearWarmup_LR"] = self.learning_rate.state_dict()
return state_dict
def set_state_dict(self, state_dict):
"""
Loads state_dict for LinearWarmup scheduler.
"""
super(LinearWarmup, self).set_state_dict(state_dict)
if isinstance(self.learning_rate, LRScheduler):
self.learning_rate.set_state_dict(state_dict["LinearWarmup_LR"])
def get_lr(self):
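        # During warmup, interpolate linearly from start_lr to end_lr; afterwards
        # delegate to the wrapped scheduler (stepped with the warmup offset removed)
        # or return the constant learning rate.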
if self.last_epoch < self.warmup_steps:
return (self.end_lr - self.start_lr) * float(
self.last_epoch) / float(self.warmup_steps) + self.start_lr
else:
if isinstance(self.learning_rate, LRScheduler):
self.learning_rate.step(self.last_epoch - self.warmup_steps)
return self.learning_rate()
return self.learning_rate
class ExponentialDecay(LRScheduler):
r"""
Update learning rate by `gamma` each epoch.
The algorithm can be described as following.
.. math::
new\_learning\_rate = last\_learning\_rate * gamma
Args:
learning_rate (float): The initial learning rate. It is a python float number.
gamma (float): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be in interval (0.0, 1.0).
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``ExponentialDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.ExponentialDecay(learning_rate=0.5, gamma=0.9, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, learning_rate, gamma, last_epoch=-1, verbose=False):
assert gamma > 0.0 and gamma < 1.0, " 'gamma' must be in interval (0.0, 1.0) so that the learning rate will decay."
self.gamma = gamma
super(ExponentialDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
return self.base_lr * (self.gamma**self.last_epoch)
class MultiStepDecay(LRScheduler):
"""
Update the learning rate by ``gamma`` once ``epoch`` reaches one of the milestones.
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5
milestones = [30, 50]
gamma = 0.1
if epoch < 30:
learning_rate = 0.5
elif epoch < 50:
learning_rate = 0.05
else:
learning_rate = 0.005
Args:
learning_rate (float): The initial learning rate. It is a python float number.
        milestones (tuple|list): List or tuple of epoch boundaries. Must be increasing.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be less than 1.0. Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``MultiStepDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.MultiStepDecay(learning_rate=0.5, milestones=[2, 4, 6], gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
milestones,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(milestones, (tuple, list)):
raise TypeError(
"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
% type(milestones))
if not all([
milestones[i] < milestones[i + 1]
for i in range(len(milestones) - 1)
]):
            raise ValueError('The elements of milestones must be increasing')
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
self.milestones = milestones
self.gamma = gamma
super(MultiStepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
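        # lr = base_lr * gamma**k, where k is the number of milestones that
        # last_epoch has already reached or passed.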
for i in range(len(self.milestones)):
if self.last_epoch < self.milestones[i]:
return self.base_lr * (self.gamma**i)
return self.base_lr * (self.gamma**len(self.milestones))
class StepDecay(LRScheduler):
"""
    Update the learning rate of ``optimizer`` by ``gamma`` every ``step_size`` number of epochs.
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5
step_size = 30
gamma = 0.1
learning_rate = 0.5 if epoch < 30
learning_rate = 0.05 if 30 <= epoch < 60
learning_rate = 0.005 if 60 <= epoch < 90
...
Args:
learning_rate (float): The initial learning rate. It is a python float number.
step_size (int): the interval to update. It must be a positive integer.
gamma (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * gamma`` .
It should be less than 1.0. Default: 0.1.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``StepDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=5, gamma=0.8, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
step_size,
gamma=0.1,
last_epoch=-1,
verbose=False):
if not isinstance(step_size, int):
raise TypeError(
"The type of 'step_size' must be 'int', but received %s." %
type(step_size))
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
assert step_size > 0 and isinstance(
step_size, int), " 'step_size' must be a positive integer."
self.step_size = step_size
self.gamma = gamma
super(StepDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
i = self.last_epoch // self.step_size
return self.base_lr * (self.gamma**i)
class LambdaDecay(LRScheduler):
"""
    Sets the learning rate of ``optimizer`` by the function ``lr_lambda`` . ``lr_lambda`` is a function which receives ``epoch`` .
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5 # init learning_rate
lr_lambda = lambda epoch: 0.95 ** epoch
learning_rate = 0.5 # epoch 0, 0.5*0.95**0
learning_rate = 0.475 # epoch 1, 0.5*0.95**1
learning_rate = 0.45125 # epoch 2, 0.5*0.95**2
Args:
learning_rate (float): The initial learning rate. It is a python float number.
lr_lambda (function): A function which computes a factor by ``epoch`` , and then multiply the initial learning rate by this factor.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``LambdaDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate=0.5, lr_lambda=lambda x:0.95**x, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, learning_rate, lr_lambda, last_epoch=-1, verbose=False):
if not callable(lr_lambda):
raise TypeError(
"The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
% type(lr_lambda))
self.lr_lambda = lr_lambda
super(LambdaDecay, self).__init__(learning_rate, last_epoch, verbose)
def get_lr(self):
return self.base_lr * self.lr_lambda(self.last_epoch)
class ReduceOnPlateau(LRScheduler):
"""
    Reduce the learning rate when ``metrics`` has stopped descending. Models often benefit from reducing the learning rate
    by 2 to 10 times once model performance no longer improves.
    The ``metrics`` is the one passed into ``step`` ; it must be a 1-D Tensor with shape [1]. When ``metrics``
    stops descending for a ``patience`` number of epochs, the learning rate will be reduced to ``learning_rate * factor`` .
    (Specifically, ``mode`` can also be set to ``'max'`` ; in this case, when ``metrics`` stops ascending for a ``patience``
    number of epochs, the learning rate will be reduced.)
    In addition, after each reduction, it will wait a ``cooldown`` number of epochs before resuming the above operation.
Args:
learning_rate (float): The initial learning rate. It is a python float number.
        mode (str, optional): ``'min'`` or ``'max'`` can be selected. Normally, it is ``'min'`` , which means that the
            learning rate will be reduced when ``loss`` stops descending. Specifically, if it's set to ``'max'`` , the learning
            rate will be reduced when ``loss`` stops ascending. Default: ``'min'`` .
factor (float, optional): The Ratio that the learning rate will be reduced. ``new_lr = origin_lr * factor`` .
It should be less than 1.0. Default: 0.1.
        patience (int, optional): When ``loss`` doesn't improve for this number of epochs, the learning rate will be reduced.
Default: 10.
threshold (float, optional): ``threshold`` and ``threshold_mode`` will determine the minimum change of ``loss`` .
            This makes tiny changes of ``loss`` be ignored. Default: 1e-4.
threshold_mode (str, optional): ``'rel'`` or ``'abs'`` can be selected. In ``'rel'`` mode, the minimum change of ``loss``
is ``last_loss * threshold`` , where ``last_loss`` is ``loss`` in last epoch. In ``'abs'`` mode, the minimum
change of ``loss`` is ``threshold`` . Default: ``'rel'`` .
cooldown (int, optional): The number of epochs to wait before resuming normal operation. Default: 0.
min_lr (float, optional): The lower bound of the learning rate after reduction. Default: 0.
epsilon (float, optional): Minimal decay applied to lr. If the difference between new and old lr is smaller than epsilon,
the update is ignored. Default: 1e-8.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False``.
Returns:
``ReduceOnPlateau`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step(loss) # If you update learning rate each step
# scheduler.step(loss) # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.ReduceOnPlateau(learning_rate=1.0, factor=0.5, patience=5, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step(out[0]) # If you update learning rate each step
# scheduler.step(out[0]) # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
mode='min',
factor=0.1,
patience=10,
threshold=1e-4,
threshold_mode='rel',
cooldown=0,
min_lr=0,
epsilon=1e-8,
verbose=False):
mode = mode.lower()
if mode not in ['min', 'max']:
raise ValueError('mode: ' + mode + ' is unknown!')
self.mode = mode
if factor >= 1.0:
raise ValueError(
                'new_lr = origin_lr * factor and factor should be < 1.0.')
self.factor = factor
threshold_mode = threshold_mode.lower()
if threshold_mode not in ['rel', 'abs']:
raise ValueError('threshold mode: ' + threshold_mode +
' is unknown!')
self.threshold_mode = threshold_mode
if not isinstance(learning_rate, (float, int)):
raise TypeError(
"The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received %s."
% type(learning_rate))
self.patience = patience
self.threshold = threshold
self.threshold_mode = threshold_mode
self.cooldown = cooldown
self.min_lr = min_lr
self.epsilon = epsilon
self.cooldown_counter = 0
self.best = None
self.num_bad_epochs = 0
# Can not call Parent __init__, so implement here.
self.base_lr = float(learning_rate)
self.last_lr = float(learning_rate)
self.last_epoch = 0
self.verbose = verbose
self._var_name = None
# "cooldown_counter / best / num_bad_epochs / last_epoch / last_lr" will be stored.
def state_keys(self):
self.keys = [
'cooldown_counter', 'best', 'num_bad_epochs', 'last_epoch',
'last_lr'
]
def step(self, metrics, epoch=None):
"""
step should be called after `optimizer.step()` . It will update the learning rate in optimizer according to ``metrics`` .
The new learning rate will take effect on next epoch.
Args:
metrics (Tensor|numpy.ndarray|float): Which will be monitored to determine whether the learning rate will reduce.
                If it stops descending for a ``patience`` number of epochs, the learning rate will be reduced. If it's 'Tensor' or
'numpy.ndarray', its shape must be [1].
epoch (int, None): specify current epoch. Default: None. Auto-increment from last_epoch=-1.
Returns:
None
Examples:
Please refer to the example of current LRScheduler.
"""
if epoch is None:
self.last_epoch = self.last_epoch + 1
else:
self.last_epoch = epoch
if _in_eager_mode():
tmp = core.eager.Tensor
else:
tmp = Tensor
# loss must be float, numpy.ndarray or 1-D Tensor with shape [1]
if isinstance(metrics, (tmp, numpy.ndarray)):
            assert len(metrics.shape) == 1 and metrics.shape[0] == 1, "the metrics.shape " \
                "should be [1], but the current metrics.shape is {}. Maybe " \
                "you should call paddle.mean to process it first.".format(
metrics.shape)
elif not isinstance(metrics,
(int, float, numpy.float32, numpy.float64)):
raise TypeError(
"metrics must be 'int', 'float', 'np.float', 'numpy.ndarray' or 'paddle.Tensor', but receive {}".
format(type(metrics)))
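        # Plateau logic: while cooling down, only count down; otherwise track the best
        # metric seen so far and count "bad" epochs, and once patience is exceeded
        # shrink the lr by `factor` (not below min_lr) and start a new cooldown.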
if self.cooldown_counter > 0:
self.cooldown_counter -= 1
else:
if self.best is None or self._is_better(metrics, self.best):
self.best = metrics
self.num_bad_epochs = 0
else:
self.num_bad_epochs += 1
if self.num_bad_epochs > self.patience:
self.cooldown_counter = self.cooldown
self.num_bad_epochs = 0
new_lr = max(self.last_lr * self.factor, self.min_lr)
if self.last_lr - new_lr > self.epsilon:
self.last_lr = new_lr
if self.verbose:
print('Epoch {}: {} set learning rate to {}.'.format(
self.last_epoch, self.__class__.__name__,
self.last_lr))
def _is_better(self, current, best):
if self.mode == 'min' and self.threshold_mode == 'rel':
return current < best - best * self.threshold
elif self.mode == 'min' and self.threshold_mode == 'abs':
return current < best - self.threshold
elif self.mode == 'max' and self.threshold_mode == 'rel':
return current > best + best * self.threshold
else:
return current > best + self.threshold
class CosineAnnealingDecay(LRScheduler):
r"""
Set the learning rate using a cosine annealing schedule, where :math:`\eta_{max}` is set to
the initial learning_rate. :math:`T_{cur}` is the number of epochs since the last restart in
SGDR.
The algorithm can be described as following.
.. math::
\eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1
+ \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right),
& T_{cur} \neq (2k+1)T_{max};
\eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min})
\left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right),
& T_{cur} = (2k+1)T_{max}.
It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts <https://arxiv.org/abs/1608.03983>`_.
Note that this only implements the cosine annealing part of SGDR, and not the restarts.
Args:
learning_rate (float): The initial learning rate, that is :math:`\eta_{max}` . It can be set to python float or int number.
T_max (int): Maximum number of iterations. It is half of the decay cycle of learning rate. It must be a positive integer.
eta_min (float|int, optional): Minimum learning rate, that is :math:`\eta_{min}` . Default: 0.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``CosineAnnealingDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
# train on static graph mode
paddle.enable_static()
main_prog = paddle.static.Program()
start_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, start_prog):
x = paddle.static.data(name='x', shape=[None, 4, 5])
y = paddle.static.data(name='y', shape=[None, 4, 5])
z = paddle.static.nn.fc(x, 100)
loss = paddle.mean(z)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=0.5, T_max=10, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler)
sgd.minimize(loss)
exe = paddle.static.Executor()
exe.run(start_prog)
for epoch in range(20):
for batch_id in range(5):
out = exe.run(
main_prog,
feed={
'x': np.random.randn(3, 4, 5).astype('float32'),
'y': np.random.randn(3, 4, 5).astype('float32')
},
fetch_list=loss.name)
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self,
learning_rate,
T_max,
eta_min=0,
last_epoch=-1,
verbose=False):
if not isinstance(T_max, int):
raise TypeError(
"The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received %s."
% type(T_max))
if not isinstance(eta_min, (float, int)):
raise TypeError(
"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received %s."
% type(eta_min))
assert T_max > 0 and isinstance(
T_max, int), " 'T_max' must be a positive integer."
self.T_max = T_max
self.eta_min = float(eta_min)
super(CosineAnnealingDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
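        # Recursive form of the SGDR cosine annealing update; the equivalent closed
        # form (used when step() is called with an explicit epoch) is implemented in
        # _get_closed_form_lr below.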
if self.last_epoch == 0:
return self.base_lr
elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
return self.last_lr + (self.base_lr - self.eta_min) * (1 - math.cos(
math.pi / self.T_max)) / 2
return (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / (
1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * (
self.last_lr - self.eta_min) + self.eta_min
def _get_closed_form_lr(self):
return self.eta_min + (self.base_lr - self.eta_min) * (1 + math.cos(
math.pi * self.last_epoch / self.T_max)) / 2
class MultiplicativeDecay(LRScheduler):
"""
Multiply the learning rate of ``optimizer`` by the factor given in function ``lr_lambda`` .
The algorithm can be described as the code below.
.. code-block:: text
learning_rate = 0.5 # init learning_rate
lr_lambda = lambda epoch: 0.95
learning_rate = 0.5 # epoch 0,
learning_rate = 0.475 # epoch 1, 0.5*0.95
learning_rate = 0.45125 # epoch 2, 0.475*0.95
Args:
learning_rate (float): The initial learning rate. It is a python float number.
lr_lambda (function): A function which computes a factor by ``epoch`` , and then multiply the last learning rate by this factor.
last_epoch (int, optional): The index of last epoch. Can be set to restart training. Default: -1, means initial learning rate.
verbose (bool, optional): If ``True``, prints a message to stdout for each update. Default: ``False`` .
Returns:
``MultiplicativeDecay`` instance to schedule learning rate.
Examples:
.. code-block:: python
import paddle
import numpy as np
# train on default dynamic graph mode
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.MultiplicativeDecay(learning_rate=0.5, lr_lambda=lambda x:0.95, verbose=True)
sgd = paddle.optimizer.SGD(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(20):
for batch_id in range(5):
x = paddle.uniform([10, 10])
out = linear(x)
loss = paddle.mean(out)
loss.backward()
sgd.step()
sgd.clear_gradients()
scheduler.step() # If you update learning rate each step
# scheduler.step() # If you update learning rate each epoch
"""
def __init__(self, learning_rate, lr_lambda, last_epoch=-1, verbose=False):
if not callable(lr_lambda):
raise TypeError(
"The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received %s."
% type(lr_lambda))
self.lr_lambda = lr_lambda
super(MultiplicativeDecay, self).__init__(learning_rate, last_epoch,
verbose)
def get_lr(self):
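        # Unlike LambdaDecay, the factor multiplies the previous learning rate,
        # so the effect of lr_lambda compounds across epochs.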
if self.last_epoch > 0:
return self.last_lr * self.lr_lambda(self.last_epoch)
else:
return self.base_lr
| PaddlePaddle/Paddle | python/paddle/optimizer/lr.py | Python | apache-2.0 | 65,986 |
# coding=utf-8
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import re
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.targets.unpacked_whls import UnpackedWheels
from pants.backend.python.tasks.unpack_wheels import UnpackWheels, UnpackWheelsFingerprintStrategy
from pants.task.unpack_remote_sources_base import UnpackedArchives
from pants.util.collections import assert_single_element
from pants_test.task_test_base import TaskTestBase
class UnpackWheelsTest(TaskTestBase):
@classmethod
def task_type(cls):
return UnpackWheels
def _make_req_library(self, requirement):
return self.make_target(spec='unpack/whls:foo-whls',
target_type=PythonRequirementLibrary,
requirements=[requirement])
def _make_unpacked_wheel(self, requirement, include_patterns, module_name='foo', **kwargs):
reqlib = self._make_req_library(requirement)
return self.make_target(spec='unpack:foo',
target_type=UnpackedWheels,
libraries=[reqlib.address.spec],
module_name=module_name,
include_patterns=include_patterns,
**kwargs)
def test_unpack_wheels_fingerprint_strategy(self):
fingerprint_strategy = UnpackWheelsFingerprintStrategy()
make_unpacked_wheel = functools.partial(self._make_unpacked_wheel, include_patterns=['bar'])
req1 = PythonRequirement('com.example.bar==0.0.1')
target = make_unpacked_wheel(req1)
fingerprint1 = fingerprint_strategy.compute_fingerprint(target)
# Now, replace the build file with a different version.
self.reset_build_graph()
target = make_unpacked_wheel(PythonRequirement('com.example.bar==0.0.2'))
fingerprint2 = fingerprint_strategy.compute_fingerprint(target)
self.assertNotEqual(fingerprint1, fingerprint2)
# Go back to the original library.
self.reset_build_graph()
target = make_unpacked_wheel(req1)
fingerprint3 = fingerprint_strategy.compute_fingerprint(target)
self.assertEqual(fingerprint1, fingerprint3)
def _assert_unpacking(self, module_name):
# TODO: figure out how to generate a nice fake wheel that the pex resolve will accept instead of
# depending on a real wheel!
pex_requirement = PythonRequirement('pex==1.5.3')
unpacked_wheel_tgt = self._make_unpacked_wheel(
pex_requirement,
include_patterns=['pex/pex.py', 'pex/__init__.py'],
module_name=module_name,
# TODO: `within_data_subdir` is only tested implicitly by the tensorflow_custom_op target
# in examples/! Make a fake wheel, resolve it, and test that `within_data_subdir`
# descends into the correct directory!
within_data_subdir=None)
context = self.context(target_roots=[unpacked_wheel_tgt])
unpack_task = self.create_task(context)
unpack_task.execute()
expected_files = {'pex/__init__.py', 'pex/pex.py'}
with unpack_task.invalidated([unpacked_wheel_tgt]) as invalidation_check:
vt = assert_single_element(invalidation_check.all_vts)
self.assertEqual(vt.target, unpacked_wheel_tgt)
archives = context.products.get_data(UnpackedArchives, dict)[vt.target]
self.assertEqual(expected_files, set(archives.found_files))
def test_unpacking(self):
self._assert_unpacking(module_name='pex')
def test_unpack_missing_module_name(self):
with self.assertRaisesRegexp(UnpackWheels.WheelUnpackingError, re.escape(
'Error extracting wheel for target UnpackedWheels(unpack:foo): Exactly one dist was expected to match name not-a-real-module')):
self._assert_unpacking(module_name='not-a-real-module')
| twitter/pants | tests/python/pants_test/backend/python/tasks/test_unpack_wheels.py | Python | apache-2.0 | 4,024 |
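The test above asserts a property of UnpackWheelsFingerprintStrategy rather than its implementation: changing the pinned requirement changes the fingerprint, and reverting it restores the original value. A toy hash-based model of that property (not the actual Pants strategy), for illustration only:

import hashlib

def toy_fingerprint(requirements, include_patterns):
    # Hash the sorted inputs so any change in requirements or patterns changes the digest.
    digest = hashlib.sha1()
    for item in sorted(requirements) + sorted(include_patterns):
        digest.update(item.encode('utf-8'))
    return digest.hexdigest()

fp1 = toy_fingerprint(['com.example.bar==0.0.1'], ['bar'])
fp2 = toy_fingerprint(['com.example.bar==0.0.2'], ['bar'])
fp3 = toy_fingerprint(['com.example.bar==0.0.1'], ['bar'])
assert fp1 != fp2 and fp1 == fp3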
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utils for EfficientNet models for Keras.
Write weights from ckpt file as in original repo
(https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet)
to h5 file for keras implementation of the models.
Usage:
# use checkpoint efficientnet-b0/model.ckpt (can be downloaded from
# https://storage.googleapis.com/cloud-tpu-checkpoints/
# efficientnet/ckptsaug/efficientnet-b0.tar.gz)
# to update weight without top layers, saving to efficientnetb0_notop.h5
python efficientnet_weight_update_util.py --model b0 --notop \
--ckpt efficientnet-b0/model.ckpt --o efficientnetb0_notop.h5
# use checkpoint noisy_student_efficientnet-b3/model.ckpt (providing
# improved result for b3, can be downloaded from
# https://storage.googleapis.com/cloud-tpu-checkpoints/
# efficientnet/noisystudent/noisy_student_efficientnet-b3.tar.gz)
# to update weight with top layers, saving to efficientnetb3_new.h5
python efficientnet_weight_update_util.py --model b3 --notop \
--ckpt noisy_student_efficientnet-b3/model.ckpt --o efficientnetb3_new.h5
"""
import argparse
import warnings
from keras.utils import io_utils
import tensorflow.compat.v2 as tf
from tensorflow.keras.applications import efficientnet
def write_ckpt_to_h5(path_h5, path_ckpt, keras_model, use_ema=True):
"""Map the weights in checkpoint file (tf) to h5 file (keras).
Args:
path_h5: str, path to output hdf5 file to write weights loaded from ckpt
files.
path_ckpt: str, path to the ckpt files (e.g. 'efficientnet-b0/model.ckpt')
that records efficientnet weights from original repo
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet
keras_model: keras model, built from keras.applications efficientnet
functions (e.g. EfficientNetB0)
use_ema: Bool, whether to use ExponentialMovingAverage result or not
"""
model_name_keras = keras_model.name
model_name_tf = model_name_keras.replace('efficientnet', 'efficientnet-')
keras_weight_names = [w.name for w in keras_model.weights]
tf_weight_names = get_variable_names_from_ckpt(path_ckpt)
keras_blocks = get_keras_blocks(keras_weight_names)
tf_blocks = get_tf_blocks(tf_weight_names)
io_utils.print_msg('check variables match in each block')
for keras_block, tf_block in zip(keras_blocks, tf_blocks):
check_match(keras_block, tf_block, keras_weight_names, tf_weight_names,
model_name_tf)
io_utils.print_msg('{} and {} match.'.format(tf_block, keras_block))
block_mapping = {x[0]: x[1] for x in zip(keras_blocks, tf_blocks)}
changed_weights = 0
for w in keras_model.weights:
if 'block' in w.name:
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_block = w.name.split('/')[0].split('_')[0]
tf_block = block_mapping[keras_block]
tf_name = keras_name_to_tf_name_block(
w.name,
keras_block=keras_block,
tf_block=tf_block,
use_ema=use_ema,
model_name_tf=model_name_tf)
elif any([x in w.name for x in ['stem', 'top', 'predictions', 'probs']]):
tf_name = keras_name_to_tf_name_stem_top(
w.name, use_ema=use_ema, model_name_tf=model_name_tf)
elif 'normalization' in w.name:
io_utils.print_msg(
f'Skipping variable {w.name}: normalization is a Keras '
'preprocessing layer, which does not exist in the TF ckpt.')
continue
else:
raise ValueError('{} failed to parse.'.format(w.name))
try:
w_tf = tf.train.load_variable(path_ckpt, tf_name)
if (w.value().numpy() != w_tf).any():
w.assign(w_tf)
changed_weights += 1
except ValueError as e:
if any([x in w.name for x in ['top', 'predictions', 'probs']]):
warnings.warn(
'Fail to load top layer variable {}'
'from {} because of {}.'.format(w.name, tf_name, e),
stacklevel=2)
else:
raise ValueError('Fail to load {} from {}'.format(w.name, tf_name))
total_weights = len(keras_model.weights)
io_utils.print_msg(f'{changed_weights}/{total_weights} weights updated')
keras_model.save_weights(path_h5)
def get_variable_names_from_ckpt(path_ckpt, use_ema=True):
"""Get list of tensor names from checkpoint.
Args:
path_ckpt: str, path to the ckpt files
use_ema: Bool, whether to use ExponentialMovingAverage result or not.
Returns:
List of variable names from checkpoint.
"""
v_all = tf.train.list_variables(path_ckpt)
# keep name only
v_name_all = [x[0] for x in v_all]
if use_ema:
v_name_all = [x for x in v_name_all if 'ExponentialMovingAverage' in x]
else:
v_name_all = [x for x in v_name_all if 'ExponentialMovingAverage' not in x]
# remove util variables used for RMSprop
v_name_all = [x for x in v_name_all if 'RMS' not in x]
return v_name_all
def get_tf_blocks(tf_weight_names):
"""Extract the block names from list of full weight names."""
# Example: 'efficientnet-b0/blocks_0/conv2d/kernel' -> 'blocks_0'
tf_blocks = {x.split('/')[1] for x in tf_weight_names if 'block' in x}
# sort by number
tf_blocks = sorted(tf_blocks, key=lambda x: int(x.split('_')[1]))
return tf_blocks
def get_keras_blocks(keras_weight_names):
"""Extract the block names from list of full weight names."""
# example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a'
keras_blocks = {x.split('_')[0] for x in keras_weight_names if 'block' in x}
return sorted(keras_blocks)
def keras_name_to_tf_name_stem_top(keras_name,
use_ema=True,
model_name_tf='efficientnet-b0'):
"""Mapping name in h5 to ckpt that is in stem or top (head).
we map name keras_name that points to a weight in h5 file
to a name of weight in ckpt file.
Args:
keras_name: str, the name of weight in the h5 file of keras implementation
    use_ema: Bool, whether to use the ExponentialMovingAverage result in ckpt or not
model_name_tf: str, the name of model in ckpt.
Returns:
String for the name of weight as in ckpt file.
Raises:
KeyError: if we cannot parse the keras_name.
"""
if use_ema:
ema = '/ExponentialMovingAverage'
else:
ema = ''
stem_top_dict = {
'probs/bias:0': '{}/head/dense/bias{}',
'probs/kernel:0': '{}/head/dense/kernel{}',
'predictions/bias:0': '{}/head/dense/bias{}',
'predictions/kernel:0': '{}/head/dense/kernel{}',
'stem_conv/kernel:0': '{}/stem/conv2d/kernel{}',
'top_conv/kernel:0': '{}/head/conv2d/kernel{}',
}
for x in stem_top_dict:
stem_top_dict[x] = stem_top_dict[x].format(model_name_tf, ema)
# stem batch normalization
for bn_weights in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
tf_name = '{}/stem/tpu_batch_normalization/{}{}'.format(
model_name_tf, bn_weights, ema)
stem_top_dict['stem_bn/{}:0'.format(bn_weights)] = tf_name
# top / head batch normalization
for bn_weights in ['beta', 'gamma', 'moving_mean', 'moving_variance']:
tf_name = '{}/head/tpu_batch_normalization/{}{}'.format(
model_name_tf, bn_weights, ema)
stem_top_dict['top_bn/{}:0'.format(bn_weights)] = tf_name
if keras_name in stem_top_dict:
return stem_top_dict[keras_name]
raise KeyError('{} from h5 file cannot be parsed'.format(keras_name))
def keras_name_to_tf_name_block(keras_name,
keras_block='block1a',
tf_block='blocks_0',
use_ema=True,
model_name_tf='efficientnet-b0'):
"""Mapping name in h5 to ckpt that belongs to a block.
we map name keras_name that points to a weight in h5 file
to a name of weight in ckpt file.
Args:
keras_name: str, the name of weight in the h5 file of keras implementation
keras_block: str, the block name for keras implementation (e.g. 'block1a')
tf_block: str, the block name for tf implementation (e.g. 'blocks_0')
    use_ema: Bool, whether to use the ExponentialMovingAverage result in ckpt or not
model_name_tf: str, the name of model in ckpt.
Returns:
String for the name of weight as in ckpt file.
Raises:
ValueError if keras_block does not show up in keras_name
"""
if keras_block not in keras_name:
raise ValueError('block name {} not found in {}'.format(
keras_block, keras_name))
# all blocks in the first group will not have expand conv and bn
is_first_blocks = (keras_block[5] == '1')
tf_name = [model_name_tf, tf_block]
  # depthwise conv
if 'dwconv' in keras_name:
tf_name.append('depthwise_conv2d')
tf_name.append('depthwise_kernel')
# conv layers
if is_first_blocks:
# first blocks only have one conv2d
if 'project_conv' in keras_name:
tf_name.append('conv2d')
tf_name.append('kernel')
else:
if 'project_conv' in keras_name:
tf_name.append('conv2d_1')
tf_name.append('kernel')
elif 'expand_conv' in keras_name:
tf_name.append('conv2d')
tf_name.append('kernel')
  # squeeze-and-excitation (SE) layers
if '_se_' in keras_name:
if 'reduce' in keras_name:
tf_name.append('se/conv2d')
elif 'expand' in keras_name:
tf_name.append('se/conv2d_1')
if 'kernel' in keras_name:
tf_name.append('kernel')
elif 'bias' in keras_name:
tf_name.append('bias')
# batch normalization layers
if 'bn' in keras_name:
if is_first_blocks:
if 'project' in keras_name:
tf_name.append('tpu_batch_normalization_1')
else:
tf_name.append('tpu_batch_normalization')
else:
if 'project' in keras_name:
tf_name.append('tpu_batch_normalization_2')
elif 'expand' in keras_name:
tf_name.append('tpu_batch_normalization')
else:
tf_name.append('tpu_batch_normalization_1')
for x in ['moving_mean', 'moving_variance', 'beta', 'gamma']:
if x in keras_name:
tf_name.append(x)
if use_ema:
tf_name.append('ExponentialMovingAverage')
return '/'.join(tf_name)
def check_match(keras_block, tf_block, keras_weight_names, tf_weight_names,
model_name_tf):
"""Check if the weights in h5 and ckpt match.
  We take each name from keras_weight_names that belongs to keras_block
  and check that it corresponds one-to-one with a name from tf_weight_names
  that belongs to tf_block.
Args:
keras_block: str, the block name for keras implementation (e.g. 'block1a')
tf_block: str, the block name for tf implementation (e.g. 'blocks_0')
keras_weight_names: list of str, weight names in keras implementation
tf_weight_names: list of str, weight names in tf implementation
model_name_tf: str, the name of model in ckpt.
"""
names_from_keras = set()
for x in keras_weight_names:
if keras_block in x:
y = keras_name_to_tf_name_block(
x,
keras_block=keras_block,
tf_block=tf_block,
model_name_tf=model_name_tf)
names_from_keras.add(y)
names_from_tf = set()
for x in tf_weight_names:
if tf_block in x and x.split('/')[1].endswith(tf_block):
names_from_tf.add(x)
names_missing = names_from_keras - names_from_tf
if names_missing:
raise ValueError('{} variables not found in checkpoint file: {}'.format(
len(names_missing), names_missing))
names_unused = names_from_tf - names_from_keras
if names_unused:
warnings.warn(
'{} variables from checkpoint file are not used: {}'.format(
len(names_unused), names_unused),
stacklevel=2)
if __name__ == '__main__':
arg_to_model = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7
}
p = argparse.ArgumentParser(description='write weights from checkpoint to h5')
p.add_argument(
'--model',
required=True,
type=str,
help='name of efficient model',
choices=arg_to_model.keys())
p.add_argument(
'--notop',
action='store_true',
help='do not include top layers',
default=False)
p.add_argument('--ckpt', required=True, type=str, help='checkpoint path')
p.add_argument(
'--output', '-o', required=True, type=str, help='output (h5) file path')
args = p.parse_args()
include_top = not args.notop
model = arg_to_model[args.model](include_top=include_top)
write_ckpt_to_h5(args.output, args.ckpt, keras_model=model)
| keras-team/keras | keras/applications/efficientnet_weight_update_util.py | Python | apache-2.0 | 13,341 |
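A usage sketch of the two name-mapping helpers above, with outputs traced by hand from their logic. It assumes the file is importable as efficientnet_weight_update_util and that TensorFlow/Keras are installed, since the module imports them at load time.

from efficientnet_weight_update_util import (
    keras_name_to_tf_name_block, keras_name_to_tf_name_stem_top)

block_name = keras_name_to_tf_name_block(
    'block1a_dwconv/depthwise_kernel:0',
    keras_block='block1a', tf_block='blocks_0',
    use_ema=True, model_name_tf='efficientnet-b0')
# -> 'efficientnet-b0/blocks_0/depthwise_conv2d/depthwise_kernel/ExponentialMovingAverage'

stem_name = keras_name_to_tf_name_stem_top(
    'stem_conv/kernel:0', use_ema=True, model_name_tf='efficientnet-b0')
# -> 'efficientnet-b0/stem/conv2d/kernel/ExponentialMovingAverage'

print(block_name)
print(stem_name)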
# data.world-py
# Copyright 2017 data.world, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# This product includes software developed at
# data.world, Inc.(http://data.world/).
from __future__ import absolute_import
import click
from datadotworld.config import FileConfig
@click.group()
@click.option('--profile', '-p', default='default', help='Account name',
metavar='<profile>')
@click.pass_context
def cli(ctx, profile):
"""dw commands support working with multiple data.world accounts
\b
Use a different <profile> value for each account.
In the absence of a <profile>, 'default' will be used.
"""
if ctx.obj is None:
ctx.obj = {}
ctx.obj['profile'] = profile
pass
@click.command()
@click.option('--token', '-t',
prompt='API token (obtained at: '
'https://data.world/settings/advanced)',
help='Authentication token for API access')
@click.pass_obj
def configure(obj, token):
"""Use this command to configure API tokens
"""
config = obj.get('config') or FileConfig(obj['profile'])
config.auth_token = token
config.save()
cli.add_command(configure)
| datadotworld/data.world-py | datadotworld/cli.py | Python | apache-2.0 | 1,688 |
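The command group above can be exercised without installing the console script by using Click's built-in test runner. The stub class and token below are placeholders introduced for this sketch; passing obj={'config': ...} keeps the example from writing a real config file via FileConfig.save().

from click.testing import CliRunner
from datadotworld.cli import cli

class _StubConfig(object):
    """Hypothetical stand-in for FileConfig so the example does not touch disk."""
    auth_token = None
    def save(self):
        print('token saved:', self.auth_token)

runner = CliRunner()
result = runner.invoke(
    cli,
    ['--profile', 'default', 'configure', '--token', 'not-a-real-token'],
    obj={'config': _StubConfig()})
print(result.exit_code)
print(result.output)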
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deactivates all active line items custom fields.
To determine which custom fields exist, run get_all_custom_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
def main(client):
# Initialize appropriate service.
custom_field_service = client.GetService(
'CustomFieldService', version='v201808')
# Create statement to select only active custom fields that apply to
# line items.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where('entityType = :entityType and isActive = :isActive')
.WithBindVariable('entityType', 'LINE_ITEM')
.WithBindVariable('isActive', True))
custom_fields_deactivated = 0
# Get custom fields by statement.
while True:
response = custom_field_service.getCustomFieldsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
# Display results.
for custom_field in response['results']:
print ('Custom field with ID "%s" and name "%s" will'
' be deactivated.' % (custom_field['id'], custom_field['name']))
result = custom_field_service.performCustomFieldAction(
{'xsi_type': 'DeactivateCustomFields'}, statement.ToStatement())
if result and int(result['numChanges']) > 0:
custom_fields_deactivated += int(result['numChanges'])
statement.offset += statement.limit
else:
break
if custom_fields_deactivated > 0:
print 'Number of custom fields deactivated: %s' % custom_fields_deactivated
else:
print 'No custom fields were deactivated.'
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client)
| Aloomaio/googleads-python-lib | examples/ad_manager/v201808/custom_field_service/deactivate_all_line_item_custom_fields.py | Python | apache-2.0 | 2,682 |
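The example above follows the usual Ad Manager page-act-advance loop. A distilled, library-free sketch of that control flow, where fetch_page and perform_action are hypothetical stand-ins for getCustomFieldsByStatement and performCustomFieldAction:

def deactivate_in_pages(statement, fetch_page, perform_action):
    # Keep requesting pages until one comes back empty, acting on each non-empty page.
    changed = 0
    while True:
        page = fetch_page(statement)
        if page.get('results'):
            result = perform_action(statement)
            if result and int(result['numChanges']) > 0:
                changed += int(result['numChanges'])
            statement.offset += statement.limit
        else:
            break
    return changed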
""" Cloud API asynchronous "PDF To Text" job example.
Allows to avoid timeout errors when processing huge or scanned PDF documents.
"""
import os
import requests # pip install requests
import time
import datetime
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Direct URLs of image files to convert to PDF document
# You can also upload your own file into PDF.co and use it as url. Check "Upload File" samples for code snippets: https://github.com/bytescout/pdf-co-api-samples/tree/master/File%20Upload/
SourceFiles = [
"https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/image-to-pdf/image1.png",
"https://bytescout-com.s3.amazonaws.com/files/demo-files/cloud-api/image-to-pdf/image2.jpg"
]
# Destination PDF file name
DestinationFile = ".\\result.pdf"
# (!) Make asynchronous job
Async = True
def main(args = None):
SourceFileURL = ",".join(SourceFiles)
convertImageToPDF(SourceFileURL, DestinationFile)
def convertImageToPDF(uploadedFileUrl, destinationFile):
"""Converts Image To PDF using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["async"] = Async
parameters["name"] = os.path.basename(destinationFile)
parameters["url"] = uploadedFileUrl
# Prepare URL for 'Image To PDF' API request
url = "{}/pdf/convert/from/image".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Asynchronous job ID
jobId = json["jobId"]
# URL of the result file
resultFileUrl = json["url"]
# Check the job status in a loop.
# If you don't want to pause the main thread you can rework the code
# to use a separate thread for the status checking and completion.
while True:
status = checkJobStatus(jobId) # Possible statuses: "working", "failed", "aborted", "success".
# Display timestamp and status (for demo purposes)
print(datetime.datetime.now().strftime("%H:%M.%S") + ": " + status)
if status == "success":
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
break
elif status == "working":
# Pause for a few seconds
time.sleep(3)
else:
print(status)
break
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def checkJobStatus(jobId):
"""Checks server job status"""
url = f"{BASE_URL}/job/check?jobid={jobId}"
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
return json["status"]
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
    main()
 | bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/Image To PDF API/Python/Convert Images To PDF From URLs Asynchronously/ConvertImagesToPdfFromUrlsAsynchronously.py | Python | apache-2.0 | 3,883 |
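An optional, illustrative variant of the polling loop in convertImageToPDF() above: a capped exponential backoff instead of a fixed 3-second sleep. It assumes it runs in the same module, so checkJobStatus() and the time import are already available.

def wait_for_job(job_id, initial_delay=2.0, max_delay=30.0, timeout=600.0):
    """Poll checkJobStatus() until the job leaves the "working" state or we time out."""
    delay, waited = initial_delay, 0.0
    while waited < timeout:
        status = checkJobStatus(job_id)  # "working", "failed", "aborted", "success" or None
        if status != "working":
            return status
        time.sleep(delay)
        waited += delay
        delay = min(delay * 2, max_delay)  # double the wait, capped at max_delay
    return "timeout"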
#!/usr/bin/env python
import pyeapi, sys, argparse, pprint
#pynet.py is in ~/pylib https://github.com/jrogers512/pynet/blob/master/pynet.py
import pynet
def valid_id(i):
##Check that the VLAN is valid and not already in use
vlan_id = int(i)
if vlan_id > 999 or vlan_id < 100:
raise argparse.ArgumentTypeError("Only VLAN's between 100 and 999 are allowed")
return vlan_id
def main():
parser = argparse.ArgumentParser("ex2.py")
parser.add_argument('--check', action="store_true", dest="check_only", default=False, help='Do not make changes, check only')
parser.add_argument('--remove', action="store_true", dest="remove_vlan", default=False, help='Delete the VLAN')
parser.add_argument('--name', action="store", dest="vlan_name", help='VLAN name')
parser.add_argument('device', action="store", help='device hostname as found in ~/.eapi.conf, see https://eos.arista.com/introducing-the-python-client-for-eapi-pyeapi/')
parser.add_argument('vlan_id', type=valid_id, action="store", help='The VLAN ID to work with')
args = parser.parse_args()
    # argparse always sets vlan_id (it is a required positional argument);
    # only vlan_name needs a default when --name is not supplied.
    if args.vlan_name is None:
        args.vlan_name = 'VLAN' + str(args.vlan_id)
device = pyeapi.connect_to(args.device)
    if not args.remove_vlan or args.check_only:
        if check_vlan(args.vlan_id, args.vlan_name, device):
            # VLAN check passed; only apply the change when --check was not given.
            if not args.check_only:
                add_vlan(args.vlan_id, args.vlan_name, device)
        else:
            print "ERR: 123, this should never happen"
else:
remove_vlan(args.vlan_id, device)
def remove_vlan(vlan_id, device):
cmds = ['no vlan ' + str(vlan_id)]
if device.config(cmds):
print "Deleting VLAN" + str(vlan_id)
def add_vlan(vlan_id, vlan_name, device):
cmds = ['vlan ' + str(vlan_id), 'name ' + vlan_name]
    # Some error handling would be nice here, but it is unclear which return value from the .config method should be interpreted as an error.
if device.config(cmds):
print "Adding the " + vlan_name + " VLAN with ID " + str(vlan_id)
def check_vlan(vlan_id, vlan_name, device):
vlans = device.enable("show vlan")[0]['result']['vlans']
vlan_list = vlans.keys()
#vlans = map(int, vlans) ##pylint says not to do this
vlan_list = [int(i) for i in vlan_list]
##VLAN ID check
if vlan_id in vlan_list:
print >> sys.stderr, "Vlan " + str(vlan_id) + " is already in use, quitting."
sys.exit(1)
else:
print "VLAN " + str(vlan_id) + " is available"
##VLAN Name check
for vlan_id, attribs in vlans.iteritems():
if attribs['name'] == vlan_name:
print >> sys.stderr, "VLAN Name " + vlan_name + " already in use on VLAN " + vlan_id + ", quitting."
sys.exit(2)
return True
if __name__ == "__main__":
main()
| jrogers512/pynet | class7/ex2.py | Python | apache-2.0 | 2,922 |
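A quick sanity check of the valid_id() range validation above, written in the same Python 2-era style as the script. It assumes valid_id is in scope (pasted at the bottom of ex2.py, or imported from it with its pyeapi/pynet dependencies available).

import argparse

def _check_valid_id():
    # In-range values are returned as ints; out-of-range values must be rejected.
    assert valid_id('100') == 100
    assert valid_id('999') == 999
    for bad in ('99', '1000'):
        try:
            valid_id(bad)
        except argparse.ArgumentTypeError:
            pass
        else:
            raise AssertionError('expected VLAN %s to be rejected' % bad)

_check_valid_id()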
from django import http
from django.utils.deprecation import MiddlewareMixin
from wagtail.core.models import Site
try:
from django.conf import settings
XS_SHARING_ALLOWED_ORIGINS = settings.XS_SHARING_ALLOWED_ORIGINS
XS_SHARING_ALLOWED_METHODS = settings.XS_SHARING_ALLOWED_METHODS
XS_SHARING_ALLOWED_HEADERS = settings.XS_SHARING_ALLOWED_HEADERS
XS_SHARING_ALLOWED_CREDENTIALS = settings.XS_SHARING_ALLOWED_CREDENTIALS
except AttributeError:
XS_SHARING_ALLOWED_ORIGINS = '*'
XS_SHARING_ALLOWED_METHODS = ['POST', 'GET', 'OPTIONS', 'PUT', 'DELETE']
XS_SHARING_ALLOWED_HEADERS = ['Content-Type', '*']
XS_SHARING_ALLOWED_CREDENTIALS = 'true'
class SiteMiddleware(MiddlewareMixin):
def process_request(self, request):
"""
Set request.site to contain the Site object responsible for handling this request.
"""
try:
request.site = Site.find_for_request(request)
except Site.DoesNotExist:
request.site = None
# Inherit MiddlewareMixin so this class works with the new-style MIDDLEWARE setting,
# matching SiteMiddleware above.
class XsSharing(MiddlewareMixin):
"""
This middleware allows cross-domain XHR using the HTML5 postMessage API.
Access-Control-Allow-Origin: http://foo.example
Access-Control-Allow-Methods: POST, GET, OPTIONS, PUT, DELETE
Based off https://gist.github.com/426829
"""
def process_request(self, request):
if 'HTTP_ACCESS_CONTROL_REQUEST_METHOD' in request.META:
response = http.HttpResponse()
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
response['Access-Control-Allow-Credentials'] = XS_SHARING_ALLOWED_CREDENTIALS
return response
return None
def process_response(self, request, response):
response['Access-Control-Allow-Origin'] = XS_SHARING_ALLOWED_ORIGINS
response['Access-Control-Allow-Methods'] = ",".join(XS_SHARING_ALLOWED_METHODS)
response['Access-Control-Allow-Headers'] = ",".join(XS_SHARING_ALLOWED_HEADERS)
response['Access-Control-Allow-Credentials'] = XS_SHARING_ALLOWED_CREDENTIALS
return response
| parksandwildlife/oim-cms | oim_cms/middleware.py | Python | apache-2.0 | 2,264 |
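A settings sketch for wiring up the middleware above, assuming the module path oim_cms/middleware.py given in the metadata. With XsSharing based on MiddlewareMixin (as adjusted above), both classes can be listed in the new-style MIDDLEWARE setting; the origin value below is a placeholder.

# settings.py (sketch)
MIDDLEWARE = [
    'django.middleware.common.CommonMiddleware',
    # ... other standard middleware ...
    'oim_cms.middleware.SiteMiddleware',
    'oim_cms.middleware.XsSharing',
]

XS_SHARING_ALLOWED_ORIGINS = 'https://app.example.com'  # placeholder origin
XS_SHARING_ALLOWED_METHODS = ['GET', 'POST', 'OPTIONS']
XS_SHARING_ALLOWED_HEADERS = ['Content-Type']
XS_SHARING_ALLOWED_CREDENTIALS = 'true'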