repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
v-iam/azure-sdk-for-python | azure-mgmt-keyvault/azure/mgmt/keyvault/models/__init__.py | 4 | 1576 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sku import Sku
from .permissions import Permissions
from .access_policy_entry import AccessPolicyEntry
from .vault_properties import VaultProperties
from .deleted_vault_properties import DeletedVaultProperties
from .vault_create_or_update_parameters import VaultCreateOrUpdateParameters
from .vault import Vault
from .deleted_vault import DeletedVault
from .resource import Resource
from .vault_paged import VaultPaged
from .deleted_vault_paged import DeletedVaultPaged
from .resource_paged import ResourcePaged
from .key_vault_management_client_enums import (
SkuName,
KeyPermissions,
SecretPermissions,
CertificatePermissions,
StoragePermissions,
CreateMode,
)
__all__ = [
'Sku',
'Permissions',
'AccessPolicyEntry',
'VaultProperties',
'DeletedVaultProperties',
'VaultCreateOrUpdateParameters',
'Vault',
'DeletedVault',
'Resource',
'VaultPaged',
'DeletedVaultPaged',
'ResourcePaged',
'SkuName',
'KeyPermissions',
'SecretPermissions',
'CertificatePermissions',
'StoragePermissions',
'CreateMode',
]
| mit | -1,740,209,341,972,420,900 | 29.307692 | 76 | 0.678934 | false |
fnp/wolnelektury | src/sponsors/widgets.py | 1 | 1450 | # This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
from django.conf import settings
from django import forms
from django.utils.safestring import mark_safe
from sponsors import models
class SponsorPageWidget(forms.Textarea):
    """Admin textarea widget that attaches the sponsors-footer jQuery editor."""

    class Media:
        js = (
            '//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
            '//code.jquery.com/ui/1.12.1/jquery-ui.min.js',
            settings.STATIC_URL + 'sponsors/js/jquery.json.min.js',
            settings.STATIC_URL + 'sponsors/js/footer_admin.js',
        )
        css = {
            'all': (settings.STATIC_URL + 'sponsors/css/footer_admin.css',),
        }

    def render(self, name, value, attrs=None, renderer=None):
        """Render the plain textarea followed by a script that boots the editor."""
        textarea = super(SponsorPageWidget, self).render(name, value, attrs, renderer)
        entries = ', '.join(
            '{name: "%s", id: %d, image: "%s"}' % (str(sponsor), sponsor.pk, sponsor.logo.url)
            for sponsor in models.Sponsor.objects.all().iterator()
        )
        # TODO: "id_" is hard-coded here. This should instead use the correct
        # API to determine the ID dynamically.
        script = (
            '<script type="text/javascript">$(function(e) {'
            + '$("#id_%s").sponsorsFooter({sponsors: [%s]}); });</script>\n' % (name, entries)
        )
        return mark_safe(textarea + script)
| agpl-3.0 | -4,562,136,078,529,503,700 | 45.741935 | 107 | 0.636991 | false |
timpalpant/calibre | src/calibre/ebooks/pdb/ereader/inspector.py | 24 | 5889 | # -*- coding: utf-8 -*-
'''
Inspect the header of ereader files. This is primarily used for debugging.
'''
__license__ = 'GPL v3'
__copyright__ = '2009, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import struct
import sys
from calibre.ebooks.pdb.ereader import EreaderError
from calibre.ebooks.pdb.header import PdbHeaderReader
def ereader_header_info(header):
h0 = header.section_data(0)
print 'Header Size: %s' % len(h0)
if len(h0) == 132:
print 'Header Type: Dropbook compatible'
print ''
ereader_header_info132(h0)
elif len(h0) == 202:
print 'Header Type: Makebook compatible'
print ''
ereader_header_info202(h0)
else:
raise EreaderError('Size mismatch. eReader header record size %i KB is not supported.' % len(h0))
def pdb_header_info(header):
print 'PDB Header Info:'
print ''
print 'Identity: %s' % header.ident
print 'Total Sectons: %s' % header.num_sections
print 'Title: %s' % header.title
print ''
def ereader_header_info132(h0):
    """Dump every 16-bit big-endian field of a 132-byte (Dropbook) header record."""
    print 'Ereader Record 0 (Header) Info:'
    print ''
    # Each field is an unsigned big-endian short; labels name the known fields,
    # unlabeled offsets are fields whose meaning is not identified here.
    print '0-2 Version: %i' % struct.unpack('>H', h0[0:2])[0]
    print '2-4: %i' % struct.unpack('>H', h0[2:4])[0]
    print '4-6: %i' % struct.unpack('>H', h0[4:6])[0]
    print '6-8 Codepage: %i' % struct.unpack('>H', h0[6:8])[0]
    print '8-10: %i' % struct.unpack('>H', h0[8:10])[0]
    print '10-12: %i' % struct.unpack('>H', h0[10:12])[0]
    print '12-14 Non-Text offset: %i' % struct.unpack('>H', h0[12:14])[0]
    print '14-16: %i' % struct.unpack('>H', h0[14:16])[0]
    print '16-18: %i' % struct.unpack('>H', h0[16:18])[0]
    print '18-20: %i' % struct.unpack('>H', h0[18:20])[0]
    print '20-22 Image Count: %i' % struct.unpack('>H', h0[20:22])[0]
    print '22-24: %i' % struct.unpack('>H', h0[22:24])[0]
    print '24-26 Has Metadata?: %i' % struct.unpack('>H', h0[24:26])[0]
    print '26-28: %i' % struct.unpack('>H', h0[26:28])[0]
    print '28-30 Footnote Count: %i' % struct.unpack('>H', h0[28:30])[0]
    print '30-32 Sidebar Count: %i' % struct.unpack('>H', h0[30:32])[0]
    print '32-34 Bookmark Offset: %i' % struct.unpack('>H', h0[32:34])[0]
    print '34-36 MAGIC: %i' % struct.unpack('>H', h0[34:36])[0]
    print '36-38: %i' % struct.unpack('>H', h0[36:38])[0]
    print '38-40: %i' % struct.unpack('>H', h0[38:40])[0]
    print '40-42 Image Data Offset: %i' % struct.unpack('>H', h0[40:42])[0]
    print '42-44: %i' % struct.unpack('>H', h0[42:44])[0]
    print '44-46 Metadata Offset: %i' % struct.unpack('>H', h0[44:46])[0]
    print '46-48: %i' % struct.unpack('>H', h0[46:48])[0]
    print '48-50 Footnote Offset: %i' % struct.unpack('>H', h0[48:50])[0]
    print '50-52 Sidebar Offset: %i' % struct.unpack('>H', h0[50:52])[0]
    print '52-54 Last Data Offset: %i' % struct.unpack('>H', h0[52:54])[0]
    # Remaining words (offsets 54..130) are dumped without interpretation.
    for i in range(54, 131, 2):
        print '%i-%i: %i' % (i, i+2, struct.unpack('>H', h0[i:i+2])[0])
    print ''
def ereader_header_info202(h0):
    """Dump every 16-bit big-endian field of a 202-byte (Makebook) header record."""
    print 'Ereader Record 0 (Header) Info:'
    print ''
    # Fields flagged "Garbage" hold random/uninterpreted values (see footer note).
    print '0-2 Version: %i' % struct.unpack('>H', h0[0:2])[0]
    print '2-4 Garbage: %i' % struct.unpack('>H', h0[2:4])[0]
    print '4-6 Garbage: %i' % struct.unpack('>H', h0[4:6])[0]
    print '6-8 Garbage: %i' % struct.unpack('>H', h0[6:8])[0]
    print '8-10 Non-Text Offset: %i' % struct.unpack('>H', h0[8:10])[0]
    print '10-12: %i' % struct.unpack('>H', h0[10:12])[0]
    print '12-14: %i' % struct.unpack('>H', h0[12:14])[0]
    print '14-16 Garbage: %i' % struct.unpack('>H', h0[14:16])[0]
    print '16-18 Garbage: %i' % struct.unpack('>H', h0[16:18])[0]
    print '18-20 Garbage: %i' % struct.unpack('>H', h0[18:20])[0]
    print '20-22 Garbage: %i' % struct.unpack('>H', h0[20:22])[0]
    print '22-24 Garbage: %i' % struct.unpack('>H', h0[22:24])[0]
    print '24-26: %i' % struct.unpack('>H', h0[24:26])[0]
    print '26-28: %i' % struct.unpack('>H', h0[26:28])[0]
    for i in range(28, 98, 2):
        print '%i-%i Garbage: %i' % (i, i+2, struct.unpack('>H', h0[i:i+2])[0])
    print '98-100: %i' % struct.unpack('>H', h0[98:100])[0]
    for i in range(100, 110, 2):
        print '%i-%i Garbage: %i' % (i, i+2, struct.unpack('>H', h0[i:i+2])[0])
    print '110-112: %i' % struct.unpack('>H', h0[110:112])[0]
    print '112-114: %i' % struct.unpack('>H', h0[112:114])[0]
    print '114-116 Garbage: %i' % struct.unpack('>H', h0[114:116])[0]
    for i in range(116, 202, 2):
        print '%i-%i: %i' % (i, i+2, struct.unpack('>H', h0[i:i+2])[0])
    print ''
    print '* Garbage: Random values.'
    print ''
def section_lengths(header):
print 'Section Sizes'
print ''
for i in range(0, header.section_count()):
size = len(header.section_data(i))
if size > 65505:
message = '<--- Over!'
else:
message = ''
print 'Section %i: %i %s' % (i, size, message)
def main(args=sys.argv):
if len(args) < 2:
print 'Error: requires input file.'
return 1
f = open(sys.argv[1], 'rb')
pheader = PdbHeaderReader(f)
pdb_header_info(pheader)
ereader_header_info(pheader)
section_lengths(pheader)
return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| gpl-3.0 | -1,731,668,282,084,926,700 | 41.673913 | 105 | 0.50467 | false |
ofek/pypinfo | tests/test_db.py | 1 | 1127 | from pypinfo import db
CREDS_FILE = '/path/to/creds_file.json'
def test_get_credentials(tmp_path):
    """A fresh database holds no credentials."""
    db_path = tmp_path / 'db.json'
    db.DB_FILE = str(db_path)  # point the module at a temporary store

    assert db.get_credentials() is None
def test_set_credentials(tmp_path):
    """Storing credentials against a fresh database succeeds."""
    db_path = tmp_path / 'db.json'
    db.DB_FILE = str(db_path)  # point the module at a temporary store

    db.set_credentials(CREDS_FILE)
def test_set_credentials_twice(tmp_path):
    """Storing the same credentials twice is idempotent (raises nothing)."""
    db_path = tmp_path / 'db.json'
    db.DB_FILE = str(db_path)  # point the module at a temporary store

    for _ in range(2):
        db.set_credentials(CREDS_FILE)
def test_round_trip(tmp_path):
    """Credentials written with set_credentials are read back unchanged."""
    db_path = tmp_path / 'db.json'
    db.DB_FILE = str(db_path)  # point the module at a temporary store

    db.set_credentials(CREDS_FILE)

    assert db.get_credentials() == CREDS_FILE
def test_get_credentials_table(tmp_path):
    """The table context manager reuses a passed-in table and closes on exit."""
    db.DB_FILE = str(tmp_path / 'db.json')
    with db.get_credentials_table() as outer:
        handle = outer._storage._handle
        assert not handle.closed
        # Passing an existing table must hand back the very same object...
        with db.get_credentials_table(outer) as inner:
            assert inner is outer
        # ...and leaving the inner context must not close the shared handle.
        assert not handle.closed
    # Leaving the outermost context closes the underlying storage handle.
    assert handle.closed
| mit | -4,920,324,141,681,849,000 | 22 | 55 | 0.639752 | false |
dgjustice/ansible | lib/ansible/modules/storage/netapp/sf_account_manager.py | 16 | 9063 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: sf_account_manager
short_description: Manage SolidFire accounts
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create, destroy, or update accounts on SolidFire
options:
state:
description:
- Whether the specified account should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- Unique username for this account. (May be 1 to 64 characters in length).
required: true
new_name:
description:
- New name for the user account.
required: false
default: None
initiator_secret:
description:
- CHAP secret to use for the initiator. Should be 12-16 characters long and impenetrable.
- The CHAP initiator secrets must be unique and cannot be the same as the target CHAP secret.
- If not specified, a random secret is created.
required: false
target_secret:
description:
- CHAP secret to use for the target (mutual CHAP authentication).
- Should be 12-16 characters long and impenetrable.
- The CHAP target secrets must be unique and cannot be the same as the initiator CHAP secret.
- If not specified, a random secret is created.
required: false
attributes:
description: List of Name/Value pairs in JSON object format.
required: false
account_id:
description:
- The ID of the account to manage or update.
required: false
default: None
status:
description:
- Status of the account.
required: false
'''
EXAMPLES = """
- name: Create Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
- name: Modify Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: present
name: TenantA
new_name: TenantA-Renamed
- name: Delete Account
sf_account_manager:
hostname: "{{ solidfire_hostname }}"
username: "{{ solidfire_username }}"
password: "{{ solidfire_password }}"
state: absent
name: TenantA-Renamed
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class SolidFireAccount(object):
    """Ansible module worker that creates, deletes, or updates a SolidFire account.

    The desired state is read from the module parameters; ``apply()`` compares
    it with the cluster's current state and performs the minimal change.
    """

    def __init__(self):
        # Build the argument spec on top of the shared NetApp host arguments
        # (hostname, username, password).
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            account_id=dict(required=False, type='int', default=None),
            new_name=dict(required=False, type='str', default=None),
            initiator_secret=dict(required=False, type='str'),
            target_secret=dict(required=False, type='str'),
            attributes=dict(required=False, type='dict'),
            status=dict(required=False, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.account_id = p['account_id']
        self.new_name = p['new_name']
        self.initiator_secret = p['initiator_secret']
        self.target_secret = p['target_secret']
        self.attributes = p['attributes']
        self.status = p['status']

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.sfe = netapp_utils.create_sf_connection(module=self.module)

    def get_account(self):
        """
        Return account object if found

        :return: Details about the account. None if not found.
        :rtype: dict
        """
        account_list = self.sfe.list_accounts()

        for account in account_list.accounts:
            if account.username == self.name:
                # When the caller pinned an account_id, only return the account
                # if both the name and the id match; otherwise remember the id
                # of the first name match.
                if self.account_id is not None:
                    if account.account_id == self.account_id:
                        return account
                else:
                    self.account_id = account.account_id
                    return account
        return None

    def create_account(self):
        """Create the account on the cluster; fail the module on any SDK error."""
        try:
            self.sfe.add_account(username=self.name,
                                 initiator_secret=self.initiator_secret,
                                 target_secret=self.target_secret,
                                 attributes=self.attributes)
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            err = get_exception()
            self.module.fail_json(msg='Error creating account %s' % self.name, exception=str(err))

    def delete_account(self):
        """Remove the account identified by self.account_id; fail on SDK error."""
        try:
            self.sfe.remove_account(account_id=self.account_id)
        # Narrowed from a bare ``except`` (see create_account).
        except Exception:
            err = get_exception()
            self.module.fail_json(msg='Error deleting account %s' % self.account_id, exception=str(err))

    def update_account(self):
        """Push the requested changes to the existing account; fail on SDK error."""
        try:
            self.sfe.modify_account(account_id=self.account_id,
                                    username=self.new_name,
                                    status=self.status,
                                    initiator_secret=self.initiator_secret,
                                    target_secret=self.target_secret,
                                    attributes=self.attributes)
        # Narrowed from a bare ``except`` (see create_account).
        except Exception:
            err = get_exception()
            self.module.fail_json(msg='Error updating account %s' % self.account_id, exception=str(err))

    def apply(self):
        """Reconcile desired vs. actual state and exit the module with `changed`."""
        changed = False
        account_exists = False
        update_account = False
        account_detail = self.get_account()

        if account_detail:
            account_exists = True

            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # Check each mutable field; any difference triggers an update.
                if account_detail.username is not None and self.new_name is not None and \
                        account_detail.username != self.new_name:
                    update_account = True
                    changed = True
                elif account_detail.status is not None and self.status is not None \
                        and account_detail.status != self.status:
                    update_account = True
                    changed = True

                elif account_detail.initiator_secret is not None and self.initiator_secret is not None \
                        and account_detail.initiator_secret != self.initiator_secret:
                    update_account = True
                    changed = True

                elif account_detail.target_secret is not None and self.target_secret is not None \
                        and account_detail.target_secret != self.target_secret:
                    update_account = True
                    changed = True

                elif account_detail.attributes is not None and self.attributes is not None \
                        and account_detail.attributes != self.attributes:
                    update_account = True
                    changed = True
        else:
            if self.state == 'present':
                changed = True

        if changed:
            if self.module.check_mode:
                # Check mode: report the would-be change without touching the cluster.
                pass
            else:
                if self.state == 'present':
                    if not account_exists:
                        self.create_account()
                    elif update_account:
                        self.update_account()
                elif self.state == 'absent':
                    self.delete_account()

        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the account manager and apply the requested state."""
    account_manager = SolidFireAccount()
    account_manager.apply()
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 | -6,697,773,245,260,161,000 | 31.367857 | 104 | 0.575637 | false |
great-expectations/great_expectations | great_expectations/expectations/metrics/column_map_metrics/column_values_json_parseable.py | 1 | 1101 | import json
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import F, sparktypes
from great_expectations.expectations.metrics.map_metric import (
ColumnMapMetricProvider,
column_condition_partial,
)
class ColumnValuesJsonParseable(ColumnMapMetricProvider):
    """Map metric: True for each column value that parses as JSON."""

    condition_metric_name = "column_values.json_parseable"

    @column_condition_partial(engine=PandasExecutionEngine)
    def _pandas(cls, column, **kwargs):
        def is_json(val):
            # json.loads raises ValueError (JSONDecodeError) for malformed
            # input and TypeError for non-string values; the original bare
            # ``except`` also swallowed KeyboardInterrupt/SystemExit.
            try:
                json.loads(val)
                return True
            except (ValueError, TypeError):
                return False

        return column.map(is_json)

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(cls, column, json_schema, **kwargs):
        def is_json(val):
            # Same narrowing as the pandas implementation above.
            try:
                json.loads(val)
                return True
            except (ValueError, TypeError):
                return False

        is_json_udf = F.udf(is_json, sparktypes.BooleanType())

        return is_json_udf(column)
| apache-2.0 | 6,666,463,513,467,871,000 | 27.230769 | 80 | 0.643052 | false |
swalladge/ranger | ranger/gui/widgets/view_miller.py | 2 | 9616 | # This file is part of ranger, the console file manager.
# License: GNU GPL version 3, see the file "AUTHORS" for details.
"""ViewMiller arranges the view in miller columns"""
from __future__ import (absolute_import, division, print_function)
import curses
from ranger.container import settings
from ranger.gui.widgets.view_base import ViewBase
from .browsercolumn import BrowserColumn
from .pager import Pager
from ..displayable import DisplayableContainer
class ViewMiller(ViewBase): # pylint: disable=too-many-ancestors,too-many-instance-attributes
    """Miller-column view: one BrowserColumn per configured ratio plus an
    embedded pager, with optional borders and preview-column collapsing."""
    # Normalized column width ratios (set in rebuild()).
    ratios = None
    # Whether a preview column is shown at the far right.
    preview = True
    # True while the preview column is collapsed (no preview available).
    is_collapsed = False
    # Ratios used when the preview column is collapsed/stretched.
    stretch_ratios = None
    # Last value returned by _collapse(), reused when previews are pending.
    old_collapse = False
    def __init__(self, win):
        """Create the columns and pager and hook up settings signals."""
        ViewBase.__init__(self, win)
        self.preview = True
        self.columns = []
        self.pager = Pager(self.win, embedded=True)
        self.pager.visible = False
        self.add_child(self.pager)
        self.rebuild()
        # Redraw borders when preview-related options change.
        for option in ('preview_directories', 'preview_files'):
            self.settings.signal_bind('setopt.' + option,
                                      self._request_clear_if_has_borders, weak=True)
        self.settings.signal_bind('setopt.column_ratios', self.request_clear)
        self.settings.signal_bind('setopt.column_ratios', self.rebuild,
                                  priority=settings.SIGNAL_PRIORITY_AFTER_SYNC)
        self.old_draw_borders = self.settings.draw_borders
    def rebuild(self):
        """Destroy and recreate all BrowserColumns from the column_ratios setting."""
        for child in self.container:
            if isinstance(child, BrowserColumn):
                self.remove_child(child)
                child.destroy()
        ratios = self.settings.column_ratios
        for column in self.columns:
            column.destroy()
            self.remove_child(column)
        self.columns = []
        ratios_sum = sum(ratios)
        self.ratios = tuple((x / ratios_sum) for x in ratios)
        # ``last`` is the residual ratio kept for the right padding strip.
        last = 0.1 if self.settings.padding_right else 0
        if len(self.ratios) >= 2:
            self.stretch_ratios = self.ratios[:-2] + \
                ((self.ratios[-2] + self.ratios[-1] * 1.0 - last),
                 (self.ratios[-1] * last))
        # Column "levels" are relative to the current directory (0);
        # negative levels are parents, +1 is the preview column.
        offset = 1 - len(ratios)
        if self.preview:
            offset += 1
        for level in range(len(ratios)):
            column = BrowserColumn(self.win, level + offset)
            self.add_child(column)
            self.columns.append(column)
        try:
            # The main column is the one left of the preview (or the last one).
            self.main_column = self.columns[self.preview and -2 or -1]
        except IndexError:
            self.main_column = None
        else:
            self.main_column.display_infostring = True
            self.main_column.main_column = True
        self.resize(self.y, self.x, self.hei, self.wid)
    def _request_clear_if_has_borders(self):
        """Request a full clear, but only when borders are drawn (they'd smear)."""
        if self.settings.draw_borders:
            self.request_clear()
    def draw(self):
        """Refresh directory contents, draw all children, borders and overlays."""
        if self.need_clear:
            self.win.erase()
            self.need_redraw = True
            self.need_clear = False
        for tab in self.fm.tabs.values():
            directory = tab.thisdir
            if directory:
                directory.load_content_if_outdated()
                directory.use()
        DisplayableContainer.draw(self)
        if self.settings.draw_borders:
            self._draw_borders()
        if self.draw_bookmarks:
            self._draw_bookmarks()
        elif self.draw_hints:
            self._draw_hints()
        elif self.draw_info:
            self._draw_info(self.draw_info)
    def _draw_borders(self):
        """Draw the box-drawing border around and between the columns."""
        win = self.win
        self.color('in_browser', 'border')
        left_start = 0
        right_end = self.wid - 1
        # Skip leading columns without a preview so the border starts at the
        # first visible column.
        for child in self.columns:
            if not child.has_preview():
                left_start = child.x + child.wid
            else:
                break
        # Shift the rightmost vertical line to the left to create a padding,
        # but only when padding_right is on, the preview column is collapsed
        # and we did not open the pager to "zoom" in to the file.
        if self.settings.padding_right and not self.pager.visible and self.is_collapsed:
            right_end = self.columns[-1].x - 1
            if right_end < left_start:
                right_end = self.wid - 1
        # Draw horizontal lines and the leftmost vertical line
        try:
            # pylint: disable=no-member
            win.hline(0, left_start, curses.ACS_HLINE, right_end - left_start)
            win.hline(self.hei - 1, left_start, curses.ACS_HLINE, right_end - left_start)
            win.vline(1, left_start, curses.ACS_VLINE, self.hei - 2)
            # pylint: enable=no-member
        except curses.error:
            pass
        # Draw the vertical lines in the middle
        for child in self.columns[:-1]:
            if not child.has_preview():
                continue
            if child.main_column and self.pager.visible:
                # If we "zoom in" with the pager, we have to
                # skip the between main_column and pager.
                break
            x = child.x + child.wid
            y = self.hei - 1
            try:
                # pylint: disable=no-member
                win.vline(1, x, curses.ACS_VLINE, y - 1)
                self.addch(0, x, curses.ACS_TTEE, 0)
                self.addch(y, x, curses.ACS_BTEE, 0)
                # pylint: enable=no-member
            except curses.error:
                # in case it's off the boundaries
                pass
        # Draw the last vertical line
        try:
            # pylint: disable=no-member
            win.vline(1, right_end, curses.ACS_VLINE, self.hei - 2)
            # pylint: enable=no-member
        except curses.error:
            pass
        # Corners last, so they overwrite the line ends.
        # pylint: disable=no-member
        self.addch(0, left_start, curses.ACS_ULCORNER)
        self.addch(self.hei - 1, left_start, curses.ACS_LLCORNER)
        self.addch(0, right_end, curses.ACS_URCORNER)
        self.addch(self.hei - 1, right_end, curses.ACS_LRCORNER)
        # pylint: enable=no-member
    def _collapse(self):
        # Should the last column be cut off? (Because there is no preview)
        if not self.settings.collapse_preview or not self.preview \
                or not self.stretch_ratios:
            return False
        result = not self.columns[-1].has_preview()
        target = self.columns[-1].target
        if not result and target and target.is_file:
            if self.fm.settings.preview_script and \
                    self.fm.settings.use_preview_script:
                try:
                    result = not self.fm.previews[target.realpath]['foundpreview']
                except KeyError:
                    # Preview not computed yet: keep the previous decision to
                    # avoid flicker while the preview script runs.
                    return self.old_collapse
        self.old_collapse = result
        return result
    def resize(self, y, x, hei=None, wid=None):
        """Resize all the columns according to the given ratio"""
        ViewBase.resize(self, y, x, hei, wid)
        borders = self.settings.draw_borders
        pad = 1 if borders else 0
        left = pad
        self.is_collapsed = self._collapse()
        if self.is_collapsed:
            generator = enumerate(self.stretch_ratios)
        else:
            generator = enumerate(self.ratios)
        last_i = len(self.ratios) - 1
        for i, ratio in generator:
            wid = int(ratio * self.wid)
            cut_off = self.is_collapsed and not self.settings.padding_right
            if i == last_i:
                if not cut_off:
                    # Last column absorbs the rounding remainder.
                    wid = int(self.wid - left + 1 - pad)
                else:
                    self.columns[i].resize(pad, max(0, left - 1), hei - pad * 2, 1)
                    self.columns[i].visible = False
                    continue
            if i == last_i - 1:
                # The pager shares the area from the main column rightwards.
                self.pager.resize(pad, left, hei - pad * 2, max(1, self.wid - left - pad))
                if cut_off:
                    self.columns[i].resize(pad, left, hei - pad * 2, max(1, self.wid - left - pad))
                    continue
            try:
                self.columns[i].resize(pad, left, hei - pad * 2, max(1, wid - 1))
            except KeyError:
                pass
            left += wid
    def open_pager(self):
        """Show the embedded pager, hiding the two rightmost columns."""
        self.pager.visible = True
        self.pager.focused = True
        self.need_clear = True
        self.pager.open()
        try:
            self.columns[-1].visible = False
            self.columns[-2].visible = False
        except IndexError:
            pass
    def close_pager(self):
        """Hide the embedded pager and restore the two rightmost columns."""
        self.pager.visible = False
        self.pager.focused = False
        self.need_clear = True
        self.pager.close()
        try:
            self.columns[-1].visible = True
            self.columns[-2].visible = True
        except IndexError:
            pass
    def poke(self):
        """Periodic hook: re-show the preview column and react to setting changes."""
        ViewBase.poke(self)
        # Show the preview column when it has a preview but has
        # been hidden (e.g. because of padding_right = False)
        if not self.columns[-1].visible and self.columns[-1].has_preview() \
                and not self.pager.visible:
            self.columns[-1].visible = True
        if self.preview and self.is_collapsed != self._collapse():
            if self.fm.settings.preview_files:
                # force clearing the image when resizing preview column
                self.columns[-1].clear_image(force=True)
            self.resize(self.y, self.x, self.hei, self.wid)
        if self.old_draw_borders != self.settings.draw_borders:
            self.resize(self.y, self.x, self.hei, self.wid)
            self.old_draw_borders = self.settings.draw_borders
| gpl-3.0 | 334,391,832,981,958,200 | 33.967273 | 99 | 0.556676 | false |
ForkedReposBak/mxnet | python/mxnet/numpy/multiarray.py | 2 | 394970 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy ndarray and util functions."""
try:
from __builtin__ import all as py_all
from __builtin__ import slice as py_slice
except ImportError:
from builtins import all as py_all
from builtins import slice as py_slice
from array import array as native_array
import functools
import ctypes
import warnings
import numpy as _np
from .. import _deferred_compute as dc
from ..autograd import is_recording
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP
from ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\
get_oshape_of_gather_nd_op
from ..ndarray._internal import _set_np_ndarray_class
from . import _op as _mx_np_op
from ..base import check_call, _LIB, NDArrayHandle, c_array
from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types
from ..context import Context
from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\
is_np_default_dtype
from ..context import current_context
from ..ndarray import numpy as _mx_nd_np
from ..ndarray.numpy import _internal as _npi
from ..ndarray.ndarray import _storage_type, from_numpy
from .utils import _get_np_op
from .fallback import * # pylint: disable=wildcard-import,unused-wildcard-import
from . import fallback
__all__ = ['ndarray', 'empty', 'empty_like', 'array', 'shape', 'median',
'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'all', 'any', 'broadcast_to',
'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'power', 'bitwise_not',
'delete', 'trace', 'transpose',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'invert',
'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'histogram',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append', 'argsort',
'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',
'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'flatnonzero', 'tril_indices',
'concatenate', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'amax', 'amin', 'max', 'min',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',
'logical_and', 'logical_or', 'logical_xor',
'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot',
'triu_indices_from', 'triu_indices', 'tri',
'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',
'unique', 'lcm', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'cross', 'kron', 'equal', 'not_equal', 'interp',
'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero',
'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul',
'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal']
__all__ += fallback.__all__
# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
_NDARRAY_EMPTY_TUPLE_INDEXING = 2
# Return code for 0-d boolean array handler
_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
# This function is copied from ndarray.py since pylint
# keeps giving false alarm error of undefined-all-variable
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name
    """Return a new handle with specified shape and context.

    Empty handle is only used to hold results.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array the handle will describe.
    ctx : Context
        Device context for the allocation.
    delay_alloc : bool
        If True, defer the actual memory allocation until first use.
    dtype : numpy dtype, optional
        Element type; mapped to the MXNet type id via _DTYPE_NP_TO_MX.

    Returns
    -------
    handle
        A new empty `ndarray` handle.
    """
    hdl = NDArrayHandle()
    # Call into the C API; shape is marshalled as an unsigned-int buffer.
    check_call(_LIB.MXNDArrayCreateEx(
        c_array_buf(mx_uint, native_array('I', shape)),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),
        ctypes.byref(hdl)))
    return hdl
def _reshape_view(a, *shape): # pylint: disable=redefined-outer-name
    """Returns a **view** of this array with a new shape without altering any data.

    Parameters
    ----------
    shape : tuple of int, or n ints
        The new shape should not change the array size, namely
        ``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.
        Some dimensions of the shape can take special value -1, which
        infers the dimension of the output shape by using the remainder of the
        input dimensions keeping the size of the new array same as that of the input array.
        At most one dimension of shape can be -1.

    Returns
    -------
    ndarray
        An array with desired shape that shares data with this array.
    """
    # Accept either reshape(a, (2, 3)) or reshape(a, 2, 3).
    if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
        shape = shape[0]
    handle = NDArrayHandle()
    # The fourth argument is presumably the "reverse" inference flag of
    # MXNDArrayReshape64 — TODO confirm against the C API signature.
    check_call(_LIB.MXNDArrayReshape64(a.handle,
                                       len(shape),
                                       c_array(ctypes.c_int64, shape),
                                       False,
                                       ctypes.byref(handle)))
    # The view keeps the writability of the source array.
    return ndarray(handle=handle, writable=a.writable)
def _as_mx_np_array(object, ctx=None):
    """Convert object to mxnet.numpy.ndarray.

    Parameters
    ----------
    object : numpy.ndarray, scalar, bool, or (possibly nested) list/tuple thereof
        The value to convert.
    ctx : Context, optional
        Device the resulting ndarray should live on; default context if omitted.

    Returns
    -------
    mxnet.numpy.ndarray, scalar, or list/tuple
        Scalars pass through unchanged; lists/tuples are converted element-wise.

    Raises
    ------
    TypeError
        If ``object`` is of an unsupported type.
    """
    if isinstance(object, _np.ndarray):
        if not object.flags['C_CONTIGUOUS']:
            # from_numpy needs a C-contiguous buffer to share memory.
            object = _np.ascontiguousarray(object, dtype=object.dtype)
        ret = from_numpy(object, array_cls=ndarray)
        return ret if ctx is None else ret.as_in_ctx(ctx=ctx)
    elif isinstance(object, (integer_types, numeric_types)):
        return object
    elif isinstance(object, (list, tuple)):
        tmp = [_as_mx_np_array(arr) for arr in object]
        return object.__class__(tmp)
    elif isinstance(object, (_np.bool_, bool)):
        # Bug fix: ``numpy.bool`` (an alias of the builtin ``bool``) was removed
        # in NumPy 1.24, so referencing ``_np.bool`` here raised AttributeError
        # at call time on modern NumPy. Checking ``bool`` directly is
        # equivalent and version-safe.
        return array(object, dtype=_np.bool_, ctx=ctx)
    else:
        raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))
def _as_onp_array(object):
    """Convert `object` to official (onp) numpy.ndarray recursively.

    Parameters
    ----------
    object : ndarray, list, tuple, or any other value
        mxnet ndarrays are copied to official numpy arrays; lists and tuples
        are converted element-wise; anything else is returned unchanged.

    Returns
    -------
    tuple of (converted object, Context or None)
        The second element is the common context of all contained mxnet
        ndarrays, or None if the object contains none.

    Raises
    ------
    ValueError
        If the input contains ndarrays allocated on different devices.
    """
    cur_ctx = None
    if isinstance(object, ndarray):
        return object.asnumpy(), object.ctx
    elif isinstance(object, (list, tuple)):
        tmp = []
        for arr in object:
            arr, tmp_ctx = _as_onp_array(arr)
            tmp.append(arr)
            if cur_ctx is None:
                cur_ctx = tmp_ctx
            elif tmp_ctx is not None and cur_ctx != tmp_ctx:
                # Bug fix: this used to call str(cur_ctx, tmp_ctx) — str() with
                # two positional args raises TypeError, and the format string
                # has two placeholders but was given only one argument.
                raise ValueError('Ambiguous to set the context for the output ndarray since'
                                 ' input ndarrays are allocated on different devices: {} and {}'
                                 .format(cur_ctx, tmp_ctx))
        return object.__class__(tmp), cur_ctx
    else:
        return object, cur_ctx
# Have to use 0 as default value for stype since pylint does not allow
# importing _STORAGE_TYPE_DEFAULT from ndarray.py.
def _np_ndarray_cls(handle, writable=True, stype=0):
    """Create an mxnet.numpy.ndarray from a backend handle.

    Only the default (dense) storage type, encoded as 0, is supported; a
    stype of -1 means "query the handle for its storage type".
    """
    resolved_stype = _storage_type(handle) if stype == -1 else stype
    if resolved_stype != 0:
        raise ValueError('_np_ndarray_cls currently only supports default storage '
                         'type, while received stype = {}'.format(resolved_stype))
    return ndarray(handle, writable=writable)
# Register the factory above so the backend constructs mxnet.numpy.ndarray
# instances for numpy-semantics arrays.
_set_np_ndarray_class(_np_ndarray_cls)
# Registries mapping official NumPy array-function/ufunc objects to their
# mxnet implementations; populated elsewhere and consumed by
# __array_function__ / __array_ufunc__ below.
_NUMPY_ARRAY_FUNCTION_DICT = {}
_NUMPY_ARRAY_UFUNC_DICT = {}
# One-time warning records so each operator that falls back to official
# NumPy only logs its warning once.
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD = {}
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD = {}
def wrap_mxnp_np_ufunc(func):
    """
    A convenience decorator for binary, overload-able python operators:
    when the right-hand operand is an official NumPy array it is converted
    to an mxnet.numpy.ndarray on the left operand's context first, so mixed
    mx_np/onp operands work transparently.

    Parameters
    ----------
    func : a python overload-able binary function to be wrapped for type casting.

    Returns
    -------
    Function
        A function wrapped with type casted.
    """
    @functools.wraps(func)
    def _wrap_mxnp_np_ufunc(x1, x2):
        # Promote an official NumPy right-hand operand before dispatching.
        rhs = _as_mx_np_array(x2, ctx=x1.ctx) if isinstance(x2, _np.ndarray) else x2
        return func(x1, rhs)
    return _wrap_mxnp_np_ufunc
@set_module('mxnet.numpy') # pylint: disable=invalid-name
class ndarray(NDArray):
"""
ndarray(handle, writable=True):
An array object represents a multidimensional, homogeneous array of fixed-size items.
An associated data-type object describes the format of each element in the array
(its byte-order, how many bytes it occupies in memory, whether it is an integer, a
floating point number, or something else, etc.). Arrays should be constructed using
`array`, `zeros` or `empty`. Currently, only c-contiguous arrays are supported.
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `mxnet.numpy` module and examine the
methods and attributes of an array.
Parameters
----------
handle: int
The ndarray handle in backend (C++).
writable: bool
Indicates whether inplace-assignment is allowed for the array.
Attributes
----------
T : ndarray
Transpose of the array.
dtype : dtype object
Describes the format of the elements in the array.
size : int
Number of elements in the array.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
"""
@staticmethod
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray
to this function. The operators must comply with the ufunc definition in NumPy.
The following code is adapted from CuPy.
Casting rules for operator with mx_np and onp (inplace op will keep its type)
| Expression | a type | b type | out type|
| --- | --- | --- | --- |
| `a += b` | onp | mx_np | onp |
| `a += b` | mx_np | onp | mx_np |
| `c = a + b` | onp | mx_np | mx_np |
| `c = a + b` | mx_np | onp | mx_np |
"""
ufunc_list = ["add", "subtract", "multiply", "divide", "true_divide", "floor_divide", "power",
"remainder", "bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift",
"greater", "greater_equal", "less", "less_equal", "not_equal", "equal", "matmul"]
if 'out' in kwargs:
# need to unfold tuple argument in kwargs
out = kwargs['out']
if len(out) != 1:
raise ValueError('The `out` parameter must have exactly one ndarray')
kwargs['out'] = out[0]
if method == '__call__':
name = ufunc.__name__
mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)
onp_op = _get_np_op(name)
if mx_ufunc is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(name)
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
if onp_op not in _FALLBACK_ARRAY_UFUNC_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation", name)
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD[onp_op] = True
out = onp_op(*new_inputs, **kwargs)
return _as_mx_np_array(out, ctx=inputs[0].ctx)
# ops with np mx_np
elif name in ufunc_list and isinstance(inputs[0], _np.ndarray):
# inplace
if 'out' in kwargs:
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
return onp_op(*new_inputs, **kwargs)
else:
new_inputs = [_as_mx_np_array(arg, ctx=inputs[1].ctx)
if isinstance(arg, _np.ndarray) else arg for arg in inputs]
return mx_ufunc(*new_inputs, **kwargs)
else:
return mx_ufunc(*inputs, **kwargs)
else:
return NotImplemented
@staticmethod
def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy operators that comply with the array function protocol to
this function.
"""
mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)
func_name = func.__name__
if mx_np_func is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(func)
new_args, cur_ctx = _as_onp_array(args)
if cur_ctx is None:
raise ValueError('Unknown context for the input ndarrays. It is probably a bug. Please'
' create an issue on GitHub.')
new_kwargs = {}
for k, v in kwargs.items():
new_kwargs[k] = v.asnumpy() if isinstance(v, ndarray) else v
if func not in _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation.", func_name)
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD[func] = True
out = func(*new_args, **new_kwargs)
return _as_mx_np_array(out, ctx=cur_ctx)
else:
# Note: this allows subclasses that don't override
# __array_function__ to handle mxnet.numpy.ndarray objects
if not py_all(issubclass(t, ndarray) for t in types):
return NotImplemented
return mx_np_func(*args, **kwargs)
    def _get_np_basic_indexing(self, key):
        """
        This function indexes ``self`` with a normalized basic-indexing key
        (a tuple of ``slice``/int/``None`` entries only). Contiguous accesses
        return a shared-memory view; non-contiguous ones go through the
        backend slice operator and produce a new array.
        """
        # `None` entries create new axes and are not real indices; drop them
        # before validating against self.ndim.
        key_nd = tuple(idx for idx in key if idx is not None)
        if len(key_nd) < self.ndim:
            raise RuntimeError(
                'too few indices after normalization: expected `ndim` ({}) '
                'but got {}. This is a bug, please report it!'
                ''.format(self.ndim, len(key_nd))
            )
        if len(key_nd) > self.ndim:
            raise IndexError(
                'too many indices ({}) for array with {} dimensions'
                ''.format(len(key_nd), self.ndim)
            )
        none_axes = [ax for ax in range(len(key)) if key[ax] is None]  # pylint: disable=invalid-name
        # Integer indices collapse their axes; convert them to length-1 slices
        # and record the collapsed axes so they can be dropped at the end.
        slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
        new_axes = self._new_axes_after_basic_indexing(none_axes, key)
        # Check bounds for integer axes
        for ax in int_axes:  # pylint: disable=invalid-name
            if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
                raise IndexError(
                    'index {} is out of bounds for axis {} with size {}'
                    ''.format(key_nd[ax], ax, self.shape[ax]))
        if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
            # Create a shared-memory view by using low-level flat slicing
            flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
                slc_key, self.shape
            )
            handle = NDArrayHandle()
            flat_self = self.reshape_view(-1)
            check_call(
                _LIB.MXNDArraySlice(
                    flat_self.handle,
                    mx_uint(flat_begin),
                    mx_uint(flat_end),
                    ctypes.byref(handle),
                )
            )
            sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
            sliced = self.__class__(handle=handle, writable=self.writable)
            if 0 in sliced_shape:
                # Zero-size result goes through reshape rather than
                # reshape_view — presumably reshape_view cannot handle
                # 0-size shapes; confirm against its implementation.
                sliced = sliced.reshape(sliced_shape)
            else:
                sliced = sliced.reshape_view(sliced_shape)
        else:
            begin, end, step = self._basic_indexing_key_to_begin_end_step(
                slc_key, self.shape, keep_none=True
            )
            sliced = _npi.slice(self, begin, end, step)
        # Reshape to final shape due to integer and `None` entries in `key`.
        final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]
        for ax in new_axes:  # pylint: disable=invalid-name
            final_shape.insert(ax, 1)
        if sliced.size == 0:
            return sliced.reshape(tuple(final_shape))
        else:
            return sliced.reshape_view(tuple(final_shape))
def _get_np_empty_tuple_indexing(self, key):
new_shape = []
num_none = 0
for i, idx in enumerate(key):
if idx is None:
new_shape.append(1) # expand dimension
num_none += 1
elif idx == ():
new_shape.append(0) # 0 shape
elif idx == slice(None, None, None):
new_shape.append(self.shape[i - num_none])
return empty(new_shape, dtype=self.dtype)
    def _get_np_advanced_indexing(self, key):
        """Return a copy of the elements selected by an advanced-indexing key:
        the key is converted into an index tensor and gathered via gather_nd,
        then reshaped to account for any `None` (new-axis) entries in the key.
        """
        idcs, new_axes = self._get_index_nd(key)
        if type(idcs) == NDArray:  # pylint: disable=unidiomatic-typecheck
            idcs = idcs.as_np_ndarray()
        else:
            # Per-axis index arrays are stacked into a single index tensor.
            idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
        sliced = _npi.gather_nd(self, idcs)
        # Reshape due to `None` entries in `key`.
        if new_axes:
            final_shape = [sliced.shape[i] for i in range(sliced.ndim)]
            for ax in new_axes:  # pylint: disable=invalid-name
                final_shape.insert(ax, 1)
            return sliced.reshape(tuple(final_shape))
        else:
            return sliced
    def _set_np_advanced_indexing(self, key, value):
        """This function is called by __setitem__ when key is an advanced index."""
        idcs, new_axes = self._get_index_nd(key)
        if type(idcs) == NDArray:  # pylint: disable=unidiomatic-typecheck
            idcs = idcs.as_np_ndarray()
        else:
            # Per-axis index arrays are stacked into a single index tensor.
            idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
        # Broadcast `value` to the gather_nd output shape, then scatter it back.
        vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)
        value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)
        self._scatter_set_nd(value_nd, idcs)
    # pylint: disable=redefined-outer-name
    def _get_np_boolean_indexing(self, key, ndim, shape):
        """
        There are two types of boolean indices (which are equivalent,
        for the most part though). This function will handle single
        boolean indexing for higher speed.
        If this is not the case, it is instead expanded into (multiple)
        integer array indices and will be handled by advanced indexing.

        `key` is the boolean mask; `ndim` and `shape` are the indexed
        array's rank and shape, passed in by the caller.
        """
        key_shape = key.shape
        key_ndim = len(key_shape)
        if ndim < key_ndim:
            raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'
                             .format(key_ndim, ndim))
        # The mask must match the leading dimensions of the array exactly.
        for i in range(key_ndim):
            if key_shape[i] != shape[i]:
                raise IndexError('boolean index did not match indexed array along dimension {};'
                                 ' dimension is {} but corresponding boolean dimension is {}'
                                 .format(i, shape[i], key_shape[i]))
        remaining_dims = shape[key_ndim:]
        # Flatten the masked leading dims so a 1-D boolean_mask applies,
        # then restore the trailing (unmasked) dims on the result.
        data = _reshape_view(self, -1, *remaining_dims)
        key = _reshape_view(key, -1)
        return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)
def _set_np_boolean_indexing(self, key, value):
"""
There are two types of boolean indices (which are equivalent,
for the most part though). This function will handle single boolean assign for higher speed.
If this is not the case, it is instead expanded into (multiple)
integer array indices and will be handled by advanced assign.
"""
if isinstance(value, numeric_types):
_npi.boolean_mask_assign_scalar(data=self, mask=key,
value=int(value) if isinstance(value, bool) else value,
start_axis=0, out=self)
elif isinstance(value, ndarray):
_npi.boolean_mask_assign_tensor(data=self, mask=key, value=value, start_axis=0, out=self)
else:
raise NotImplementedError('type %s is not supported.'%(type(value)))
# pylint: disable=too-many-return-statements
def __getitem__(self, key):
"""Return self[key].
Returns a sliced view of this array if the elements fetched are contiguous in memory;
otherwise, returns a newly created NDArray.
This functions supports advanced indexing defined in the following reference with
some restrictions. Boolean indexing is supported only for a single boolean ndarray
as a key. Mixing boolean ndarray with other index types is not supported in ``advanced``
indexing.
For basic indexing, i.e., if ``key`` consists only of integers,
``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is
returned that shares memory with this array if the accessed portion is
contiguous in memory.
Otherwise, a newly created ``ndarray`` is returned.
This functions supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
Indexing key.
Examples
--------
The default is to give explicit indices for all axes:
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> x[0, :2]
array([0., 1.])
>>> x[:, :-1]
array([[0., 1.],
[3., 4.]])
If fewer indices are given, they are automatically supplemented by an
appropriate number of ``slice(None)`` ("``:``") to the right. For
instance, a single integer indexes along the first axis:
>>> x[0]
array([0., 1., 2.])
>>> x[1:]
array([[3., 4., 5.]])
To omit a range of axes that should be kept as-is, an `Ellipsis`
("``...``") can be used:
>>> x = np.arange(16).reshape(2, 2, 2, 2)
>>> x[0, ..., 1]
array([[1., 3.],
[5., 7.]])
>>> x[0, :, :, 1] # equivalent
array([[1., 3.],
[5., 7.]])
New axes of length 1 can be created by inserting ``None``
(`numpy.newaxis`) in the index:
>>> x = np.arange(6).reshape(2, 3)
>>> x[None, :, :]
array([[[0., 1., 2.],
[3., 4., 5.]]])
>>> x[None, :, :].shape
(1, 2, 3)
If the indexed portion of the array is contiguous in memory, no data
is copied. Instead, a shared-memory view of the original array is
returned, and changes to that view affect the original array:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[0] # contiguous
>>> y
array([[0., 1.],
[2., 3.]])
>>> y[:] = -1
>>> x
array([[[-1., -1.],
[-1., -1.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[1, :1, :] # contiguous
>>> y
array([[4., 5.]])
>>> y[:] = -1
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[-1., -1.],
[ 6., 7.]]])
>>> x = np.arange(0, 8).reshape(2, 2, 2)
>>> y = x[:, :, 1] # not contiguous
>>> y
array([[1., 3.],
[5., 7.]])
>>> y[:] = -1
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
If the indexing key contains `list`, `numpy.ndarray` or `NDArray`
objects, advanced indexing is triggered, which always returns a
copy:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> x[[0, 1]]
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> x[[0, 1], :] # equivalent
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
Get negative elements in an ndarray through boolean array indexing
>>> x = np.array([1., -1., -2., 3])
>>> x[x < 0]
array([-1., -2.])
For more imformation related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
ndim = self.ndim # pylint: disable=redefined-outer-name
shape = self.shape # pylint: disable=redefined-outer-name
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool, ctx=self.ctx)
if isinstance(key, list):
try:
new_key = _np.array(key)
if new_key.dtype == _np.bool_:
key = new_key
except Exception as err:
raise TypeError('{}'.format(str(err)))
if isinstance(key, _np.ndarray):
if dc.is_deferred_compute():
raise TypeError('Indexing with a numpy array is not supported in HybridBlock.')
if key.dtype == _np.bool_:
key = array(key, dtype='bool', ctx=self.ctx)
# Handle single boolean index of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other idices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced indexing.
# Come before the check self.dim == 0 as it also handle the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool_:
return self._get_np_boolean_indexing(key, ndim, shape)
if ndim == 0 and key != ():
raise IndexError('scalar tensor can only accept `()` as index')
# Handle simple cases for higher speed
if isinstance(key, tuple) and len(key) == 0:
return self
if isinstance(key, tuple) and len(key) == ndim\
and py_all(isinstance(idx, integer_types) for idx in key):
out = self
for idx in key:
out = out[idx]
return out
if isinstance(key, integer_types):
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
if key.step is None or key.step == 1:
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
elif key.step == 0:
raise ValueError("slice step cannot be zero")
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an zero dim must be expanded
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
indexing_dispatch_code = get_indexing_dispatch_code(key)
if indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
# won't be affected by zero-dim boolean indices
return self._get_np_empty_tuple_indexing(key)
elif indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_basic_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_basic_indexing(key)
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
if dc.is_deferred_compute():
raise TypeError('Advanced indexing is not supported in HybridBlock.')
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_adanced_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_advanced_indexing(key)
else:
raise RuntimeError
# pylint: disable=inconsistent-return-statements
def __setitem__(self, key, value):
"""Sets ``self[key]`` to ``value``.
This functions supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,
with the restriction that boolean array indexing is not supported.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
The indexing key.
value : scalar or array-like object that can be broadcast to the shape of self[key]
The value to set.
Examples
--------
>>> x = np.zeros((2, 3))
>>> x[:] = 1
>>> x
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> x[:, 1:2] = 2
>>> x
array([[ 1., 2., 1.],
[ 1., 2., 1.]])
>>> x[1:2, 1:] = 3
>>> x
array([[ 1., 2., 1.],
[ 1., 3., 3.]])
>>> x[1:, 0:2] = np.zeros((1, 2))
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 3.]])
>>> x[1, 2] = 4
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 4.]])
>>> x[[0], [1, 2]] = 5
>>> x
array([[ 1., 5., 5.],
[ 0., 0., 4.]])
>>> x[::-1, 0:2:2] = [6]
>>> x
array([[ 6., 5., 5.],
[ 6., 0., 4.]])
For imformation related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
if isinstance(value, NDArray) and not isinstance(value, ndarray):
raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')
if isinstance(key, bool): # otherwise will be treated as 0 and 1
key = array(key, dtype=_np.bool)
# Handle single boolean assign of matching dimensionality and size first for higher speed
# If the boolean array is mixed with other idices, it is instead expanded into (multiple)
# integer array indices and will be handled by advanced assign.
# Come before the check self.dim == 0 as it also handle the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool:
return self._set_np_boolean_indexing(key, value)
# handle basic and advanced indexing
if self.ndim == 0:
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
value.copyto(self)
elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:
if isinstance(value, _np.generic) or value.shape != self.shape:
value = value.reshape(self.shape)
self._sync_copyfrom(value)
else:
raise ValueError('setting an array element with a sequence.')
else:
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend == _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means an zero dim must be expanded
# prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be expanded
# prepend actually has no influence on __setitem__
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return # no action is needed
slc_key = tuple(idx for idx in key if idx is not None)
if len(slc_key) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(slc_key))
)
if len(slc_key) > self.ndim and self.ndim != 0:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(slc_key), self.ndim)
)
indexing_dispatch_code = get_indexing_dispatch_code(slc_key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
self._set_nd_basic_indexing(key, value) # function is inheritated from NDArray class
elif indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
pass # no action needed
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
self._set_np_advanced_indexing(key, value)
else:
raise ValueError(
'Indexing NDArray with index {} of type {} is not supported'
''.format(key, type(key))
)
def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):
"""Return a broadcast `ndarray` with same context and dtype as ``self``.
For setting item, The returned `ndarray` is squeezed according to squeeze_axes since the
value_nd is assigned to not yet expanded space in original array.
`value`: numeric types or array like.
`bcast_shape`: a shape tuple.
`squeeze_axes`: a sequence of axes to squeeze in the value array.
Note: mxnet.numpy.ndarray not support NDArray as assigned value.
"""
if isinstance(value, numeric_types):
value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)
elif isinstance(value, self.__class__):
value_nd = value.as_in_ctx(self.ctx)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.ctx, dtype=self.dtype)
except:
raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '
'object {} of type {}'.format(value, type(value)))
# For advanced indexing setitem, if there is None in indices, we need to squeeze the
# assigned value_nd since None is also ignored in slicing the original array.
if squeeze_axes and value_nd.ndim > len(bcast_shape):
squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])
value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))
# handle the cases like the following
# a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b
# b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed
if value_nd.ndim > len(bcast_shape):
squeeze_axes = []
for i in range(value_nd.ndim - len(bcast_shape)):
if value_nd.shape[i] == 1:
squeeze_axes.append(i)
else:
break
if squeeze_axes:
value_nd = value_nd.squeeze(squeeze_axes)
if value_nd.shape != bcast_shape:
if value_nd.size == 0:
value_nd = value_nd.reshape(bcast_shape)
else:
value_nd = value_nd.broadcast_to(bcast_shape)
return value_nd
    # --- Arithmetic, bitwise and rounding dunder methods. Binary operators are
    # wrapped with `wrap_mxnp_np_ufunc`, which converts an official NumPy
    # right-hand operand to an mxnet ndarray on this array's context first.
    # In-place variants write their result back into `self` via `out=self`.
    @wrap_mxnp_np_ufunc
    def __add__(self, other):
        """x.__add__(y) <=> x + y"""
        return add(self, other)
    @wrap_mxnp_np_ufunc
    def __iadd__(self, other):
        """x.__iadd__(y) <=> x += y"""
        if not self.writable:
            raise ValueError('trying to add to a readonly ndarray')
        return add(self, other, out=self)
    def __invert__(self):
        """x.__invert__() <=> ~x"""
        return invert(self)
    @wrap_mxnp_np_ufunc
    def __and__(self, other):
        """x.__and__(y) <=> x & y"""
        return bitwise_and(self, other)
    @wrap_mxnp_np_ufunc
    def __or__(self, other):
        """x.__or__(y) <=> x | y"""
        return bitwise_or(self, other)
    @wrap_mxnp_np_ufunc
    def __xor__(self, other):
        """x.__xor__(y) <=> x ^ y"""
        return bitwise_xor(self, other)
    @wrap_mxnp_np_ufunc
    def __iand__(self, other):
        """x.__iand__(y) <=> x &= y"""
        return bitwise_and(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __ior__(self, other):
        """x.__ior__(y) <=> x |= y"""
        return bitwise_or(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __ixor__(self, other):
        """x.__ixor__(y) <=> x ^= y"""
        return bitwise_xor(self, other, out=self)
    def __round__(self, n=0):
        """x.__round__(n) <=> round(x, n)"""
        return round(self, decimals=n)
    def __abs__(self):
        """x.__abs__() <=> abs(x)"""
        return absolute(self)
    def __ceil__(self):
        """x.__ceil__() <=> math.ceil(x)"""
        return ceil(self)
    def __floor__(self):
        """x.__floor__() <=> math.floor(x)"""
        return floor(self)
    def __trunc__(self):
        """x.__trunc__() <=> math.trunc(x)"""
        return trunc(self)
    @wrap_mxnp_np_ufunc
    def __sub__(self, other):
        """x.__sub__(y) <=> x - y"""
        return subtract(self, other)
    @wrap_mxnp_np_ufunc
    def __isub__(self, other):
        """x.__isub__(y) <=> x -= y"""
        if not self.writable:
            raise ValueError('trying to subtract from a readonly ndarray')
        return subtract(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __rsub__(self, other):
        """x.__rsub__(y) <=> y - x"""
        return subtract(other, self)
    @wrap_mxnp_np_ufunc
    def __mul__(self, other):
        """x.__mul__(y) <=> x * y"""
        return multiply(self, other)
    def __neg__(self):
        """x.__neg__() <=> -x"""
        return negative(self)
@wrap_mxnp_np_ufunc
def __imul__(self, other):
"""x.__imul__(y) <=> x *= y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return multiply(self, other, out=self)
    # --- Reflected, division, comparison and matmul dunder methods. Binary
    # operators are wrapped with `wrap_mxnp_np_ufunc` (NumPy right-hand
    # operands are converted before dispatch).
    @wrap_mxnp_np_ufunc
    def __rmul__(self, other):
        """x.__rmul__(y) <=> y * x"""
        return self.__mul__(other)
    @wrap_mxnp_np_ufunc
    def __div__(self, other):
        """x.__div__(y) <=> x / y (Python 2 division protocol)"""
        return divide(self, other)
    @wrap_mxnp_np_ufunc
    def __rdiv__(self, other):
        """x.__rdiv__(y) <=> y / x (Python 2 division protocol)"""
        return divide(other, self)
    @wrap_mxnp_np_ufunc
    def __idiv__(self, other):
        """x.__idiv__(y) <=> x /= y (Python 2 division protocol)"""
        return divide(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __truediv__(self, other):
        """x.__truediv__(y) <=> x / y"""
        return divide(self, other)
    @wrap_mxnp_np_ufunc
    def __rtruediv__(self, other):
        """x.__rtruediv__(y) <=> y / x"""
        return divide(other, self)
    @wrap_mxnp_np_ufunc
    def __itruediv__(self, other):
        """x.__itruediv__(y) <=> x /= y"""
        return divide(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __mod__(self, other):
        """x.__mod__(y) <=> x % y"""
        return mod(self, other)
    @wrap_mxnp_np_ufunc
    def __rmod__(self, other):
        """x.__rmod__(y) <=> y % x"""
        return mod(other, self)
    @wrap_mxnp_np_ufunc
    def __imod__(self, other):
        """x.__imod__(y) <=> x %= y"""
        return mod(self, other, out=self)
    @wrap_mxnp_np_ufunc
    def __pow__(self, other):
        """x.__pow__(y) <=> x ** y"""
        return power(self, other)
    @wrap_mxnp_np_ufunc
    def __rpow__(self, other):
        """x.__rpow__(y) <=> y ** x"""
        return power(other, self)
    @wrap_mxnp_np_ufunc
    def __eq__(self, other):
        """x.__eq__(y) <=> x == y"""
        return equal(self, other)
    def __hash__(self):
        # Unconditionally unhashable — presumably because ndarrays are
        # mutable and __eq__ returns an array rather than a bool.
        raise NotImplementedError
    @wrap_mxnp_np_ufunc
    def __ne__(self, other):
        """x.__ne__(y) <=> x != y"""
        return not_equal(self, other)
    @wrap_mxnp_np_ufunc
    def __gt__(self, other):
        """x.__gt__(y) <=> x > y"""
        return greater(self, other)
    @wrap_mxnp_np_ufunc
    def __ge__(self, other):
        """x.__ge__(y) <=> x >= y"""
        return greater_equal(self, other)
    @wrap_mxnp_np_ufunc
    def __lt__(self, other):
        """x.__lt__(y) <=> x < y"""
        return less(self, other)
    @wrap_mxnp_np_ufunc
    def __le__(self, other):
        """x.__le__(y) <=> x <= y"""
        return less_equal(self, other)
    @wrap_mxnp_np_ufunc
    def __matmul__(self, other):
        """x.__matmul__(y) <=> x @ y"""
        return matmul(self, other)
    @wrap_mxnp_np_ufunc
    def __rmatmul__(self, other):
        """x.__rmatmul__(y) <=> y @ x"""
        return matmul(other, self)
    @wrap_mxnp_np_ufunc
    def __imatmul__(self, other):
        """x.__imatmul__(y) <=> x @= y"""
        return matmul(self, other, out=self)
def __bool__(self):
num_elements = self.size
if num_elements == 0:
warnings.simplefilter('default')
warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'
' future this will result in an error.', DeprecationWarning)
return False
elif num_elements == 1:
return bool(self.item())
else:
raise ValueError("The truth value of an ndarray with multiple elements is ambiguous.")
__nonzero__ = __bool__
def __float__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return float(self.item())
def __int__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return int(self.item())
def __len__(self):
"""Number of elements along the first axis."""
shape = self.shape # pylint: disable=redefined-outer-name
if len(shape) == 0:
raise TypeError('len() of unsized object')
return self.shape[0]
    def __reduce__(self):
        # Pickle protocol: reconstruct via `ndarray(None)` and restore the
        # payload returned by __getstate__ through __setstate__.
        return ndarray, (None,), self.__getstate__()
    def item(self, *args):
        """Copy an element of an array to a standard Python scalar and return it.

        Parameters
        ----------
        *args : Arguments (variable number and type)
            none: in this case, the method only works for arrays with one element (a.size == 1),
            which element is copied into a standard Python scalar object and returned.
            int_type: this argument is interpreted as a flat index into the array, specifying which
            element to copy and return.
            tuple of int_types: functions as does a single int_type argument, except that the
            argument is interpreted as an nd-index into the array.

        Returns
        -------
        z : Standard Python scalar object
            A copy of the specified element of the array as a suitable Python scalar.
        """
        # TODO(junwu): no need to call asnumpy() on the whole array.
        return self.asnumpy().item(*args)
    def nonzero(self):
        """Return the indices of the elements that are non-zero.

        Refer to `numpy.nonzero` for full documentation.

        See Also
        --------
        numpy.nonzero : equivalent function
        """
        return nonzero(self)
    @property
    # pylint: disable= invalid-name, undefined-variable
    def T(self):
        """Same as self.transpose(). This always returns a copy of self."""
        return self.transpose()
    # pylint: enable= invalid-name, undefined-variable
    def all(self, axis=None, out=None, keepdims=False):
        """Delegate to the module-level `all` (mxnet.ndarray.numpy)."""
        return _mx_nd_np.all(self, axis=axis, out=out, keepdims=keepdims)
    def any(self, axis=None, out=None, keepdims=False):
        """Delegate to the module-level `any` (mxnet.ndarray.numpy)."""
        return _mx_nd_np.any(self, axis=axis, out=out, keepdims=keepdims)
    def as_nd_ndarray(self):
        """Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods."""
        # Shallow copy: the returned NDArray shares this array's data handle.
        hdl = NDArrayHandle()
        check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))
        return NDArray(handle=hdl, writable=self.writable)
    def as_np_ndarray(self):
        """A convenience function for creating a numpy ndarray from the current ndarray
        with zero copy. For this class, it just returns itself since it's already a
        numpy ndarray."""
        return self
    def __repr__(self):
        """
        Returns a string representation of the array.
        The dtype of the ndarray will be appended if it's inconsistent with current dtype.
        The context of the ndarray will be appended for devices other than CPU.
        Examples
        --------
        >>> from mxnet import np, npx
        >>> a = np.random.uniform(size=(2, 3))
        >>> a
        array([[0.5488135 , 0.5928446 , 0.71518934],
               [0.84426576, 0.60276335, 0.8579456 ]])
        >>> print(a)
        [[0.5488135  0.5928446  0.71518934]
         [0.84426576 0.60276335 0.8579456 ]]
        >>> a.dtype
        dtype('float32')
        >>> npx.set_np_float64()
        >>> a
        array([[0.5488135 , 0.5928446 , 0.71518934],
               [0.84426576, 0.60276335, 0.8579456 ]], dtype=float32)
        >>> npx.set_np_float64(default_float64=False)
        >>> a
        array([[0.5488135 , 0.5928446 , 0.71518934],
               [0.84426576, 0.60276335, 0.8579456 ]])
        >>> b = a.astype(np.float64)
        >>> b
        array([[0.54881352, 0.59284461, 0.71518934],
               [0.84426576, 0.60276335, 0.85794562]], dtype=float64)
        >>> print(b)
        [[0.54881352 0.59284461 0.71518934]
         [0.84426576 0.60276335 0.85794562]]
        >>> b.dtype
        dtype('float64')
        >>> c = a.copyto(npx.gpu(0))
        >>> c
        array([[0.5488135 , 0.5928446 , 0.71518934],
               [0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))
        >>> print(c)
        [[0.5488135  0.5928446  0.71518934]
         [0.84426576 0.60276335 0.8579456 ]] @gpu(0)
        >>> d = b.copyto(npx.gpu(0))
        >>> d
        array([[0.54881352, 0.59284461, 0.71518934],
               [0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))
        >>> print(d)
        [[0.54881352 0.59284461 0.71518934]
         [0.84426576 0.60276335 0.85794562]] @gpu(0)
        """
        array_str = self.asnumpy().__repr__()
        dtype = self.dtype
        default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
        # Normalize numpy's repr against MXNet's current default dtype:
        # drop a 'dtype=' suffix that merely restates the default, or append
        # one when numpy omitted it for a non-default, non-bool dtype.
        if 'dtype=' in array_str:
            if dtype == default_dtype:
                array_str = array_str[:array_str.rindex(',')] + ')'
        elif dtype not in (default_dtype, _np.bool_):
            array_str = array_str[:-1] + ', dtype={})'.format(dtype)
        context = self.ctx
        if context.device_type == 'cpu':
            return array_str
        # Non-CPU arrays additionally advertise their device context.
        return array_str[:-1] + ', ctx={})'.format(str(context))
def __str__(self):
"""Returns a string representation of the array."""
array_str = self.asnumpy().__str__()
context = self.ctx
if context.device_type == 'cpu' or self.ndim == 0:
return array_str
return '{array} @{ctx}'.format(array=array_str, ctx=context)
def __format__(self, fmt):
"""Return value.__format__(format_spec). Overwrite to include 0-d array"""
if self.ndim == 0:
return self.item().__format__(fmt)
elif len(fmt) == 0:
return self.__str__().__format__(fmt)
else:
raise TypeError("Cannot format mxnet.numpy.ndarray with format_spec")
    def attach_grad(self, grad_req='write'):  # pylint: disable=arguments-differ
        """Attach a gradient buffer to this ndarray, so that `backward`
        can compute gradient with respect to it.
        Parameters
        ----------
        grad_req : {'write', 'add', 'null'}
            How gradient will be accumulated.
            - 'write': gradient will be overwritten on every backward.
            - 'add': gradient will be added to existing value on every backward.
            - 'null': do not compute gradient for this NDArray.
        """
        # The gradient buffer matches this array's shape/dtype and starts at zero.
        grad = _mx_nd_np.zeros_like(self)  # pylint: disable=undefined-variable
        grad_req = _GRAD_REQ_MAP[grad_req]
        # Register this array (and its grad buffer) with the autograd engine.
        check_call(_LIB.MXAutogradMarkVariables(
            1, ctypes.pointer(self.handle),
            ctypes.pointer(mx_uint(grad_req)),
            ctypes.pointer(grad.handle)))
    @property
    def grad(self):
        """Returns gradient buffer attached to this ndarray."""
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
        # A null handle means no gradient buffer has been attached.
        if hdl.value is None:
            return None
        return _np_ndarray_cls(hdl)
    def detach(self):
        """Returns a new ndarray, detached from the current graph."""
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
        return _np_ndarray_cls(hdl)
    def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):  # pylint: disable=arguments-differ,unused-argument, too-many-arguments
        """
        Copy of the array, cast to a specified type.
        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        order : {'C', 'F', 'A', 'K'}, optional
            Controls the memory layout order of the result.
            'C' means C order, 'F' means Fortran order, 'A'
            means 'F' order if all the arrays are Fortran contiguous,
            'C' order otherwise, and 'K' means as close to the
            order the array elements appear in memory as possible.
            Default is 'K'.
        casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
            Controls what kind of data casting may occur. Defaults to 'unsafe'
            for backwards compatibility.
            * 'no' means the data types should not be cast at all.
            * 'equiv' means only byte-order changes are allowed.
            * 'safe' means only casts which can preserve values are allowed.
            * 'same_kind' means only safe casts or casts within a kind,
              like float64 to float32, are allowed.
            * 'unsafe' means any data conversions may be done.
        subok : bool, optional
            If True, then sub-classes will be passed-through (default), otherwise
            the returned array will be forced to be a base-class array.
        copy : bool, optional
            Default `True`. By default, astype always returns a newly
            allocated ndarray on the same context. If this is set to
            `False`, and the dtype requested is the same as the ndarray's
            dtype, the ndarray is returned instead of a copy.
        Returns
        -------
        arr_t : ndarray
            Unless `copy` is False and the other conditions for returning the input
            array are satisfied (see description for `copy` input parameter), `arr_t`
            is a new array of the same shape as the input array with `dtype`.
        Notes
        -----
        This function differs from the official `ndarray`'s ``astype`` function in the following
        aspects:
        - `order` only supports 'C' and 'K'.
        - `casting` only supports 'unsafe'.
        - `subok` only supports ``True``.
        """
        # Reject the unsupported subsets of the numpy signature up front.
        if order is not None and order != 'K' and order != 'C':
            raise ValueError('order must be either \'K\' or \'C\'')
        if casting != 'unsafe':
            raise ValueError('casting must be equal to \'unsafe\'')
        if not subok:
            raise ValueError('subok must be equal to True')
        if dtype is None:
            # NOTE(review): None maps to float32 regardless of the global
            # default-dtype setting used elsewhere (see __repr__) — confirm intended.
            dtype = _np.float32
        if not copy and _np.dtype(dtype) == self.dtype:
            return self
        return _npi.cast(self, dtype=dtype)
    def copyto(self, other):
        """Copies the value of this array to another array.
        If ``other`` is a ``ndarray`` object, then ``other.shape`` and
        ``self.shape`` should be the same. This function copies the value from
        ``self`` to ``other``.
        If ``other`` is a context, a new ``np.ndarray`` will be first created on
        the target context, and the value of ``self`` is copied.
        Parameters
        ----------
        other : ndarray or Context
            The destination array or context.
        Returns
        -------
        out: ndarray
            The copied array. If ``other`` is an ``ndarray``, then the return value
            and ``other`` will point to the same ``ndarray``.
        Examples
        --------
        >>> x = np.ones((2, 3))
        >>> y = np.zeros((2, 3), ctx=npx.gpu(0))
        >>> z = x.copyto(y)
        >>> z is y
        True
        >>> y
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]])
        """
        if isinstance(other, ndarray):
            if other.handle is self.handle:
                warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
                # NOTE(review): self-copy returns False rather than an ndarray,
                # unlike every other branch — confirm callers rely on this.
                return False
            return _npi.copyto(self, out=other)
        elif isinstance(other, Context):
            # Allocate a fresh buffer on the target device, then copy into it.
            hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))
            return _npi.copyto(self, out=hret)
        else:
            raise TypeError('copyto does not support type ' + str(type(other)))
    def asscalar(self):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``.
        Use :meth:`item` instead."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')
    def argmax(self, axis=None, out=None):  # pylint: disable=arguments-differ
        """Return indices of the maximum values along the given axis.
        Refer to `mxnet.numpy.argmax` for full documentation."""
        return argmax(self, axis, out)
    def as_in_context(self, context):
        """This function has been deprecated. Please refer to ``ndarray.as_in_ctx``."""
        warnings.warn('ndarray.as_in_context has been renamed to'
                      ' ndarray.as_in_ctx', DeprecationWarning)
        return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()
    def as_in_ctx(self, ctx):
        """Returns an array on the target device with the same value as this array.
        If the target context is the same as ``self.context``, then ``self`` is
        returned. Otherwise, a copy is made.
        Parameters
        ----------
        ctx : Context
            The target context.
        Returns
        -------
        ndarray
            The target array.
        """
        if self.ctx == ctx:
            return self
        return self.copyto(ctx)
    @property
    def ctx(self):
        """Device context of the array.
        Examples
        --------
        >>> x = np.array([1, 2, 3, 4])
        >>> x.ctx
        cpu(0)
        >>> type(x.ctx)
        <class 'mxnet.context.Context'>
        >>> y = np.zeros((2, 3), npx.gpu(0))
        >>> y.ctx
        gpu(0)
        """
        dev_typeid = ctypes.c_int()
        dev_id = ctypes.c_int()
        # Query device type/id from the backend for this array's handle.
        check_call(_LIB.MXNDArrayGetContext(
            self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
        return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
    @property
    def context(self):
        """This function has been deprecated. Please refer to ``ndarray.ctx``."""
        warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)
        return self.as_nd_ndarray().context
    def copy(self, order='C'):  # pylint: disable=arguments-differ
        """Return a copy of the array, keeping the same context.
        Parameters
        ----------
        order : str
            The memory layout of the copy. Currently, only c-contiguous memory
            layout is supported.
        Examples
        --------
        >>> x = np.ones((2, 3))
        >>> y = x.copy()
        >>> y
        array([[ 1.,  1.,  1.],
               [ 1.,  1.,  1.]])
        """
        if order != 'C':
            raise NotImplementedError('ndarray.copy only supports order=\'C\', while '
                                      'received {}'.format(str(order)))
        return self.copyto(self.ctx)
    def dot(self, b, out=None):
        """Dot product of two arrays.
        Refer to ``numpy.dot`` for full documentation."""
        return _mx_np_op.dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError("'{}' is an invalid keyword argument for this function"
.format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
    def reshape_like(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')
    def reshape_view(self, *shape, **kwargs):  # pylint: disable=redefined-outer-name
        """Returns a **view** of this array with a new shape without altering any data.
        Inherited from NDArray.reshape.
        """
        return super(ndarray, self).reshape(*shape, **kwargs)
    def zeros_like(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')
    def ones_like(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
    def repeat(self, repeats, axis=None):  # pylint: disable=arguments-differ
        """Repeat elements of an array.
        Refer to `mxnet.numpy.repeat` for full documentation."""
        return repeat(self, repeats=repeats, axis=axis)
    def pad(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')
    def swapaxes(self, axis1, axis2):  # pylint: disable=arguments-differ
        """Return a copy of the array with axis1 and axis2 interchanged.
        Refer to `mxnet.numpy.swapaxes` for full documentation.
        """
        return swapaxes(self, axis1, axis2)
    def split(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute split')
    def split_v2(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')
    def slice(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')
    def slice_axis(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')
    def slice_like(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')
    def slice_assign_scalar(self, value, begin, end, step):
        """
        Assign the scalar to a cropped subset of this ndarray. Value will broadcast to the shape of the cropped shape
        and will be cast to the same dtype of the ndarray.
        Parameters
        ----------
        value: numeric value
            Value and this ndarray should be of the same data type.
            The shape of rhs should be the same as the cropped shape of this ndarray.
        begin: tuple of begin indices
        end: tuple of end indices
        step: tuple of step lengths
        Returns
        -------
        This ndarray.
        Examples
        --------
        >>> x = np.ones((2, 2, 2))
        >>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))
        >>> y
        array([[[0., 0.],
                [1., 1.]],
               [[1., 1.],
                [1., 1.]]])
        >>> x
        array([[[0., 0.],
                [1., 1.]],
               [[1., 1.],
                [1., 1.]]])
        """
        # In-place: out=self makes the operator write into this array's buffer.
        return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)
    def slice_assign(self, rhs, begin, end, step):
        """
        Assign the rhs to a cropped subset of this ndarray in place.
        Returns the view of this ndarray.
        Parameters
        ----------
        rhs: ndarray.
            rhs and this NDArray should be of the same data type, and on the same device.
            The shape of rhs should be the same as the cropped shape of this ndarray.
        begin: tuple of begin indices
        end: tuple of end indices
        step: tuple of step lengths
        Returns
        -------
        out : ndarray
            This ndarray.
        Examples
        --------
        >>> x = np.ones((2, 2, 2))
        >>> assigned = np.zeros((1, 1, 2))
        >>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))
        >>> y
        array([[[0., 0.],
                [1., 1.]],
               [[1., 1.],
                [1., 1.]]])
        >>> x
        array([[[0., 0.],
                [1., 1.]],
               [[1., 1.],
                [1., 1.]]])
        """
        # In-place: out=self makes the operator write into this array's buffer.
        return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)
    def take(self, indices, axis=None, mode='raise'):  # pylint: disable=arguments-differ, redefined-outer-name
        """Take elements from the array along an axis.
        Refer to `mxnet.numpy.take` for full documentation; this array is
        passed as the data argument.
        """
        return take(self, indices, axis, mode=mode)
    def one_hot(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')
    def pick(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')
def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
raise sort(self, axis=axis, kind=kind, order=order)
    def topk(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')
    def argsort(self, axis=-1, kind=None, order=None):  # pylint: disable=arguments-differ
        """Return the indices that would sort this array.
        Refer to `mxnet.numpy.argsort` for full documentation; this array is
        passed as the data argument.
        """
        return argsort(self, axis=axis, kind=kind, order=order)
    def argmax_channel(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')
    def argmin(self, axis=None, out=None):  # pylint: disable=arguments-differ
        """Return indices of the minimum values along the given axis.
        Refer to `mxnet.numpy.argmin` for full documentation."""
        return argmin(self, axis, out)
    def clip(self, min=None, max=None, out=None):  # pylint: disable=arguments-differ
        """Return an array whose values are limited to [min, max].
        One of max or min must be given.
        """
        return clip(self, min, max, out=out)
    def abs(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')
    def sign(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')
    def flatten(self, order='C'):  # pylint: disable=arguments-differ
        """Return a copy of the array collapsed into one dimension.
        Equivalent to ``self.reshape(-1, order=order)``.
        """
        return self.reshape(-1, order=order)
    def shape_array(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')
    def size_array(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')
    def expand_dims(self, *args, **kwargs):  # pylint: disable=arguments-differ,unused-argument
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')
    def tile(self, reps):  # pylint: disable=arguments-differ
        """Construct an array by repeating A the number of times given by reps.
        Refer to `mxnet.numpy.tile` for full documentation."""
        return tile(self, reps=reps)
def transpose(self, *axes): # pylint: disable=arguments-differ
"""Permute the dimensions of an array."""
if len(axes) == 0:
axes = None
elif len(axes) == 1:
if isinstance(axes[0], (tuple, list)):
axes = axes[0]
elif axes[0] is None:
axes = None
return transpose(self, axes=axes)
    def flip(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')
    def depth_to_space(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')
    def space_to_depth(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')
    def diag(self, k=0, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')
    def sum(self, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
        """Return the sum of the array elements over the given axis."""
        return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    def nansum(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')
    def prod(self, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
        """Return the product of the array elements over the given axis."""
        return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
    def nanprod(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')
    def mean(self, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
        """Returns the average of the array elements along given axis."""
        return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    # pylint: disable=too-many-arguments, arguments-differ
    def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
        """Returns the standard deviation of the array elements along given axis."""
        return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
    def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
        """Returns the variance of the array elements, along given axis."""
        return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
    # pylint: enable=too-many-arguments, arguments-differ
    def cumsum(self, axis=None, dtype=None, out=None):
        """Return the cumulative sum of the elements along the given axis."""
        return _mx_nd_np.cumsum(self, axis=axis, dtype=dtype, out=out)
    def tolist(self):
        """Return the array as a (possibly nested) Python list, via ``asnumpy()``."""
        return self.asnumpy().tolist()
    def max(self, axis=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
        """Return the maximum along a given axis."""
        return _mx_nd_np.max(self, axis=axis, out=out, keepdims=keepdims)
    def min(self, axis=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
        """Return the minimum along a given axis.
        Refer to `mxnet.numpy.min` for full documentation.
        """
        return _mx_nd_np.min(self, axis=axis, out=out, keepdims=keepdims)
    def norm(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')
    def round(self, decimals=0, out=None, **kwargs):  # pylint: disable=arguments-differ
        """Evenly round to the given number of decimals.
        Refer to `mxnet.numpy.round` for full documentation; this array is
        passed as the data argument.
        """
        return round(self, decimals=decimals, out=out, **kwargs)
    def rint(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')
    def fix(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')
    def floor(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')
    def ceil(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')
    def trunc(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')
    def sin(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')
    def cos(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')
    def tan(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')
    def arcsin(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')
    def arccos(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')
    def arctan(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')
    def degrees(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')
    def radians(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')
    def sinh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')
    def cosh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')
    def tanh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')
    def arcsinh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')
    def arccosh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')
    def arctanh(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')
    def exp(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')
    def expm1(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')
    def log(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute log')
    def log10(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')
    def log2(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')
    def log1p(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')
    def sqrt(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')
    def rsqrt(self, *args, **kwargs):
        """Not supported on mxnet.numpy.ndarray; raises ``AttributeError``."""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cqrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rcqrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
    """Fluent-style alias for :py:func:`softmax` with this array as data.

    Not exposed on mxnet.numpy.ndarray; calling it always raises
    ``AttributeError``.
    """
    raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
    """Fluent-style alias for :py:func:`log_softmax` with this array as data.

    Not exposed on mxnet.numpy.ndarray; calling it always raises
    ``AttributeError``.
    """
    raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
    """Fluent-style alias for :py:func:`softmin` with this array as data.

    Not exposed on mxnet.numpy.ndarray; calling it always raises
    ``AttributeError``.
    """
    raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
    """Remove single-dimensional entries from the shape of a.

    Parameters
    ----------
    axis : None or int or tuple of ints, optional
        Selects a subset of the length-one axes to remove; None removes
        all of them.
    """
    # Delegates to the module-level `squeeze` defined elsewhere in this file.
    return squeeze(self, axis=axis)
def broadcast_to(self, shape): # pylint: disable=redefined-outer-name
    """Broadcast this array to the given shape.

    Delegates to :py:func:`mxnet.ndarray.numpy.broadcast_to`; see the
    module-level ``broadcast_to`` in this file for the full contract.
    """
    return _mx_nd_np.broadcast_to(self, shape)
def broadcast_like(self, other):
    """Not supported for mxnet.numpy.ndarray; always raises ``AttributeError``."""
    raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
def _full(self, value):
    """
    Currently for internal use only. Implemented for __setitem__.
    Assign to self an array of self's same shape and type, filled with value.
    """
    # In-place fill: passing `out=self` makes the backend write the result
    # directly into this array's buffer instead of allocating a new one.
    return _mx_nd_np.full(self.shape, value, ctx=self.ctx, dtype=self.dtype, out=self)
# pylint: disable=redefined-outer-name
def _scatter_set_nd(self, value_nd, indices):
    """
    This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing
    """
    # Writes `value_nd` into `self` at the positions given by `indices` using the
    # backend scatter_set_nd operator; `out=self` performs the update in place.
    return _npi.scatter_set_nd(
        lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
    )
# pylint: enable=redefined-outer-name
@property
def shape(self):
    """Tuple of array dimensions, as reported by the base NDArray."""
    return super(ndarray, self).shape
@property
def ndim(self):
    """Number of array dimensions (the length of ``shape``)."""
    dims = self.shape
    return len(dims)
@property
def size(self):
    """Number of elements in the array."""
    # Defer to the base NDArray implementation of `size`.
    return super(ndarray, self).size
@property
def dtype(self):
    """Data-type of the array's elements.

    Returns
    -------
    numpy.dtype
        This NDArray's data type.

    Examples
    --------
    >>> x = np.zeros((2,3))
    >>> x.dtype
    dtype('float32')
    >>> y = np.zeros((2,3), dtype='int32')
    >>> y.dtype
    dtype('int32')
    """
    # Normalize the backend's dtype to an official numpy.dtype instance.
    return _np.dtype(super(ndarray, self).dtype)
def tostype(self, stype):
    """Not supported for mxnet.numpy.ndarray; always raises ``AttributeError``."""
    raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')
@set_module('mxnet.numpy')
def empty(shape, dtype=float, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Return a new array of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
    dtype : data-type, optional
        Desired output data-type, e.g., `numpy.int8`. Unlike official NumPy
        (whose default is `float64`), the default here follows the deep-learning
        convention: float32 when ``npx.is_np_default_dtype()`` is False,
        float64 when it is True.
    order : {'C'}, optional, default: 'C'
        Memory layout; only row-major (C-style) is supported.
    ctx : device context, optional
        Device on which the memory is allocated. Defaults to
        `mxnet.context.current_context()`.

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data of the given shape, dtype, and order.

    Examples
    --------
    >>> np.empty([2, 2])
    array([[ 0.000000e+00, -2.524355e-29],
           [ nan, -8.592023e+09]]) # uninitialized
    """
    if order != 'C':
        raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'
                                  .format(str(order)))
    # Normalize a bare int shape to a 1-tuple.
    if isinstance(shape, int):
        shape = (shape,)
    if ctx is None:
        ctx = current_context()
    # `float` (the Python builtin) is treated the same as "unspecified".
    if dtype is None or dtype is float:
        dtype = _np.float64 if is_np_default_dtype() else _np.float32
    # Allocate the buffer without zero-filling it.
    return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def array(object, dtype=None, ctx=None):
    """
    Create an array.

    Parameters
    ----------
    object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`
        An array, any object exposing the array interface, an object whose
        __array__ method returns an array, or any (nested) sequence.
    dtype : data-type, optional
        The desired data-type for the array.
        The default dtype is ``object.dtype`` if `object` is an `ndarray`, `float32` otherwise.
        Default dtype can be set to be consistent with offical numpy by `npx.set_np(dtype=True)`.

        - When npx.is_np_default_dtype() returns False, default dtype is float32;
        - When npx.is_np_default_dtype() returns True, default dtype is float64.
    ctx : device context, optional
        Device context on which the memory is allocated. Default is
        `mxnet.context.current_context()`.

    Returns
    -------
    out : ndarray
        An array object satisfying the specified requirements.

    Examples
    --------
    >>> np.array([1, 2, 3])
    array([1., 2., 3.])
    >>> np.array([[1, 2], [3, 4]])
    array([[1., 2.],
           [3., 4.]])
    >>> np.array([[1, 0], [0, 1]], dtype=bool)
    array([[ True, False],
           [False, True]])
    >>> np.array([1, 2, 3]).dtype
    dtype('float32')
    >>> npx.set_np(dtype=True)
    >>> np.array([1, 2, 3]).dtype
    dtype('float64')
    """
    if ctx is None:
        ctx = current_context()
    # Step 1: resolve the dtype for official-NumPy inputs (mxnet ndarrays are a
    # subclass of neither branch below when npx numpy semantics differ).
    if isinstance(object, _np.ndarray):
        if is_np_default_dtype():
            dtype = object.dtype if dtype is None else dtype
        else:
            # NOTE(review): `object.dtype is _np.float64` compares a dtype
            # *instance* against the scalar *type* with `is`, which is always
            # False; the effective behavior is simply "float32 when dtype is
            # None". Confirm whether float64 downcasting was intended.
            dtype = _np.float32 if dtype is None or object.dtype is _np.float64 else dtype
    # Step 2: dispatch on the input kind.
    if isinstance(object, ndarray):
        dtype = object.dtype if dtype is None else dtype
    elif isinstance(object, NDArray):
        raise ValueError("If you're trying to create a mxnet.numpy.ndarray "
                         "from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.")
    else:
        if dtype is None:
            default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
            dtype = object.dtype if hasattr(object, "dtype") else default_dtype
        try:
            # Let official NumPy coerce arbitrary array-likes first.
            object = _np.array(object, dtype=dtype)
        except Exception as e:
            # printing out the error raised by official NumPy's array function
            # for transparency on users' side
            raise TypeError('{}'.format(str(e)))
    # Step 3: allocate the destination and copy the data in.
    ret = empty(object.shape, dtype=dtype, ctx=ctx)
    if len(object.shape) == 0:
        # 0-d arrays need the explicit empty-tuple index for assignment.
        ret[()] = object
    else:
        ret[:] = object
    return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def shape(a):
    """Return the shape of an array.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    shape : tuple of ints
        Length of each of the array's dimensions.

    See Also
    --------
    ndarray.shape : Equivalent array method.

    Examples
    --------
    >>> np.shape(np.eye(3))
    (3, 3)
    >>> np.shape([[1, 2]])
    (1, 2)
    >>> np.shape(0)
    ()
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.shape(a)
@set_module('mxnet.numpy')
def zeros(shape, dtype=None, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Return a new array of the given shape and type, filled with zeros.

    Only row-major (C-style) storage is supported.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        Output type. Unlike official NumPy's float64-only default, this
        defaults to float32, or float64 when ``npx.is_np_default_dtype()``
        is True.
    order : {'C'}, optional, default: 'C'
        Memory layout; only 'C' is supported.
    ctx : Context, optional
        Device context (defaults to the current default context).

    Returns
    -------
    out : ndarray
        Array of zeros with the given shape, dtype, and ctx.

    Examples
    --------
    >>> np.zeros(5)
    array([0., 0., 0., 0., 0.])
    >>> np.zeros((2, 1))
    array([[0.],
           [0.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.zeros(shape, dtype=dtype, order=order, ctx=ctx)
@set_module('mxnet.numpy')
def ones(shape, dtype=None, order='C', ctx=None):  # pylint: disable=redefined-outer-name
    """Return a new array of the given shape and type, filled with ones.

    Only row-major (C-style) storage is supported.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the new array.
    dtype : str or numpy.dtype, optional
        Output type. Unlike official NumPy's float64-only default, this
        defaults to float32, or float64 when ``npx.is_np_default_dtype()``
        is True.
    order : {'C'}, optional, default: 'C'
        Memory layout; only 'C' is supported.
    ctx : Context, optional
        Device context (defaults to the current default context).

    Returns
    -------
    out : ndarray
        Array of ones with the given shape, dtype, and ctx.

    Examples
    --------
    >>> np.ones(5)
    array([1., 1., 1., 1., 1.])
    >>> np.ones((2, 1))
    array([[1.],
           [1.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.ones(shape, dtype=dtype, order=order, ctx=ctx)
@set_module('mxnet.numpy')
def broadcast_to(array, shape):  # pylint: disable=redefined-outer-name
    """Broadcast an array to a new shape.

    Parameters
    ----------
    array : ndarray or scalar
        The array to broadcast.
    shape : tuple
        The shape of the desired array.

    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous, and more than one element may refer to
        a single memory location.

    Raises
    ------
    MXNetError
        If the array is not compatible with the new shape according to
        NumPy's broadcasting rules.
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.broadcast_to(array, shape)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):
    """Return a new array of the given shape and type, filled with `fill_value`.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the new array, e.g., ``(2, 3)`` or ``2``.
    fill_value : scalar or ndarray
        Fill value.
    dtype : data-type, optional
        Desired type; `None` means `np.array(fill_value).dtype`.
    order : {'C'}, optional
        Memory layout; only C order is supported.
    ctx : Context, optional
        Device on which to allocate, e.g. the i-th GPU.
    out : ndarray or None, optional
        Destination array; must match the output shape and dtype. When
        omitted, a fresh array is allocated.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the given shape, dtype, and order.
        If `fill_value` is an ndarray, out will have the same context as
        `fill_value` regardless of the provided `ctx`.

    Notes
    -----
    Differs from official `numpy.full` by the extra `ctx` and `out`
    arguments and the lack of `order` selection.

    See Also
    --------
    empty : Return a new uninitialized array.
    ones : Return a new array setting values to one.
    zeros : Return a new array setting values to zero.

    Examples
    --------
    >>> np.full((2, 2), 10)
    array([[10., 10.],
           [10., 10.]])
    >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
    array([[2, 2],
           [2, 2]], dtype=int32)
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.full(shape, fill_value, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None):  # pylint: disable=W0621
    """Return a new, uninitialized array with the same shape and type as `prototype`.

    Parameters
    ----------
    prototype : ndarray
        Array whose shape and data-type define the result.
    dtype : data-type, optional
        Overrides the data type of the result.
    order : {'C'}, optional
        Memory layout; only C order is supported.
    subok : {False}, optional
        Sub-class output is not supported; must be False.
    shape : int or sequence of ints, optional
        Shape override (not supported at this moment).

    Returns
    -------
    out : ndarray
        Array of uninitialized (arbitrary) data with the same shape and
        type as `prototype`.

    See Also
    --------
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    empty : Return a new uninitialized array.

    Notes
    -----
    The returned array is *not* initialized; use `zeros_like` or
    `ones_like` for that. It may be marginally faster than the
    functions that do set values.

    Examples
    --------
    >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
    >>> np.empty_like(a)
    array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
           [2.0e-323, 2.5e-323, 3.0e-323]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.empty_like(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def all(a, axis=None, out=None, keepdims=False):
    """Test whether all array elements along a given axis evaluate to True.

    Parameters
    ----------
    a : ndarray
        Input array or object that can be converted to an array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the logical AND reduction runs; None
        (the default) reduces over all dimensions.
    out : ndarray, optional
        Alternate output array; must have the expected output shape and
        its type is preserved.
    keepdims : bool, optional
        If True, reduced axes are kept with size one so the result
        broadcasts correctly against the input.

    Returns
    -------
    all : ndarray, bool
        A new boolean or array, or a reference to `out` when provided.

    Examples
    --------
    >>> np.all([[True,False],[True,True]])
    False
    >>> np.all([[True,False],[True,True]], axis=0)
    array([ True, False])
    >>> np.all([-1, 4, 5])
    True
    >>> np.all([1.0, np.nan])
    True
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.all(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def any(a, axis=None, out=None, keepdims=False):
    """Test whether any array element along a given axis evaluates to True.

    Returns a single boolean unless `axis` is not None.

    Parameters
    ----------
    a : ndarray
        Input array or object that can be converted to an array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the logical OR reduction runs; None
        (the default) reduces over all dimensions.
    out : ndarray, optional
        Alternate output array; must have the expected output shape and
        its type is preserved.
    keepdims : bool, optional
        If True, reduced axes are kept with size one so the result
        broadcasts correctly against the input.

    Returns
    -------
    any : bool or ndarray
        A new boolean or ndarray, or a reference to `out` when provided.

    Examples
    --------
    >>> np.any([[True, False], [True, True]])
    True
    >>> np.any([[True, False], [False, False]], axis=0)
    array([ True, False])
    >>> np.any([-1, 0, 5])
    True
    >>> np.any(np.nan)
    True
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.any(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def identity(n, dtype=None, ctx=None):
    """Return the identity array: a square array with ones on the main diagonal.

    Parameters
    ----------
    n : int
        Number of rows (and columns) in the `n` x `n` output.
    dtype : data-type, optional
        Output type; defaults to float32, or float64 when
        ``npx.is_np_default_dtype()`` is True.
    ctx : Context, optional
        Device context (defaults to the current default context).

    Returns
    -------
    out : ndarray
        `n` x `n` array with its main diagonal set to one and all other
        elements 0.

    Examples
    --------
    >>> np.identity(3)
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.identity(n, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
    r"""Take elements from an array along an axis.

    With `axis` given, this matches "fancy" indexing along that axis:
    ``np.take(arr, indices, axis=3)`` is equivalent to
    ``arr[:,:,:,indices,...]``. Without fancy indexing, it is equivalent
    to this use of `ndindex`::

        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
        Nj = indices.shape
        for ii in ndindex(Ni):
            for jj in ndindex(Nj):
                for kk in ndindex(Nk):
                    out[ii + jj + kk] = a[ii + (indices[jj],) + kk]

    Parameters
    ----------
    a : ndarray
        The source array.
    indices : ndarray
        Indices of the values to extract; scalars are also allowed.
    axis : int, optional
        Axis over which to select values; by default the flattened
        input array is used.
    mode : {'clip', 'wrap'}, optional
        Out-of-bounds index behavior: 'clip' (default) clamps to the
        valid range (which disables negative indices); 'wrap' wraps
        around.
    out : ndarray, optional
        Destination of appropriate shape and dtype, if provided.

    Returns
    -------
    out : ndarray
        The returned array has the same type as `a`.

    Notes
    -----
    Differs from official `numpy.take`: only ndarray or scalar ndarray
    is accepted as valid input.

    Examples
    --------
    >>> a = np.array([4, 3, 5, 7, 6, 8])
    >>> indices = np.array([0, 1, 4])
    >>> np.take(a, indices)
    array([4., 3., 6.])
    >>> np.take(a, np.array([[0, 1], [2, 3]]))
    array([[4., 3.],
           [5., 7.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.take(a, indices, axis=axis, mode=mode, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
    """Find the sorted unique elements of an array.

    Besides the unique values, up to three optional outputs can be
    requested: the first-occurrence indices in `ar`, the inverse indices
    that reconstruct `ar`, and the per-value counts.

    Parameters
    ----------
    ar : ndarray
        Input array; flattened unless `axis` is specified.
    return_index : bool, optional
        If True, also return the indices of `ar` (along the specified
        axis, if provided) that give the unique values.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be
        used to reconstruct `ar`.
    return_counts : bool, optional
        If True, also return the number of times each unique item
        appears in `ar`.
    axis : int or None, optional
        The axis to operate on; None (the default) flattens `ar` first.
        With an axis, the subarrays indexed by it are treated as the
        elements of a 1-D array.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        First-occurrence indices; only when `return_index` is True.
    unique_inverse : ndarray, optional
        Reconstruction indices; only when `return_inverse` is True.
    unique_counts : ndarray, optional
        Occurrence counts; only when `return_counts` is True.

    Notes
    -----
    When an axis is specified, the subarrays along it are flattened in C
    order and compared lexicographically, so the result is sorted
    starting with the first element. Differs from official
    `numpy.unique`: only ndarray input is supported; object and
    structured arrays are not.

    Examples
    --------
    >>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
    array([1., 2., 3.])
    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    >>> np.unique(a, axis=0)
    array([[1., 0., 0.],
           [2., 3., 4.]])
    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_index=True)
    >>> u
    array([1., 2., 3., 4., 6.])
    >>> indices
    array([0, 1, 5, 3, 2], dtype=int64)
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.unique(ar, return_index=return_index, return_inverse=return_inverse,
                            return_counts=return_counts, axis=axis)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
    """Add arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be added; if shapes differ they must be
        broadcastable to a common shape.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    add : ndarray or scalar
        The element-wise sum of x1 and x2; a scalar if both inputs are
        scalars.

    Notes
    -----
    Automatic type promotion applies: with two float inputs the output
    is the more precise type; with one float input the result is that
    type; two integer (including boolean) inputs are not supported yet.

    Examples
    --------
    >>> np.add(1.0, 4.0)
    5.0
    >>> x1 = np.arange(9.0).reshape((3, 3))
    >>> x2 = np.arange(3.0)
    >>> np.add(x1, x2)
    array([[ 0., 2., 4.],
           [ 3., 5., 7.],
           [ 6., 8., 10.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.add(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
    """Subtract arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be subtracted from each other; if shapes differ
        they must be broadcastable to a common shape.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    subtract : ndarray or scalar
        The element-wise difference of x1 and x2; a scalar if both
        inputs are scalars.

    Notes
    -----
    Automatic type promotion applies: with two float inputs the output
    is the more precise type; with one float input the result is that
    type; two integer (including boolean) inputs are not supported yet.

    Examples
    --------
    >>> np.subtract(1.0, 4.0)
    -3.0
    >>> x1 = np.arange(9.0).reshape((3, 3))
    >>> x2 = np.arange(3.0)
    >>> np.subtract(x1, x2)
    array([[0., 0., 0.],
           [3., 3., 3.],
           [6., 6., 6.]])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.subtract(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
    """
    Multiply arguments element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
        a common shape (which may be the shape of one or the other).
    out : ndarray
        A location into which the result is stored. If provided, it must have a shape
        that the inputs broadcast to. If not provided or None, a freshly-allocated array
        is returned.

    Returns
    -------
    out : ndarray or scalar
        The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.

    Notes
    -----
    This operator now supports automatic type promotion. The resulting type will be determined
    according to the following rules:

    * If both inputs are of floating number types, the output is the more precise type.
    * If only one of the inputs is floating number type, the result is that type.
    * If both inputs are of integer types (including boolean), not supported yet.

    Examples
    --------
    >>> np.multiply(2.0, 4.0)
    8.0
    >>> x1 = np.arange(9.0).reshape((3, 3))
    >>> x2 = np.arange(3.0)
    >>> np.multiply(x1, x2)
    array([[ 0., 1., 4.],
           [ 0., 4., 10.],
           [ 0., 7., 16.]])
    """
    # Docstring fix: the Returns section previously described "the difference"
    # (copied from subtract); multiply returns the element-wise product.
    return _mx_nd_np.multiply(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
    """Return a true division of the inputs, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    out : ndarray or scalar
        A scalar if both x1 and x2 are scalars.

    Notes
    -----
    Automatic type promotion applies: with two float inputs the output
    is the more precise type; with one float input the result is that
    type; with two integer (including boolean) inputs the output is
    float32, or float64 when ``npx.is_np_default_dtype()`` is True.

    Examples
    --------
    >>> np.true_divide(x, 4)
    array([0. , 0.25, 0.5 , 0.75, 1. ])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.divide(x1, x2, out=out)
@set_module('mxnet.numpy')
def true_divide(x1, x2, out=None):
    """Return a true division of the inputs, element-wise.

    Unlike Python's traditional 'floor division', true division adjusts
    the output type to present the best answer regardless of input
    types.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    out : ndarray or scalar
        A scalar if both x1 and x2 are scalars.

    Notes
    -----
    Automatic type promotion applies: with two float inputs the output
    is the more precise type; with one float input the result is that
    type; with two integer (including boolean) inputs the output is
    float32, or float64 when ``npx.is_np_default_dtype()`` is True.

    Examples
    --------
    >>> x = np.arange(5)
    >>> np.true_divide(x, 4)
    array([0. , 0.25, 0.5 , 0.75, 1. ])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.true_divide(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
    """Return the element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    out : ndarray or scalar
        A scalar if both x1 and x2 are scalars.

    Examples
    --------
    >>> np.mod(np.arange(7), 5)
    array([0., 1., 2., 3., 4., 0., 1.])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.mod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmod(x1, x2, out=None, **kwargs):
    """Return the element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        Dividend array.
    x2 : ndarray or scalar
        Divisor array.
    out : ndarray
        Destination array of the broadcast shape, or None for a
        freshly-allocated result.

    Returns
    -------
    out : ndarray or scalar
        A scalar if both x1 and x2 are scalars.

    Examples
    --------
    >>> np.fmod(np.arange(7), 5)
    array([0., 1., 2., 3., 4., 0., 1.])
    """
    # Delegate to the imperative implementation in mxnet.ndarray.numpy.
    return _mx_nd_np.fmod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
    """Matrix product of two arrays.

    Parameters
    ----------
    a, b : ndarray
        Input arrays; scalars are not allowed.
    out : ndarray, optional
        Buffer receiving the result. If given, it must have a shape that
        matches the signature ``(n,k),(k,m)->(n,m)``; when omitted or
        ``None``, a new array is allocated.

    Returns
    -------
    y : ndarray
        The matrix product of the inputs. This is a scalar only when both
        ``a`` and ``b`` are 1-d vectors.

    Raises
    ------
    MXNetError
        If the last dimension of ``a`` is not the same size as the
        second-to-last dimension of ``b``, or if a scalar value is passed in.

    See Also
    --------
    tensordot :
        Sum products over arbitrary axes.
    dot :
        alternative matrix product with different broadcasting rules.
    einsum :
        Einstein summation convention.

    Notes
    -----
    The behavior depends on the arguments in the following way.

    - Two 2-D arguments are multiplied like conventional matrices.
    - An N-D argument, N > 2, is treated as a stack of matrices residing
      in the last two indexes and is broadcast accordingly.
    - A 1-D first argument is promoted to a matrix by prepending a 1 to
      its dimensions; after the multiplication the prepended 1 is removed.
    - A 1-D second argument is promoted to a matrix by appending a 1 to
      its dimensions; after the multiplication the appended 1 is removed.

    ``matmul`` differs from ``dot`` in two important ways:

    - Multiplication by scalars is not allowed; use ``multiply`` instead.
    - Stacks of matrices are broadcast together as if the matrices were
      elements, respecting the signature ``(n,k),(k,m)->(n,m)``:

    >>> a = np.ones([9, 5, 7, 4])
    >>> c = np.ones([9, 5, 4, 3])
    >>> np.dot(a, c).shape
    (9, 5, 7, 9, 5, 3)
    >>> np.matmul(a, c).shape
    (9, 5, 7, 3)
    >>> # n is 7, k is 4, m is 3

    Examples
    --------
    For 2-D arrays it is the matrix product:

    >>> a = np.array([[1, 0],
    ...               [0, 1]])
    >>> b = np.array([[4, 1],
    ...               [2, 2]])
    >>> np.matmul(a, b)
    array([[4., 1.],
           [2., 2.]])

    For 2-D mixed with 1-D, the result is the usual.

    >>> a = np.array([[1, 0],
    ...               [0, 1]])
    >>> b = np.array([1, 2])
    >>> np.matmul(a, b)
    array([1., 2.])
    >>> np.matmul(b, a)
    array([1., 2.])

    Broadcasting is conventional for stacks of arrays

    >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
    >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
    >>> np.matmul(a, b).shape
    (2, 2, 2)
    >>> np.matmul(a, b)[0, 1, 1]
    array(98.)
    >>> sum(a[0, 1, :] * b[0, :, 1])
    array(98.)

    Scalar multiplication raises an error.

    >>> np.matmul([1, 2], 3)
    Traceback (most recent call last):
    ...
    mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
    """
    return _mx_nd_np.matmul(a, b, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
    """Compute the element-wise remainder of division.

    Parameters
    ----------
    x1 : ndarray or scalar
        The dividend.
    x2 : ndarray or scalar
        The divisor.
    out : ndarray
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    out : ndarray or scalar
        Element-wise remainder; a scalar when both ``x1`` and ``x2`` are
        scalars.

    Examples
    --------
    >>> np.remainder(np.arange(7), 5)
    array([0., 1., 2., 3., 4., 0., 1.])
    """
    return _mx_nd_np.remainder(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
    """Raise first array elements to powers from the second array, element-wise.

    Parameters
    ----------
    x1 : ndarray or scalar
        The bases.
    x2 : ndarray or scalar
        The exponent.
    out : ndarray
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    out : ndarray or scalar
        The bases in ``x1`` raised to the exponents in ``x2``; a scalar
        when both ``x1`` and ``x2`` are scalars.

    Examples
    --------
    >>> x1 = np.arange(6)
    >>> np.power(x1, 3)
    array([  0.,   1.,   8.,  27.,  64., 125.])

    Raise the bases to different exponents.

    >>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])
    >>> np.power(x1, x2)
    array([ 0.,  1.,  8., 27., 16.,  5.])

    The effect of broadcasting.

    >>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
    >>> x2
    array([[1., 2., 3., 3., 2., 1.],
           [1., 2., 3., 3., 2., 1.]])
    >>> np.power(x1, x2)
    array([[ 0.,  1.,  8., 27., 16.,  5.],
           [ 0.,  1.,  8., 27., 16.,  5.]])
    """
    return _mx_nd_np.power(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
    """Return the lowest common multiple of ``|x1|`` and ``|x2|``.

    Parameters
    ----------
    x1, x2 : ndarrays or scalar values
        The arrays for computing the lowest common multiple. When
        ``x1.shape != x2.shape`` the two must be broadcastable to a
        common shape (which may be the shape of one or the other).
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    y : ndarray or scalar
        The lowest common multiple of the absolute value of the inputs;
        a scalar when both ``x1`` and ``x2`` are scalars.

    See Also
    --------
    gcd : The greatest common divisor

    Examples
    --------
    >>> np.lcm(12, 20)
    60
    >>> np.lcm(np.arange(6, dtype=int), 20)
    array([ 0, 20, 20, 60, 20, 20], dtype=int64)
    """
    return _mx_nd_np.lcm(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
    r"""Compute the trigonometric sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. When the input is an ndarray, the output dtype matches
        that of the input.

    Returns
    -------
    y : ndarray or scalar
        The sine of each element of ``x``; a scalar when ``x`` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sin(np.pi/2.)
    1.0
    >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
    array([0.        , 0.5       , 0.70710677, 0.86602545, 1.        ])
    """
    return _mx_nd_np.sin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
    r"""Compute the cosine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. When the input is an ndarray, the output dtype matches
        that of the input.

    Returns
    -------
    y : ndarray or scalar
        The corresponding cosine values; a scalar when ``x`` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cos(np.array([0, np.pi/2, np.pi]))
    array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.cos(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    return _mx_nd_np.cos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
    """Compute the hyperbolic sine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. When the input is an ndarray, the output dtype matches
        that of the input.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic sine values; a scalar when ``x`` is
        a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sinh(0)
    0.0
    >>> # Example of providing the optional output parameter
    >>> out1 = np.array([0], dtype='f')
    >>> out2 = np.sinh(np.array([0.1]), out1)
    >>> out2 is out1
    True
    """
    return _mx_nd_np.sinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
    """Compute the hyperbolic cosine, element-wise.

    Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. When the input is an ndarray, the output dtype matches
        that of the input.

    Returns
    -------
    y : ndarray or scalar
        The corresponding hyperbolic cosine values; a scalar when ``x``
        is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.cosh(0)
    1.0
    """
    return _mx_nd_np.cosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
    """
    Compute hyperbolic tangent element-wise.
    Equivalent to ``np.sinh(x)/np.cosh(x)``.

    Parameters
    ----------
    x : ndarray or scalar.
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it
        must have a shape that the inputs fill into. If not provided
        or None, a freshly-allocated array is returned. The dtype of the
        output and input must be the same.

    Returns
    ----------
    y : ndarray or scalar
        The corresponding hyperbolic tangent values.

    Notes
    -----
    If `out` is provided, the function writes the result into it,
    and returns a reference to `out`. (See Examples)
    - input x does not support complex computation (like imaginary number)

    >>> np.tanh(np.pi*1j)
    TypeError: type <type 'complex'> not supported

    Examples
    --------
    >>> np.tanh(np.array([0, np.pi]))
    array([0.       , 0.9962721])
    >>> np.tanh(np.pi)
    0.99627207622075
    >>> # Example of providing the optional output parameter illustrating
    >>> # that what is returned is a reference to said parameter
    >>> out1 = np.array(1)
    >>> out2 = np.tanh(np.array(0.1), out1)
    >>> out2 is out1
    True
    """
    return _mx_nd_np.tanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
    """Return the base-10 logarithm of the input array, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array or scalar.
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. When the input is an ndarray, the output dtype matches
        that of the input.

    Returns
    -------
    y : ndarray or scalar
        The logarithm to the base 10 of ``x``, element-wise. NaNs are
        returned where ``x`` is negative; a scalar when ``x`` is a scalar.

    Notes
    -----
    This function only supports input type of float.

    Examples
    --------
    >>> np.log10(np.array([1e-15, -3.]))
    array([-15.,  nan])
    """
    return _mx_nd_np.log10(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
    """
    Return the non-negative square-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose square-roots are required.
    out : ndarray, or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        An array of the same shape as `x`, containing the positive
        square-root of each element in `x`. This is a scalar if `x` is a scalar.

    Notes
    ----
    This function only supports input type of float.

    Examples
    --------
    >>> np.sqrt(np.array([1,4,9]))
    array([1., 2., 3.])

    >>> np.sqrt(np.array([4, -1, np.inf]))
    array([ 2., nan, inf])
    """
    return _mx_nd_np.sqrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
    """
    Return the cube-root of an array, element-wise.

    Parameters
    ----------
    x : ndarray
        The values whose cube-roots are required.
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
        A tuple (possible only as a keyword argument) must have length equal to the number of outputs.

    Returns
    ----------
    y : ndarray
        An array of the same shape as x, containing the cube-root of each element in x.
        If out was provided, y is a reference to it. This is a scalar if x is a scalar.

    Examples
    ----------
    >>> np.cbrt([1,8,27])
    array([ 1.,  2.,  3.])
    """
    return _mx_nd_np.cbrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
    r"""Calculate the absolute value, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of each element in
        ``x``; a scalar when ``x`` is a scalar.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.abs(x)
    array([1.2, 1.2])
    """
    return _mx_nd_np.abs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
    r"""
    Calculate the absolute value element-wise.
    This function returns the absolute values (positive magnitude) of the
    data in `x`. Complex values are not handled, use `absolute` to find the
    absolute values of complex data.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of
        each element in `x`. This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.fabs(-1)
    1.0
    >>> np.fabs(np.array([-1.2, 1.2]))
    array([ 1.2,  1.2])
    """
    return _mx_nd_np.fabs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
    """Calculate the absolute value, element-wise.

    ``np.abs`` is a shorthand for this function.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray, optional
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. A tuple (possible only as a keyword argument) must
        have length equal to the number of outputs.

    Returns
    -------
    absolute : ndarray
        An ndarray containing the absolute value of each element in ``x``.

    Examples
    --------
    >>> x = np.array([-1.2, 1.2])
    >>> np.absolute(x)
    array([ 1.2,  1.2])
    """
    return _mx_nd_np.absolute(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
    r"""Calculate the exponential of all elements in the input array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential of ``x``; a scalar when
        ``x`` is a scalar.

    Examples
    --------
    >>> np.exp(1)
    2.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.exp(x)
    array([0.36787945, 2.7182817 , 0.13533528, 7.389056  ])
    """
    return _mx_nd_np.exp(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
    r"""
    Calculate `exp(x) - 1` for all elements in the array.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array, element-wise exponential minus one: `out = exp(x) - 1`.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.expm1(1)
    1.718281828459045
    >>> x = np.array([-1, 1, -2, 2])
    >>> np.expm1(x)
    array([-0.63212056,  1.71828183, -0.86466472,  6.3890561])
    """
    return _mx_nd_np.expm1(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
    r"""Compute the inverse sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        `y`-coordinate on the unit circle.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have the same
        shape as the input; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    angle : ndarray or scalar
        Output array has the same shape and type as ``x``; a scalar when
        ``x`` is a scalar. The inverse sine of each element in ``x``, in
        radians and in the closed interval ``[-pi/2, pi/2]``.

    Examples
    --------
    >>> np.arcsin(1)     # pi/2
    1.5707963267948966
    >>> np.arcsin(-1)    # -pi/2
    -1.5707963267948966
    >>> np.arcsin(0)
    0.0

    Notes
    -----
    `arcsin` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that :math:`sin(z) = x`. The convention is to
    return the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, *arcsin* always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    The inverse sine is also known as `asin` or sin^{-1}.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.arcsin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
    the following aspects:

    - Only support ndarray or scalar now.
    - `where` argument is not supported.
    - Complex input is not supported.

    References
    ----------
    Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
    10th printing, New York: Dover, 1964, pp. 79ff.
    http://www.math.sfu.ca/~cbm/aands/
    """
    return _mx_nd_np.arcsin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
    """Compute the trigonometric inverse cosine, element-wise.

    The inverse of cos so that, if ``y = cos(x)``, then ``x = arccos(y)``.

    Parameters
    ----------
    x : ndarray
        x-coordinate on the unit circle. For real arguments, the domain
        is [-1, 1].
    out : ndarray, optional
        Buffer receiving the result. If given, it must have a shape that
        the inputs broadcast to; when omitted or ``None``, a new array is
        allocated. A tuple (possible only as a keyword argument) must
        have length equal to the number of outputs.

    Returns
    -------
    angle : ndarray
        The angle of the ray intersecting the unit circle at the given
        x-coordinate, in radians [0, pi]. This is a scalar if ``x`` is a
        scalar.

    Notes
    -----
    arccos is a multivalued function: for each x there are infinitely many
    numbers z such that cos(z) = x. The convention is to return the angle z
    whose real part lies in [0, pi].
    For real-valued input data types, arccos always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields nan and sets the invalid floating point error flag.
    The inverse cos is also known as acos or cos^-1.

    Examples
    --------
    >>> np.arccos([1, -1])
    array([ 0.        ,  3.14159265])
    """
    return _mx_nd_np.arccos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
    r"""
    Trigonometric inverse tangent, element-wise.
    The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Out has the same shape as `x`. It lies in
        ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctan` is a multi-valued function: for each `x` there are infinitely
    many numbers `z` such that tan(`z`) = `x`. The convention is to return
    the angle `z` whose real part lies in [-pi/2, pi/2].
    For real-valued input data types, `arctan` always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    For complex-valued input, we do not have support for them yet.
    The inverse tangent is also known as `atan` or tan^{-1}.

    Examples
    --------
    >>> x = np.array([0, 1])
    >>> np.arctan(x)
    array([0.       , 0.7853982])
    >>> np.pi/4
    0.7853981633974483
    """
    return _mx_nd_np.arctan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
    """Return an element-wise indication of the sign of a number.

    The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``.
    Only supports real numbers.

    Parameters
    ----------
    x : ndarray or a scalar
        Input values.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have the same
        shape and dtype as the input ndarray; when omitted or ``None``,
        a new array is allocated.

    Returns
    -------
    y : ndarray
        The sign of ``x``; a scalar when ``x`` is a scalar.

    Note
    -------
    - Only supports real number as input elements.
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([-5., 4.5])
    >>> np.sign(a)
    array([-1.,  1.])

    Scalars as input:

    >>> np.sign(4.0)
    1.0
    >>> np.sign(0)
    0

    Use ``out`` parameter:

    >>> b = np.zeros((2, ))
    >>> np.sign(a, out=b)
    array([-1.,  1.])
    >>> b
    array([-1.,  1.])
    """
    return _mx_nd_np.sign(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
    """Compute the natural logarithm, element-wise.

    The natural logarithm `log` is the inverse of the exponential
    function, so that ``log(exp(x)) = x``. The natural logarithm is
    logarithm in base `e`.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have the same
        shape and dtype as the input ndarray; when omitted or ``None``,
        a new array is allocated.

    Returns
    -------
    y : ndarray
        The natural logarithm of ``x``, element-wise; a scalar when
        ``x`` is a scalar.

    Notes
    -----
    Currently only supports data of real values and ``inf`` as input. Returns data of
    real value, ``inf``, ``-inf`` and ``nan`` according to the input.
    This function differs from the original `numpy.log
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
    >>> np.log(a)
    array([  0.,   1.,   2., -inf], dtype=float64)
    >>> # Using the default float32 dtype leads to slightly different behavior
    >>> a = np.array([1, np.exp(1), np.exp(2), 0])
    >>> np.log(a)
    array([  0.        ,   0.99999994,   2.        ,        -inf])
    >>> np.log(1)
    0.0
    """
    return _mx_nd_np.log(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
    """
    Round elements of the array to the nearest integer.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        A location into which the result is stored.
        If provided, it must have the same shape and type as the input.
        If not provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array is same shape and type as x. This is a scalar if x is a scalar.

    Notes
    -----
    This function differs from the original `numpy.rint
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
    the following way(s):

    - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.rint(a)
    array([-2., -2., -0.,  0.,  1.,  2.,  2.])
    """
    return _mx_nd_np.rint(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
    """
    Base-2 logarithm of x.

    Parameters
    ----------
    x : ndarray or scalar
        Input values.
    out : ndarray or None
        A location into which the result is stored.
        If provided, it must have the same shape and type as the input.
        If not provided or None, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray
        The logarithm base two of `x`, element-wise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.log2
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
    the following way(s):

    - only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> x = np.array([0, 1, 2, 2**4])
    >>> np.log2(x)
    array([-inf,   0.,   1.,   4.])
    """
    return _mx_nd_np.log2(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
    """Return the natural logarithm of one plus the input array, element-wise.

    Calculates ``log(1 + x)``.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        Buffer receiving the result. If given, it must have a shape that
        the inputs fill into; when omitted or ``None``, a new array is
        allocated. The dtype of the output and input must be the same.

    Returns
    -------
    y : ndarray or scalar
        Natural logarithm of ``1 + x``, element-wise; a scalar when
        ``x`` is a scalar.

    Notes
    -----
    For real-valued input, `log1p` is accurate also for `x` so small
    that `1 + x == 1` in floating-point accuracy.
    Logarithm is a multivalued function: for each `x` there is an infinite
    number of `z` such that `exp(z) = 1 + x`. The convention is to return
    the `z` whose imaginary part lies in `[-pi, pi]`.
    For real-valued input data types, `log1p` always returns real output.
    For each value that cannot be expressed as a real number or infinity,
    it yields ``nan`` and sets the `invalid` floating point error flag.
    cannot support complex-valued input.

    Examples
    --------
    >>> np.log1p(1e-99)
    1e-99
    >>> a = np.array([3, 4, 5])
    >>> np.log1p(a)
    array([1.3862944, 1.609438 , 1.7917595])
    """
    return _mx_nd_np.log1p(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
    """Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray
        Input value. Elements must be of real value.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have the same
        shape and dtype as the input ndarray; when omitted or ``None``,
        a new array is allocated.

    Returns
    -------
    y : ndarray
        The corresponding degree values; if `out` was supplied this is a
        reference to it. This is a scalar if ``x`` is a scalar.

    Notes
    -------
    This function differs from the original `numpy.degrees
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported.
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> rad = np.arange(12.) * np.pi / 6
    >>> np.degrees(rad)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> # Use specified ``out`` ndarray:
    >>> out = np.zeros((rad.shape))
    >>> np.degrees(rad, out)
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    >>> out
    array([  0.,  30.,  60.,  90., 120., 150., 180., 210., 240., 270., 300., 330.])
    """
    return _mx_nd_np.degrees(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
    r"""
    Convert angles from radians to degrees.

    Parameters
    ----------
    x : ndarray or scalar
        Angles in radians.
    out : ndarray or None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in degrees.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    "rad2deg(x)" is "x * 180 / pi".

    This function differs from the original numpy.rad2deg in the following aspects:

    - Only support float32 and float64.
    - `out` must be in the same size of input.

    Examples
    --------
    >>> np.rad2deg(np.pi/2)
    90.0
    """
    return _mx_nd_np.rad2deg(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
    """Convert angles from degrees to radians.

    Parameters
    ----------
    x : ndarray or scalar
        Input array in degrees.
    out : ndarray or None
        Buffer receiving the result. If given, it must have the same
        shape and type as the input; when omitted or ``None``, a new
        array is allocated.

    Returns
    -------
    y : ndarray
        The corresponding radian values; a scalar when ``x`` is a scalar.

    Notes
    -----
    This function differs from the original `numpy.radians
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
    the following way(s):

    - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported
    - broadcasting to `out` of different shape is currently not supported
    - when input is plain python numerics, the result will not be stored in the `out` param

    Examples
    --------
    >>> deg = np.arange(12.) * 30.
    >>> np.radians(deg)
    array([0.       , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
           3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
           dtype=float32)
    """
    return _mx_nd_np.radians(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
    r"""
    Convert angles from degrees to radians.

    Parameters
    ----------
    x : ndarray or scalar
        Angles in degrees.
    out : ndarray or None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The corresponding angle in radians.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    "deg2rad(x)" is "x * pi / 180".

    This function differs from the original numpy.deg2rad in the following aspects:

    - Only support float32 and float64.
    - `out` must be in the same size of input.

    Examples
    --------
    >>> np.deg2rad(180)
    3.1415927
    """
    return _mx_nd_np.deg2rad(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
    r"""Return the reciprocal of the argument, element-wise.

    Calculates ``1/x``.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose reciprocals are required.
    out : ndarray or None, optional
        Buffer receiving the result. If given, it must have the same
        shape as the input; when omitted or ``None``, a new array is
        allocated.

    Returns
    -------
    y : ndarray or scalar
        Output array has the same shape and type as ``x``; a scalar when
        ``x`` is a scalar.

    Examples
    --------
    >>> np.reciprocal(2.)
    0.5
    >>> x = np.array([1, 2., 3.33])
    >>> np.reciprocal(x)
    array([1.       , 0.5      , 0.3003003])

    Notes
    -----
    .. note::
        This function is not designed to work with integers.
    For integer arguments with absolute value larger than 1 the result is
    always zero because of the way Python handles integer division. For
    integer zero the result is an overflow.
    The output `ndarray` has the same `ctx` as the input `ndarray`.
    This function differs from the original `numpy.reciprocal
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
    the following aspects:

    - Only support ndarray and scalar now.
    - `where` argument is not supported.
    """
    return _mx_nd_np.reciprocal(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
    r"""Element-wise square of the input.

    Parameters
    ----------
    x : ndarray or scalar
        The values whose squares are required.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must
        have the same shape as the input; if omitted or None, a
        freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        Output with the same shape and type as x. This is a scalar if x is
        a scalar.

    Examples
    --------
    >>> np.square(2.)
    4.0
    >>> x = np.array([1, 2., -1])
    >>> np.square(x)
    array([1., 4., 1.])

    Notes
    -----
    The output `ndarray` shares the `ctx` of the input `ndarray`. Compared
    with the official `numpy.square
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_:

    - only ndarray and scalar inputs are supported
    - the `where` argument is not supported
    - complex input is not supported
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.square(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
    r"""
    Numerical negative, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored.
        If provided, it must have a shape that the inputs broadcast to.
        If not provided or None, a freshly-allocated array is returned.
        A tuple (possible only as a keyword argument) must have length
        equal to the number of outputs.

    Returns
    -------
    y : ndarray or scalar
        Returned array or scalar: ``y = -x``. This is a scalar if x is a scalar.

    Examples
    --------
    >>> np.negative(1)
    -1
    """
    # Forward **kwargs for consistency with the sibling unary wrappers so
    # arguments supplied through wrap_np_unary_func are not dropped.
    return _mx_nd_np.negative(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
    """
    Round an array of floats element-wise to nearest integer towards zero.
    The rounded values are returned as floats.

    Parameters
    ----------
    x : ndarray
        An array of floats to be rounded.
    out : ndarray, optional
        Output array.

    Returns
    -------
    y : ndarray or scalar
        An array with the same shape as `x`, holding the values of `x`
        rounded towards zero. This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> np.fix(3.14)
    3
    """
    # Forward **kwargs for consistency with the sibling unary wrappers.
    return _mx_nd_np.fix(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
    r"""Compute the tangent element-wise.

    Equivalent to ``np.sin(x)/np.cos(x)`` applied element-wise.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or none, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or None,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.

    Returns
    -------
    y : ndarray
        The corresponding tangent values. This is a scalar if x is a scalar.

    Examples
    --------
    >>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))
    array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.tan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
    r"""Return the ceiling of the input, element-wise.

    The ceil of each element `x` is the smallest integer `i` with
    `i >= x`, often written :math:`\lceil x \rceil`.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs fill into. If not provided or None,
        a freshly-allocated array is returned. The dtype of the output and
        input must be the same.

    Returns
    -------
    y : ndarray or scalar
        The ceiling of each element in `x`, with `float` dtype.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.ceil(a)
    array([-1., -1., -0.,  1.,  2.,  2.,  2.])
    >>> # if you use parameter out, x and out must be ndarray.
    >>> a = np.array(1)
    >>> np.ceil(np.array(3.5), a)
    array(4.)
    >>> a
    array(4.)
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.ceil(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
    r"""Return the floor of the input, element-wise.

    The floor of each element `x` is the largest integer `i` with
    `i <= x`, often written :math:`\lfloor x \rfloor`.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None
        A location into which the result is stored. If provided, it must
        have a shape that the inputs fill into. If not provided or None,
        a freshly-allocated array is returned. The dtype of the output and
        input must be the same.

    Returns
    -------
    y : ndarray or scalar
        The floor of each element in `x`, with `float` dtype.
        This is a scalar if `x` is a scalar.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.floor(a)
    array([-2., -2., -1.,  0.,  1.,  1.,  2.])
    >>> # if you use parameter out, x and out must be ndarray.
    >>> a = np.array(1)
    >>> np.floor(np.array(3.5), a)
    array(3.)
    >>> a
    array(3.)
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.floor(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
    r"""Compute bit-wise inversion, or bit-wise NOT, element-wise.

    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned. A tuple (possible
        only as a keyword argument) must have length equal to the number
        of outputs.

    Returns
    -------
    out : ndarray or scalar
        Result. This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True
    """
    # Same backend op as bitwise_not -- the two are aliases.
    return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
    r"""Compute bit-wise inversion, or bit-wise NOT, element-wise.

    Computes the bit-wise NOT of the underlying binary representation of
    the integers in the input arrays. This ufunc implements the C/Python
    operator ``~``.

    Parameters
    ----------
    x : array_like
        Only integer and boolean types are handled.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must
        have a shape that the inputs broadcast to. If not provided or
        `None`, a freshly-allocated array is returned. A tuple (possible
        only as a keyword argument) must have length equal to the number
        of outputs.

    Returns
    -------
    out : ndarray or scalar
        Result. This is a scalar if `x` is a scalar.

    See Also
    --------
    bitwise_and, bitwise_or, bitwise_xor
    logical_not
    binary_repr :
        Return the binary representation of the input number as a string.

    Examples
    --------
    We've seen that 13 is represented by ``00001101``.
    The invert or bit-wise NOT of 13 is then:

    >>> x = np.invert(np.array(13, dtype=np.uint8))
    >>> x
    242
    >>> np.binary_repr(x, width=8)
    '11110010'

    Notes
    -----
    `bitwise_not` is an alias for `invert`:

    >>> np.bitwise_not is np.invert
    True
    """
    # Delegate to the imperative ndarray implementation.
    result = _mx_nd_np.bitwise_not(x, out=out, **kwargs)
    return result
@set_module('mxnet.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
    r"""Return the truncated value of the input, element-wise.

    The truncated value of a scalar `x` is the nearest integer `i` which
    is closer to zero than `x` is; the fractional part of the signed
    number `x` is discarded.

    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    y : ndarray or scalar
        The truncated value of each element in `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Differences from the official numpy.trunc:

    - `where`, which indicates where to calculate, is not supported.
    - No automatic type casting: dtype of `out` must match the expected one.
    - No automatic broadcasting: shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result won't be stored in out.

    Examples
    --------
    >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
    >>> np.trunc(a)
    array([-1., -1., -0.,  0.,  1.,  1.,  2.])
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.trunc(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
    r"""Compute the truth value of NOT x element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Logical NOT is applied to the elements of `x`.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    y : bool or ndarray of bool
        Boolean result with the same shape as `x` of the NOT operation
        on elements of `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Differences from the official numpy.logical_not:

    - `where`, which indicates where to calculate, is not supported.
    - No automatic type casting: dtype of `out` must match the expected one.
    - No automatic broadcasting: shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result won't be stored in out.

    Examples
    --------
    >>> x= np.array([True, False, 0, 1])
    >>> np.logical_not(x)
    array([False,  True,  True, False])

    >>> x = np.arange(5)
    >>> np.logical_not(x<3)
    array([False, False, False,  True,  True])
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.logical_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic sine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    arcsinh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arcsinh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `sinh(z) = x`.

    For real-valued input data types, `arcsinh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.

    This function differs from the original numpy.arcsinh in the following aspects:

    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. DType of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.

    Examples
    --------
    >>> a = np.array([3.2, 5.0])
    >>> np.arcsinh(a)
    array([1.8798637, 2.3124382])

    >>> np.arcsinh(0)
    0.0
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.arcsinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
    r"""Inverse hyperbolic cosine, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    arccosh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arccosh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `cosh(z) = x`. For real-valued input data
    types, `arccosh` always returns real output; for each value that cannot
    be expressed as a real number or infinity, it yields ``nan`` and sets
    the `invalid` floating point error flag.

    Differences from the official numpy.arccosh:

    - `where`, which indicates where to calculate, is not supported.
    - Complex-valued input is not supported.
    - No automatic type casting: dtype of `out` must match the expected one.
    - No automatic broadcasting: shape of `out` must match the expected one.
    - If `x` is a plain Python numeric, the result won't be stored in out.

    Examples
    --------
    >>> a = np.array([3.2, 5.0])
    >>> np.arccosh(a)
    array([1.8309381, 2.2924316])

    >>> np.arccosh(1)
    0.0
    """
    # Hand off to the imperative ndarray implementation.
    return _mx_nd_np.arccosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
    r"""
    Inverse hyperbolic tangent, element-wise.

    Parameters
    ----------
    x : ndarray or scalar
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.

    Returns
    -------
    arctanh : ndarray
        Array of the same shape as `x`.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    `arctanh` is a multivalued function: for each `x` there are infinitely
    many numbers `z` such that `tanh(z) = x`.

    For real-valued input data types, `arctanh` always returns real output.
    For each value that cannot be expressed as a real number or infinity, it
    yields ``nan`` and sets the `invalid` floating point error flag.

    This function differs from the original numpy.arctanh in the following aspects:

    - Do not support `where`, a parameter in numpy which indicates where to calculate.
    - Do not support complex-valued input.
    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot broadcast automatically. Shape of `out` must be same as the expected one.
    - If `x` is plain python numeric, the result won't be stored in out.

    Examples
    --------
    >>> a = np.array([0.0, -0.5])
    >>> np.arctanh(a)
    array([0., -0.54930615])

    >>> np.arctanh(0)
    0.0
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.arctanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def argsort(a, axis=-1, kind=None, order=None):
    """Return the indices that would sort an array.

    Performs an indirect sort along the given axis and returns an array of
    indices, of the same shape as `a`, that index data along that axis in
    sorted order.

    Parameters
    ----------
    a : ndarray
        Array to sort.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis). If
        None, the flattened array is used.
    kind : string, optional
        This argument can take any string, but it does not have any effect
        on the final result.
    order : str or list of str, optional
        Not supported yet, will raise NotImplementedError if not None.

    Returns
    -------
    index_array : ndarray, int
        Array of indices that sort `a` along the specified `axis`.
        If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
        More generally, ``np.take_along_axis(a, index_array, axis=axis)``
        always yields the sorted `a`, irrespective of dimensionality.

    Notes
    -----
    This operator does not support different sorting algorithms.

    Examples
    --------
    One dimensional array:

    >>> x = np.array([3, 1, 2])
    >>> np.argsort(x)
    array([1, 2, 0])

    Two-dimensional array:

    >>> x = np.array([[0, 3], [2, 2]])
    >>> ind = np.argsort(x, axis=0)  # sorts along first axis (down)
    >>> ind
    array([[0, 1],
           [1, 0]])
    >>> np.take_along_axis(x, ind, axis=0)  # same as np.sort(x, axis=0)
    array([[0, 2],
           [2, 3]])
    >>> ind = np.argsort(x, axis=1)  # sorts along last axis (across)
    >>> ind
    array([[0, 1],
           [0, 1]])
    >>> np.take_along_axis(x, ind, axis=1)  # same as np.sort(x, axis=1)
    array([[0, 3],
           [2, 2]])

    Indices of the sorted elements of a N-dimensional array:

    >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
    >>> ind
    (array([0, 1, 1, 0]), array([0, 0, 1, 1]))
    >>> x[ind]  # same as np.sort(x, axis=None)
    array([0, 2, 2, 3])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.argsort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def sort(a, axis=-1, kind=None, order=None):
    """Return a sorted copy of an array.

    Parameters
    ----------
    a : ndarray
        Array to be sorted.
    axis : int or None, optional
        Axis along which to sort. The default is -1 (the last axis). If
        None, the flattened array is used.
    kind : string, optional
        This argument can take any string, but it does not have any effect
        on the final result.
    order : str or list of str, optional
        Not supported yet, will raise NotImplementedError if not None.

    Returns
    -------
    sorted_array : ndarray
        Array of the same type and shape as `a`.

    Notes
    -----
    This operator does not support different sorting algorithms.

    Examples
    --------
    >>> a = np.array([[1,4],[3,1]])
    >>> np.sort(a)                 # sort along the last axis
    array([[1, 4],
           [1, 3]])
    >>> np.sort(a, axis=None)      # sort the flattened array
    array([1, 1, 3, 4])
    >>> np.sort(a, axis=0)         # sort along the first axis
    array([[1, 1],
           [3, 4]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.sort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def tensordot(a, b, axes=2):
    r"""Compute the tensor dot product along specified axes for arrays >= 1-D.

    Given two tensors `a` and `b` (each of dimension at least one) and an
    ndarray object containing two ndarray objects, ``(a_axes, b_axes)``,
    sum the products of `a`'s and `b`'s elements (components) over the
    axes specified by ``a_axes`` and ``b_axes``. When the third argument
    is a single non-negative integer-like scalar ``N``, the last ``N``
    dimensions of `a` and the first ``N`` dimensions of `b` are summed
    over.

    Parameters
    ----------
    a, b : ndarray, len(shape) >= 1
        Tensors to "dot".
    axes : int or (2,) ndarray
        * integer_like
          If an int N, sum over the last N axes of `a` and the first N
          axes of `b` in order. The sizes of the corresponding axes must
          match.
        * (2,) ndarray
          Or, a list of axes to be summed over, first sequence applying to
          `a`, second to `b`. Both elements ndarray must be of the same
          length.

    See Also
    --------
    dot, einsum

    Notes
    -----
    Three common use cases are:

    * ``axes = 0`` : tensor product :math:`a\otimes b`
    * ``axes = 1`` : tensor dot product :math:`a\cdot b`
    * ``axes = 2`` : (default) tensor double contraction :math:`a:b`

    When `axes` is integer_like, the sequence for evaluation will be: first
    the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
    Nth axis in `b` last.

    When there is more than one axis to sum over - and they are not the
    last (first) axes of `a` (`b`) - the argument `axes` should consist of
    two sequences of the same length, with the first axis to sum over
    given first in both sequences, the second axis second, and so forth.

    Examples
    --------
    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
    >>> c.shape
    (5, 2)
    >>> c
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.tensordot(a, b, axes=axes)
@set_module('mxnet.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None):  # pylint: disable=too-many-arguments
    """Compute the histogram of a set of data.

    Parameters
    ----------
    a : ndarray
        Input data. The histogram is computed over the flattened array.
    bins : int or ndarray
        If `bins` is an int, it defines the number of equal-width bins in
        the given range (10, by default). If `bins` is a sequence, it
        defines a monotonically increasing array of bin edges, including
        the rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.
    range : (float, float)
        The lower and upper range of the bins. Required when `bins` is an
        integer. Values outside the range are ignored. The first element of
        the range must be less than or equal to the second.
    normed : bool, optional
        Not supported yet, coming soon.
    weights : array_like, optional
        Not supported yet, coming soon.
    density : bool, optional
        Not supported yet, coming soon.

    Examples
    --------
    >>> np.histogram(np.arange(4), bins=np.arange(5))
    [array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed,
                               weights=weights, density=density)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def eye(N, M=None, k=0, dtype=float, **kwargs):
    """Return a 2-D array with ones on the diagonal and zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the output.
    M : int, optional
        Number of columns in the output. If None, defaults to N.
    k : int, optional
        Index of the diagonal: 0 (the default) refers to the main
        diagonal, a positive value refers to an upper diagonal, and a
        negative value to a lower diagonal.
    dtype : data-type, optional
        Data-type of the returned array.
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.

    Returns
    -------
    I : ndarray of shape (N,M)
        An array where all elements are equal to zero, except for the k-th
        diagonal, whose values are equal to one.

    Examples
    --------
    >>> np.eye(2, dtype=int)
    array([[1, 0],
           [0, 1]], dtype=int64)
    >>> np.eye(3, k=1)
    array([[0., 1., 0.],
           [0., 0., 1.],
           [0., 0., 0.]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.eye(N, M, k, dtype, **kwargs)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None):  # pylint: disable=too-many-arguments
    r"""Return evenly spaced numbers over a specified interval.

    Returns `num` evenly spaced samples, calculated over the interval
    [start, stop]. The endpoint of the interval can optionally be excluded.

    Parameters
    ----------
    start : real number
        The starting value of the sequence.
    stop : real number
        The end value of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of
        ``num + 1`` evenly spaced samples, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of samples to generate. Default is 50. Must be non-negative.
    endpoint : bool, optional
        If True, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    retstep : bool, optional
        If True, return ``(samples, step)``, where `step` is the spacing
        between samples.
    dtype : dtype, optional
        The type of the output array. If `dtype` is not given, infer the
        data type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Relevant only if
        `start` or `stop` are array-like. By default (0), the samples will
        be along a new axis inserted at the beginning. Use -1 to get an
        axis at the end.

    Returns
    -------
    samples : ndarray
        There are `num` equally spaced samples in the closed interval
        ``[start, stop]`` or the half-open interval ``[start, stop)``
        (depending on whether `endpoint` is True or False).
    step : float, optional
        Only returned if `retstep` is True. Size of spacing between
        samples.

    See Also
    --------
    arange : Similar to `linspace`, but uses a step size (instead of the
             number of samples).

    Examples
    --------
    >>> np.linspace(2.0, 3.0, num=5)
    array([2.  , 2.25, 2.5 , 2.75, 3.  ])
    >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
    array([2. , 2.2, 2.4, 2.6, 2.8])
    >>> np.linspace(2.0, 3.0, num=5, retstep=True)
    (array([2.  , 2.25, 2.5 , 2.75, 3.  ]), 0.25)

    Graphical illustration:

    >>> import matplotlib.pyplot as plt
    >>> N = 8
    >>> y = np.zeros(N)
    >>> x1 = np.linspace(0, 10, N, endpoint=True)
    >>> x2 = np.linspace(0, 10, N, endpoint=False)
    >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim([-0.5, 1])
    (-0.5, 1)
    >>> plt.show()

    Notes
    -----
    Differences from the official `numpy.linspace
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_:

    - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
    - axis could only be 0
    - there is an additional `ctx` argument to specify the device,
      e.g. the i-th GPU.
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.linspace(start, stop, num=num, endpoint=endpoint,
                              retstep=retstep, dtype=dtype, axis=axis, ctx=ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):
    r"""Return numbers spaced evenly on a log scale.

    In linear space, the sequence starts at ``base ** start`` (`base` to
    the power of `start`) and ends with ``base ** stop`` (see `endpoint`
    below). Non-scalar `start` and `stop` are now supported.

    Parameters
    ----------
    start : int or float
        ``base ** start`` is the starting value of the sequence.
    stop : int or float
        ``base ** stop`` is the final value of the sequence, unless
        `endpoint` is False. In that case, ``num + 1`` values are spaced
        over the interval in log-space, of which all but the last (a
        sequence of length `num`) are returned.
    num : integer, optional
        Number of samples to generate. Default is 50.
    endpoint : boolean, optional
        If true, `stop` is the last sample. Otherwise, it is not included.
        Default is True.
    base : float, optional
        The base of the log space. The step size between the elements in
        ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
        Default is 10.0.
    dtype : dtype
        The type of the output array. If `dtype` is not given, infer the
        data type from the other input arguments.
    axis : int, optional
        The axis in the result to store the samples. Relevant only if
        `start` or `stop` are array-like. By default (0), the samples will
        be along a new axis inserted at the beginning. Now, axis only
        support axis = 0.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    samples : ndarray
        `num` samples, equally spaced on a log scale.

    See Also
    --------
    arange : Similar to linspace, with the step size specified instead of
             the number of samples. Note that, when used with a float
             endpoint, the endpoint may or may not be included.
    linspace : Similar to logspace, but with the samples uniformly
               distributed in linear space, instead of log space.

    Notes
    -----
    Logspace is equivalent to the code

    >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
    ...
    >>> power(base, y).astype(dtype)
    ...

    Examples
    --------
    >>> np.logspace(2.0, 3.0, num=4)
    array([ 100.     ,  215.44347,  464.15887, 1000.     ])
    >>> np.logspace(2.0, 3.0, num=4, endpoint=False)
    array([100.     , 177.82794, 316.22775, 562.3413 ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0)
    array([4.       , 5.0396843, 6.349604 , 8.       ])
    >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
    array([4, 5, 6, 8], dtype=int32)
    >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
    array([ 100.     ,  215.44347,  464.15887, 1000.     ], ctx=gpu(0))
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.logspace(start, stop, num=num, endpoint=endpoint,
                              base=base, dtype=dtype, axis=axis, ctx=ctx)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def expand_dims(a, axis):
    """Expand the shape of an array.

    Insert a new axis that will appear at the `axis` position in the expanded array shape.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : int
        Position in the expanded axes where the new axis is placed.

    Returns
    -------
    res : ndarray
        Output array. The number of dimensions is one greater than that of
        the input array.

    See Also
    --------
    squeeze : The inverse operation, removing singleton dimensions
    reshape : Insert, remove, and combine dimensions, and resize existing ones

    Examples
    --------
    >>> x = np.array([1,2])
    >>> x.shape
    (2,)

    >>> y = np.expand_dims(x, axis=0)
    >>> y
    array([[1., 2.]])
    >>> y.shape
    (1, 2)

    >>> y = np.expand_dims(x, axis=1)  # Equivalent to x[:,np.newaxis]
    >>> y
    array([[1.],
           [2.]])
    >>> y.shape
    (2, 1)

    Note that some examples may use None instead of np.newaxis. These are the same objects:

    >>> np.newaxis is None
    True
    """
    # Delegate through _mx_nd_np like every other wrapper in this module,
    # instead of reaching into the private _npi namespace directly.
    return _mx_nd_np.expand_dims(a, axis)
@set_module('mxnet.numpy')
def tile(A, reps):
    r"""Construct an array by repeating A the number of times given by reps.

    If `reps` has length ``d``, the result will have dimension of
    ``max(d, A.ndim)``.

    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending
    new axes. So a shape (3,) array is promoted to (1, 3) for 2-D
    replication, or shape (1, 1, 3) for 3-D replication. If this is not the
    desired behavior, promote `A` to d-dimensions manually before calling
    this function.

    If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to
    it. Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is
    treated as (1, 1, 2, 2).

    Parameters
    ----------
    A : ndarray or scalar
        An input array or a scalar to repeat.
    reps : a single integer or tuple of integers
        The number of repetitions of `A` along each axis.

    Returns
    -------
    c : ndarray
        The tiled output array.

    Examples
    --------
    >>> a = np.array([0, 1, 2])
    >>> np.tile(a, 2)
    array([0., 1., 2., 0., 1., 2.])
    >>> np.tile(a, (2, 2))
    array([[0., 1., 2., 0., 1., 2.],
           [0., 1., 2., 0., 1., 2.]])
    >>> np.tile(a, (2, 1, 2))
    array([[[0., 1., 2., 0., 1., 2.]],
           [[0., 1., 2., 0., 1., 2.]]])

    >>> b = np.array([[1, 2], [3, 4]])
    >>> np.tile(b, 2)
    array([[1., 2., 1., 2.],
           [3., 4., 3., 4.]])
    >>> np.tile(b, (2, 1))
    array([[1., 2.],
           [3., 4.],
           [1., 2.],
           [3., 4.]])

    >>> c = np.array([1,2,3,4])
    >>> np.tile(c,(4,1))
    array([[1., 2., 3., 4.],
           [1., 2., 3., 4.],
           [1., 2., 3., 4.],
           [1., 2., 3., 4.]])

    Scalar as input:

    >>> np.tile(2, 3)
    array([2, 2, 2]) # repeating integer `2`
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.tile(A, reps)
@set_module('mxnet.numpy')
def trace(a, offset=0, axis1=0, axis2=1, out=None):
    """Return the sum along diagonals of the array.

    If `a` is 2-D, the sum along its diagonal with the given offset is
    returned, i.e., the sum of elements ``a[i,i+offset]`` for all i. If `a`
    has more than two dimensions, then the axes specified by `axis1` and
    `axis2` are used to determine the 2-D sub-arrays whose traces are
    returned. The shape of the resulting array is the same as that of `a`
    with `axis1` and `axis2` removed.

    Parameters
    ----------
    a : ndarray
        Input array, from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal. Can be both positive
        and negative. Defaults to 0.
    axis1, axis2 : int, optional
        Axes to be used as the first and second axis of the 2-D sub-arrays
        from which the diagonals should be taken. Defaults are the first
        two axes of `a`.
    out : ndarray, optional
        Array into which the output is placed. It must be of the right
        shape and right type to hold the output.

    Returns
    -------
    sum_along_diagonals : ndarray
        If `a` is 2-D, the sum along the diagonal is returned. If `a` has
        larger dimensions, then an array of sums along diagonals is
        returned.

    Examples
    --------
    >>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    >>> np.trace(a)
    array(3.)
    >>> a = np.arange(8).reshape((2, 2, 2))
    >>> np.trace(a)
    array([6., 8.])
    >>> a = np.arange(24).reshape((2, 2, 2, 3))
    >>> np.trace(a).shape
    (2, 3)
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.trace(a, offset=offset, axis1=axis1, axis2=axis2, out=out)
@set_module('mxnet.numpy')
def transpose(a, axes=None):
    """Permute the dimensions of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    axes : list of ints, optional
        Permutation of the axes.  When omitted, the axis order is
        reversed.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted.

    Notes
    -----
    This function differs from the original `numpy.transpose
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in
    the following way(s):

    - only ndarray is accepted as valid input, python iterables are not supported
    - the operator always returns an `ndarray` that does not share the memory with the input

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> np.transpose(x)
    array([[0., 2.],
           [1., 3.]])
    >>> np.transpose(np.ones((1, 2, 3)), (1, 0, 2)).shape
    (2, 1, 3)
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.transpose(a, axes)
@set_module('mxnet.numpy')
def repeat(a, repeats, axis=None):
    """Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : int or list of ints
        Number of repetitions for each element.  A list gives a
        per-element repetition count (see the last example).
    axis : int, optional
        Axis along which to repeat values.  By default the flattened
        input is used and a flat array is returned.

    Returns
    -------
    repeated_array : ndarray
        Output array with the same shape as `a` except along the given
        axis.

    See Also
    --------
    tile : Tile an array.

    Examples
    --------
    >>> np.repeat(3, 4)
    array([3, 3, 3, 3])
    >>> x = np.array([[1,2],[3,4]])
    >>> np.repeat(x, 2)
    array([1, 1, 2, 2, 3, 3, 4, 4])
    >>> np.repeat(x, 3, axis=1)
    array([[1, 1, 1, 2, 2, 2],
           [3, 3, 3, 4, 4, 4]])
    >>> np.repeat(x, [1, 2], axis=0)
    array([[1, 2],
           [3, 4],
           [3, 4]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.repeat(a, repeats, axis)
@set_module('mxnet.numpy')
def tril(m, k=0):
    """Lower triangle of an array.

    Return a copy of `m` with all elements above the `k`-th diagonal
    set to zero.

    Parameters
    ----------
    m : ndarray, shape (M, N)
        Input array.
    k : int, optional
        Diagonal above which to zero elements.  ``k = 0`` (default) is
        the main diagonal, ``k < 0`` is below it, ``k > 0`` above.

    Returns
    -------
    tril : ndarray, shape (M, N)
        Lower triangle of `m`, same shape and data-type as `m`.

    See Also
    --------
    triu : same thing, only for the upper triangle

    Examples
    --------
    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
    >>> np.tril(a, -1)
    array([[ 0.,  0.,  0.],
           [ 4.,  0.,  0.],
           [ 7.,  8.,  0.],
           [10., 11., 12.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.tril(m, k)
@set_module('mxnet.numpy')
def tri(N, M=None, k=0, dtype=None, ctx=None):  # pylint: disable=redefined-outer-name
    """An array with ones at and below the given diagonal, zeros elsewhere.

    Parameters
    ----------
    N : int
        Number of rows in the array.
    M : int, optional
        Number of columns; defaults to `N`.
    k : int, optional
        The sub-diagonal at and below which the array is filled.
        ``k = 0`` is the main diagonal, ``k < 0`` below it, ``k > 0``
        above.  The default is 0.
    dtype : dtype, optional
        Data type of the returned array.  The default is float.

    Returns
    -------
    tri : ndarray of shape (N, M)
        Array whose lower triangle is filled with ones and which is
        zero elsewhere; in other words ``T[i,j] == 1`` for
        ``j <= i + k``, 0 otherwise.

    Examples
    --------
    >>> np.tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    >>> np.tri(3, 5, -1)
    array([[0., 0., 0., 0., 0.],
           [1., 0., 0., 0., 0.],
           [1., 1., 0., 0., 0.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.tri(N, M, k, dtype, ctx)
@set_module('mxnet.numpy')
def triu_indices(n, k=0, m=None, ctx=None):  # pylint: disable=redefined-outer-name
    """Return the indices for the upper-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        Size of the arrays for which the returned indices will be
        valid.
    k : int, optional
        Diagonal offset (see `triu` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        Column dimension of the arrays for which the returned indices
        will be valid.  Defaults to `n`.

    Returns
    -------
    inds : tuple, shape(2) of ndarrays, shape(`n`)
        Two arrays, one per dimension, holding the row and column
        indices of the upper triangle.  Can be used to slice an
        ndarray of shape(`n`, `n`).

    See also
    --------
    tril_indices : similar function, for lower-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    triu, tril

    Examples
    --------
    Two index sets for 4x4 arrays — one from the main diagonal, one
    starting two diagonals further right:

    >>> iu1 = np.triu_indices(4)
    >>> iu2 = np.triu_indices(4, 2)

    Indexing with them:

    >>> a = np.arange(16).reshape(4, 4)
    >>> a[iu1]
    array([ 0,  1,  2, ..., 10, 11, 15])

    Assigning through them:

    >>> a[iu1] = -1
    >>> a
    array([[-1, -1, -1, -1],
           [ 4, -1, -1, -1],
           [ 8,  9, -1, -1],
           [12, 13, 14, -1]])
    >>> a[iu2] = -10
    >>> a
    array([[ -1,  -1, -10, -10],
           [  4,  -1,  -1, -10],
           [  8,   9,  -1,  -1],
           [ 12,  13,  14,  -1]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.triu_indices(n, k, m, ctx)
@set_module('mxnet.numpy')
def triu_indices_from(arr, k=0):
    """Return the indices for the upper-triangle of `arr`.

    See `triu_indices` for full details.

    Parameters
    ----------
    arr : ndarray, shape(N, N)
        The indices will be valid for square arrays.
    k : int, optional
        Diagonal offset (see `triu` for details).

    Returns
    -------
    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
        Indices for the upper-triangle of `arr`.

    See Also
    --------
    triu_indices, triu
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.triu_indices_from(arr, k)
@set_module('mxnet.numpy')
def tril_indices(n, k=0, m=None):
    """Return the indices for the lower-triangle of an (n, m) array.

    Parameters
    ----------
    n : int
        Row dimension of the arrays for which the returned indices
        will be valid.
    k : int, optional
        Diagonal offset (see `tril` for details).
    m : int, optional
        .. versionadded:: 1.9.0

        Column dimension of the arrays for which the returned indices
        will be valid.  Defaults to `n`.

    Returns
    -------
    inds : tuple of arrays
        Two arrays, one per dimension, holding the row and column
        indices of the lower triangle.

    See also
    --------
    triu_indices : similar function, for upper-triangular.
    mask_indices : generic function accepting an arbitrary mask function.
    tril, triu

    Examples
    --------
    Two index sets for 4x4 arrays — one from the main diagonal, one
    starting two diagonals further right:

    >>> il1 = np.tril_indices(4)
    >>> il2 = np.tril_indices(4, 2)

    Indexing with them:

    >>> a = np.arange(16).reshape(4, 4)
    >>> a[il1]
    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])

    Assigning through them:

    >>> a[il1] = -1
    >>> a
    array([[-1,  1,  2,  3],
           [-1, -1,  6,  7],
           [-1, -1, -1, 11],
           [-1, -1, -1, -1]])
    >>> a[il2] = -10
    >>> a
    array([[-10, -10, -10,   3],
           [-10, -10, -10, -10],
           [-10, -10, -10, -10],
           [-10, -10, -10, -10]])
    """
    # The column count defaults to the row count (square array).
    cols = n if m is None else m
    return tuple(_mx_nd_np.tril_indices(n, k, cols))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def triu(m, k=0):
    """Upper triangle of an array.

    Return a copy of a matrix with the elements below the `k`-th
    diagonal zeroed.  Please refer to the documentation for `tril` for
    further details.

    See Also
    --------
    tril : lower triangle of an array

    Examples
    --------
    >>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)
    array([[ 1,  2,  3],
           [ 4,  5,  6],
           [ 0,  8,  9],
           [ 0,  0, 12]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.triu(m, k)
@set_module('mxnet.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
    """Return evenly spaced values within a given interval.

    Values are generated within the half-open interval
    ``[start, stop)`` — `start` is included, `stop` is not.  For
    integer arguments this matches the built-in `range`, but an
    ndarray is returned rather than a list.

    Parameters
    ----------
    start : number, optional
        Start of the interval (included).  Defaults to 0.
    stop : number
        End of the interval (excluded), except in some cases where
        `step` is not an integer and floating point round-off affects
        the length of the output.
    step : number, optional
        Spacing between values: ``out[i+1] - out[i]``.  Defaults to 1.
        If `step` is given positionally, `start` must also be given.
    dtype : dtype
        The type of the output array.
        Default dtype can be set to be consistent with offical numpy by `npx.set_np(dtype=True)`.

        - When npx.is_np_default_dtype() returns False, default dtype is float32;
        - When npx.is_np_default_dtype() returns True, default dtype is int64.

    Returns
    -------
    arange : ndarray
        Array of evenly spaced values.  For floating point arguments
        the length is ``ceil((stop - start)/step)``; round-off may
        make the last element exceed `stop`.

    Examples
    --------
    >>> np.arange(3)
    array([0., 1., 2.])
    >>> np.arange(3, 7)
    array([3., 4., 5., 6.])
    >>> np.arange(3, 7, 2)
    array([3., 5.])
    >>> np.arange(3).dtype
    dtype('float32')
    >>> npx.set_np(dtype=True)
    >>> np.arange(3).dtype
    dtype('int64')
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.arange(start, stop, step, dtype, ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def split(ary, indices_or_sections, axis=0):
    """Split an array into multiple sub-arrays.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D Python tuple, list or set.
        An integer N divides the array into N equal pieces along
        `axis`; if an equal split is impossible an error is raised.
        A 1-D sequence of sorted integers gives the split points along
        `axis`: for example ``[2, 3]`` with ``axis=0`` yields

          - ary[:2]
          - ary[2:3]
          - ary[3:]

        An index past the end of the axis produces an empty sub-array.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is an integer that does not evenly
        divide the axis.

    See Also
    --------
    hsplit : Split array into multiple sub-arrays horizontally (column-wise).
    vsplit : Split array into multiple sub-arrays vertically (row wise).
    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
    >>> np.split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def array_split(ary, indices_or_sections, axis=0):
    """Split an array into multiple sub-arrays.

    Unlike `split`, an integer section count does not have to divide
    the axis evenly: for an axis of length l split into n sections,
    the first ``l % n`` sub-arrays have size ``l//n + 1`` and the rest
    have size ``l//n``.

    A 1-D sequence of sorted integers gives the split points along
    `axis`: for example ``[2, 3]`` with ``axis=0`` yields

      - ary[:2]
      - ary[2:3]
      - ary[3:]

    An index past the end of the axis produces an empty sub-array.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D Python tuple, list or set.
        Param used to determine the number and size of the subarray.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.array_split(x, 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
    >>> np.array_split(x, [3, 5, 6, 8])
    [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
    >>> np.array_split(np.arange(8.0), 3)
    [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
    >>> np.array_split(np.arange(7.0), 3)
    [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.array_split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def vsplit(ary, indices_or_sections):
    """Split an array into multiple sub-arrays vertically (row-wise).

    ``vsplit`` is equivalent to ``split`` with ``axis=0`` (default):
    the array is always split along the first axis regardless of its
    dimension.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal pieces along
        axis 0; if an equal split is impossible an error is raised.
        A 1-D sequence of sorted integers gives the split points along
        axis 0: for example ``[2, 3]`` yields

          - ary[:2]
          - ary[2:3]
          - ary[3:]

        An index past the end of axis 0 raises an error.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Notes
    -----
    This function differs from the original `numpy.vsplit
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
    the following aspects:

    - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
      tuple and list.
    - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
      an error will be thrown.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)
    [array([[0., 1., 2., 3.],
            [4., 5., 6., 7.]]), array([[ 8.,  9., 10., 11.],
            [12., 13., 14., 15.]])]

    With a higher dimensional array the split is still along the first
    axis:

    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> np.vsplit(x, 2)
    [array([[[0., 1.],
             [2., 3.]]]), array([[[4., 5.],
             [6., 7.]]])]
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.vsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def dsplit(ary, indices_or_sections):
    """Split array into multiple sub-arrays along the 3rd axis (depth).

    Please refer to the `split` documentation.  `dsplit` is equivalent
    to `split` with ``axis=2``; the array must have at least 3
    dimensions.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1 - D Python tuple, list or set.
        An integer N divides the array into N equal pieces along
        axis 2; if an equal split is impossible an error is raised.
        A 1-D sequence of sorted integers gives the split points along
        axis 2: for example ``[2, 3]`` yields

          - ary[:, :, :2]
          - ary[:, :, 2:3]
          - ary[:, :, 3:]

        An index past the end of axis 2 raises an error.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Notes
    -----
    This function differs from the original `numpy.dsplit
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.dsplit.html>`_ in
    the following aspects:

    - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
      tuple and list.
    - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 2,
      an error will be thrown.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)
    [array([[[ 0.,  1.],
             [ 4.,  5.]],
            [[ 8.,  9.],
             [12., 13.]]]), array([[[ 2.,  3.],
             [ 6.,  7.]],
            [[10., 11.],
             [14., 15.]]])]
    >>> np.dsplit(x, np.array([3, 6]))
    [array([[[ 0.,  1.,  2.],
             [ 4.,  5.,  6.]],
            [[ 8.,  9., 10.],
             [12., 13., 14.]]]),
     array([[[ 3.],
             [ 7.]],
            [[11.],
             [15.]]]),
     array([], shape=(2, 2, 0), dtype=float64)]
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.dsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def concatenate(seq, axis=0, out=None):
    """Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    seq : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined.  If axis is
        None, arrays are flattened before use.  Default is 0.
    out : ndarray, optional
        If provided, the destination to place the result.  The shape
        must match what concatenate would have returned if no out
        argument were specified.

    Returns
    -------
    res : ndarray
        The concatenated array.

    See Also
    --------
    split : Split array into a list of multiple sub-arrays of equal size.
    hsplit : Split array into multiple sub-arrays horizontally (column wise)
    vsplit : Split array into multiple sub-arrays vertically (row wise)
    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
    stack : Stack a sequence of arrays along a new axis.
    hstack : Stack arrays in sequence horizontally (column wise)
    vstack : Stack arrays in sequence vertically (row wise)
    dstack : Stack arrays in sequence depth wise (along third dimension)

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[5, 6]])
    >>> np.concatenate((a, b), axis=0)
    array([[1., 2.],
           [3., 4.],
           [5., 6.]])
    >>> np.concatenate((a, b.T), axis=1)
    array([[1., 2., 5.],
           [3., 4., 6.]])
    >>> np.concatenate((a, b), axis=None)
    array([1., 2., 3., 4., 5., 6.])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.concatenate(seq, axis=axis, out=out)
@set_module('mxnet.numpy')
def append(arr, values, axis=None):  # pylint: disable=redefined-outer-name
    """Append values to the end of an array.

    Parameters
    ----------
    arr : ndarray
        Values are appended to a copy of this array.
    values : ndarray
        Values appended to a copy of `arr`.  Must have the correct
        shape (the same as `arr`, excluding `axis`).  When `axis` is
        not given, `values` may have any shape and is flattened first.
    axis : int, optional
        Axis along which `values` are appended.  When omitted, both
        `arr` and `values` are flattened before use.

    Returns
    -------
    append : ndarray
        A copy of `arr` with `values` appended along `axis`.  Note
        that `append` does not occur in-place: a new array is
        allocated and filled.  If `axis` is None, the result is a
        flattened array.

    Examples
    --------
    >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
    array([1., 2., 3., 4., 5., 6., 7., 8., 9.])

    When `axis` is specified, `values` must have the correct shape.

    >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
    array([[1., 2., 3.],
           [4., 5., 6.],
           [7., 8., 9.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.append(arr, values, axis=axis)
@set_module('mxnet.numpy')
def stack(arrays, axis=0, out=None):
    """Join a sequence of arrays along a new axis.

    The `axis` parameter gives the index of the new axis in the
    result: ``axis=0`` makes it the first dimension, ``axis=-1`` the
    last.

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.
    axis : int, optional
        The axis in the result array along which the input arrays are
        stacked.
    out : ndarray, optional
        If provided, the destination to place the result.  The shape
        must match what stack would have returned if no out argument
        were specified.

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.rand(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)
    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)
    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.stack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> np.stack((a, b), axis=-1)
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.stack(arrays, axis=axis, out=out)
@set_module('mxnet.numpy')
def vstack(arrays, out=None):
    r"""Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate` and `stack`
    provide more general stacking and concatenation operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.
    out : ndarray, optional
        NOTE(review): accepted for numpy signature compatibility but
        currently ignored — it is not forwarded to the underlying op
        below.  Confirm whether ``_mx_nd_np.vstack`` supports ``out``
        before relying on it.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.vstack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.vstack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    # Delegates to the ndarray-level implementation; `out` is dropped here.
    return _mx_nd_np.vstack(arrays)
@set_module('mxnet.numpy')
def row_stack(arrays):
    """Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D
    arrays of shape `(N,)` have been reshaped to `(1,N)`.  Rebuilds
    arrays divided by `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions,
    e.g. pixel data with height (first axis), width (second axis) and
    r/g/b channels (third axis).  `concatenate` and `stack` provide
    more general stacking and concatenation operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        The arrays must have the same shape along all but the first
        axis.  1-D arrays must have the same length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at
        least 2-D.

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([2, 3, 4])
    >>> np.row_stack((a, b))
    array([[1., 2., 3.],
           [2., 3., 4.]])
    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[2], [3], [4]])
    >>> np.row_stack((a, b))
    array([[1.],
           [2.],
           [3.],
           [2.],
           [3.],
           [4.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.row_stack(arrays)
@set_module('mxnet.numpy')
def column_stack(tup):
    """Stack 1-D arrays as columns into a 2-D array.

    Takes a sequence of 1-D arrays and stacks them as columns to make
    a single 2-D array.  2-D arrays are stacked as-is, just like with
    `hstack`; 1-D arrays are turned into 2-D columns first.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack.  All of them must have the same first
        dimension.

    Returns
    -------
    stacked : 2-D array
        The array formed by stacking the given arrays.

    See Also
    --------
    stack, hstack, vstack, concatenate

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.column_stack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.column_stack(tup)
@set_module('mxnet.numpy')
def hstack(arrays):
    """Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except
    for 1-D arrays, which are concatenated along the first axis.
    Rebuilds arrays divided by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions,
    e.g. pixel data with height (first axis), width (second axis) and
    r/g/b channels (third axis).  `concatenate`, `stack` and `block`
    provide more general stacking and concatenation operations.

    Parameters
    ----------
    arrays : sequence of ndarrays
        The arrays must have the same shape along all but the second
        axis, except 1-D arrays which can be any length.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    Examples
    --------
    >>> from mxnet import np,npx
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.hstack((a,b))
    array([1., 2., 3., 2., 3., 4.])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.hstack((a,b))
    array([[1., 2.],
           [2., 3.],
           [3., 4.]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.hstack(arrays)
@set_module('mxnet.numpy')
def dstack(arrays):
    """Stack arrays in sequence depth wise (along third axis).

    This is equivalent to concatenation along the third axis after 2-D
    arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D
    arrays of shape `(N,)` have been reshaped to `(1,N,1)`.  Rebuilds
    arrays divided by `dsplit`.

    This function makes most sense for arrays with up to 3 dimensions,
    e.g. pixel data with height (first axis), width (second axis) and
    r/g/b channels (third axis).  `concatenate`, `stack` and `block`
    provide more general stacking and concatenation operations.

    Parameters
    ----------
    arrays : sequence of arrays
        The arrays must have the same shape along all but the third
        axis.  1-D or 2-D arrays must have the same shape.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at
        least 3-D.

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((2,3,4))
    >>> np.dstack((a,b))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[2],[3],[4]])
    >>> np.dstack((a,b))
    array([[[1, 2]],
           [[2, 3]],
           [[3, 4]]])
    """
    # Route through the ndarray-level implementation like every other
    # stacking helper in this module (previously this called the _npi
    # backend op directly, bypassing the delegation layer).
    return _mx_nd_np.dstack(arrays)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
    """Return the element-wise maximum of the inputs with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared.  They must
        have the same shape, or shapes broadcastable to a single
        shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise.  This is a scalar if
        both x1 and x2 are scalars.

    Examples
    --------
    >>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([2., 5., 4.])
    >>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting
    array([[1. , 2. ],
           [0.5, 2. ]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.maximum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
    """Return the element-wise maximum of the inputs with broadcasting,
    ignoring NaNs.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared.  They must
        have the same shape, or shapes broadcastable to a single
        shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The maximum of x1 and x2, element-wise.  This is a scalar if
        both x1 and x2 are scalars.

    Examples
    --------
    >>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([2., 5., 4.])
    >>> np.fmax(np.eye(2), np.array([0.5, 2])) # broadcasting
    array([[1. , 2. ],
           [0.5, 2. ]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.fmax(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
    """Return the element-wise minimum of the inputs with broadcasting.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared.  They must
        have the same shape, or shapes broadcastable to a single
        shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The minimum of x1 and x2, element-wise.  This is a scalar if
        both x1 and x2 are scalars.

    Examples
    --------
    >>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([1., 3., 2.])
    >>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting
    array([[0.5, 0. ],
           [0. , 1. ]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.minimum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
    """Return the element-wise minimum of the inputs with broadcasting,
    ignoring NaNs.

    Parameters
    ----------
    x1, x2 : scalar or mxnet.numpy.ndarray
        The arrays holding the elements to be compared.  They must
        have the same shape, or shapes broadcastable to a single
        shape.

    Returns
    -------
    out : mxnet.numpy.ndarray or scalar
        The fmin of x1 and x2, element-wise.  This is a scalar if both
        x1 and x2 are scalars.

    Examples
    --------
    >>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))
    array([1., 3., 2.])
    >>> np.fmin(np.eye(2), np.array([0.5, 2])) # broadcasting
    array([[0.5, 0. ],
           [0. , 1. ]])
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.fmin(x1, x2, out=out)
@set_module('mxnet.numpy')
def max(a, axis=None, out=None, keepdims=False):
    """Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate.  By default the flattened input
        is used.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against the
        original `arr`.

    Returns
    -------
    max : ndarray
        Maximum of `a`.  A 0-d array when `axis` is None, otherwise an
        array of dimension ``a.ndim - 1``.

    See Also
    --------
    min :
        The minimum value of an array along a given axis, ignoring any nan.
    maximum :
        Element-wise maximum of two arrays, ignoring any nan.
    argmax :
        Return the indices of the maximum values.

    Notes
    -----
    NaN in the orginal `numpy` is denoted as nan and will be ignored.

    Don't use `max` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
    ``max(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> np.max(a)            # Maximum of the flattened array
    array(3.)
    >>> np.max(a, axis=0)    # Maxima along the first axis
    array([2., 3.])
    >>> np.max(a, axis=1)    # Maxima along the second axis
    array([1., 3.])
    >>> b = np.arange(5, dtype=np.float32)
    >>> b[2] = np.nan
    >>> np.max(b)            # nan is ignored
    array(4.)
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.max(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def min(a, axis=None, out=None, keepdims=False):
    """Return the minimum of an array or minimum along an axis.

    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate.  By default the flattened input
        is used.
    out : ndarray, optional
        Alternative output array in which to place the result.  Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts correctly against the
        original `arr`.

    Returns
    -------
    min : ndarray
        Minimum of `a`.  A 0-d array when `axis` is None, otherwise an
        array of dimension ``a.ndim - 1``.

    See Also
    --------
    max :
        The maximum value of an array along a given axis, ignoring any nan.
    minimum :
        Element-wise minimum of two arrays, ignoring any nan.

    Notes
    -----
    NaN in the orginal `numpy` is denoted as nan and will be ignored.

    Don't use `min` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
    ``min(a, axis=0)``.

    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> np.min(a)            # Minimum of the flattened array
    array(0.)
    >>> np.min(a, axis=0)    # Minima along the first axis
    array([0., 1.])
    >>> np.min(a, axis=1)    # Minima along the second axis
    array([0., 2.])
    >>> b = np.arange(5, dtype=np.float32)
    >>> b[2] = np.nan
    >>> np.min(b)            # nan is ignored
    array(0.)
    """
    # Delegate to the imperative ndarray-level implementation.
    return _mx_nd_np.min(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
    """Interchange two axes of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        The array with `axis1` and `axis2` interchanged. This is always a
        copy of the input array.

    Examples
    --------
    >>> x = np.array([[1,2,3]])
    >>> np.swapaxes(x,0,1)
    array([[1.],
           [2.],
           [3.]])
    >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> np.swapaxes(x,0,2)
    array([[[0., 4.],
            [2., 6.]],
           [[1., 5.],
            [3., 7.]]])
    """
    # The backend operator names its axis parameters dim1/dim2.
    return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.numpy')
def clip(a, a_min, a_max, out=None):
    """clip(a, a_min, a_max, out=None)

    Clip (limit) the values in an array.

    Given an interval, values outside the interval are clipped to the
    interval edges: with ``[0, 1]``, values below 0 become 0 and values
    above 1 become 1.

    Parameters
    ----------
    a : ndarray
        Array containing elements to clip.
    a_min : scalar or `None`
        Minimum value. If `None`, no clipping on the lower interval edge.
        At most one of `a_min` and `a_max` may be `None`.
    a_max : scalar or `None`
        Maximum value. If `None`, no clipping on the upper interval edge.
        At most one of `a_min` and `a_max` may be `None`.
    out : ndarray, optional
        The results will be placed in this array. It may be the input
        array for in-place clipping. `out` must be of the right shape
        to hold the output. Its type is preserved.

    Returns
    -------
    clipped_array : ndarray
        An array with the elements of `a`, but where values < `a_min` are
        replaced with `a_min`, and those > `a_max` with `a_max`.

    Notes
    -----
    array_like `a_min` and `a_max` are not supported.

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.clip(a, 1, 8)
    array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])
    >>> np.clip(a, 3, 6, out=a)
    array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
    """
    from numbers import Number
    if isinstance(a, Number):
        # A bare scalar has no ndarray backend: fall back to official numpy,
        # which hands back a python scalar (so `out` does not apply).
        return _np.clip(a, a_min, a_max, out=None)
    return _mx_nd_np.clip(a, a_min, a_max, out=out)
@set_module('mxnet.numpy')
def argmax(a, axis=None, out=None):
    r"""
    Returns the indices of the maximum values along an axis.

    Parameters
    ----------
    a : ndarray
        Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : ndarray or None, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of indices whose dtype is same as the input ndarray.
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    Notes
    -----
    In case of multiple occurrences of the maximum values, the indices
    corresponding to the first occurrence are returned.

    This function differs from the original `numpy.argmax
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> a
    array([[10., 11., 12.],
           [13., 14., 15.]])
    >>> np.argmax(a)
    array(5.)
    >>> np.argmax(a, axis=0)
    array([1., 1., 1.])
    >>> np.argmax(a, axis=1)
    array([2., 2.])

    >>> b = np.arange(6)
    >>> b[1] = 5
    >>> np.argmax(b)  # Only the first occurrence is returned.
    array(1.)

    Specify ``out`` ndarray:

    >>> a = np.arange(6).reshape(2,3) + 10
    >>> b = np.zeros((2,))
    >>> np.argmax(a, axis=1, out=b)
    array([2., 2.])
    """
    # Pass arguments by keyword for consistency with the sibling wrappers
    # (min/max/std/var) and to stay robust should the delegate's signature
    # gain parameters between `axis` and `out`.
    return _mx_nd_np.argmax(a, axis=axis, out=out)
@set_module('mxnet.numpy')
def argmin(a, axis=None, out=None):
    r"""
    Returns the indices of the minimum values along an axis.

    Parameters
    ----------
    a : ndarray
        Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
    axis : int, optional
        By default, the index is into the flattened array, otherwise
        along the specified axis.
    out : ndarray or None, optional
        If provided, the result will be inserted into this array. It should
        be of the appropriate shape and dtype.

    Returns
    -------
    index_array : ndarray of indices whose dtype is same as the input ndarray.
        Array of indices into the array. It has the same shape as `a.shape`
        with the dimension along `axis` removed.

    Notes
    -----
    In case of multiple occurrences of the minimum values, the indices
    corresponding to the first occurrence are returned.

    This function differs from the original `numpy.argmin
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
    the following aspects:

    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> a = np.arange(6).reshape(2,3) + 10
    >>> a
    array([[10., 11., 12.],
           [13., 14., 15.]])
    >>> np.argmin(a)
    array(0.)
    >>> np.argmin(a, axis=0)
    array([0., 0., 0.])
    >>> np.argmin(a, axis=1)
    array([0., 0.])

    >>> b = np.arange(6)
    >>> b[2] = 0
    >>> np.argmin(b)  # Only the first occurrence is returned.
    array(0.)

    Specify ``out`` ndarray:

    >>> a = np.arange(6).reshape(2,3) + 10
    >>> b = np.zeros((2,))
    >>> np.argmin(a, axis=1, out=b)
    array([0., 0.])
    """
    # The example above previously demonstrated np.argmax by mistake.
    # Pass arguments by keyword for consistency with the sibling wrappers
    # (min/max/std/var) and to stay robust should the delegate's signature
    # gain parameters between `axis` and `out`.
    return _mx_nd_np.argmin(a, axis=axis, out=out)
@set_module('mxnet.numpy')
def amax(a, axis=None, out=None, keepdims=False):
    """
    Return the maximum of an array or maximum along an axis.
    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate.  By default, flattened input is used.
    out : ndarray, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.
    Returns
    -------
    max : ndarray
        Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.
    See Also
    --------
    min :
        The minimum value of an array along a given axis, ignoring any nan.
    maximum :
        Element-wise maximum of two arrays, ignoring any nan.
    argmax :
        Return the indices of the maximum values.
    Notes
    -----
    NaN in the original `numpy` is denoted as nan and will be ignored.
    Don't use `max` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
    ``max(a, axis=0)``.
    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> a
    array([[0., 1.],
           [2., 3.]])
    >>> np.amax(a)           # Maximum of the flattened array
    array(3.)
    >>> np.amax(a, axis=0)   # Maxima along the first axis
    array([2., 3.])
    >>> np.amax(a, axis=1)   # Maxima along the second axis
    array([1., 3.])
    >>> b = np.arange(5, dtype=np.float32)
    >>> b[2] = np.nan
    >>> np.amax(b)           # nan is ignored
    array(4.)
    """
    return _mx_nd_np.amax(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def amin(a, axis=None, out=None, keepdims=False):
    """
    Return the minimum of an array or minimum along an axis.
    Parameters
    ----------
    a : ndarray
        Input data.
    axis : int, optional
        Axis along which to operate.  By default, flattened input is used.
    out : ndarray, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
        See `doc.ufuncs` (Section "Output arguments") for more details.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.
    Returns
    -------
    min : ndarray
        Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
        If `axis` is given, the result is an array of dimension
        ``a.ndim - 1``.
    See Also
    --------
    max :
        The maximum value of an array along a given axis, ignoring any nan.
    minimum :
        Element-wise minimum of two arrays, ignoring any nan.
    Notes
    -----
    NaN in the original `numpy` is denoted as nan and will be ignored.
    Don't use `min` for element-wise comparison of 2 arrays; when
    ``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
    ``min(a, axis=0)``.
    Examples
    --------
    >>> a = np.arange(4).reshape((2,2))
    >>> a
    array([[0., 1.],
           [2., 3.]])
    >>> np.amin(a)           # Minimum of the flattened array
    array(0.)
    >>> np.amin(a, axis=0)   # Minima along the first axis
    array([0., 1.])
    >>> np.amin(a, axis=1)   # Minima along the second axis
    array([0., 2.])
    >>> b = np.arange(5, dtype=np.float32)
    >>> b[2] = np.nan
    >>> np.amin(b)
    array(0.)   # nan will be ignored
    """
    return _mx_nd_np.amin(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
    """
    Compute the weighted average along the specified axis.
    Parameters
    --------
    a : ndarray
        Array containing data to be averaged.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to average a.
        The default, axis=None, will average over
        all of the elements of the input array.
        If axis is negative it counts from the last to the first axis.
        New in version 1.7.0.
        If axis is a tuple of ints, averaging is
        performed on all of the axes specified in the tuple
        instead of a single axis or all the axes as before.
    weights : ndarray, optional
        An array of weights associated with the values in a, must be the same dtype with a.
        Each value in a contributes to the average according to its associated weight.
        The weights array can either be 1-D (in which case its length must be
        the size of a along the given axis) or of the same shape as a.
        If weights=None, then all data in a are assumed to have a weight equal to one.
        The 1-D calculation is: avg = sum(a * weights) / sum(weights)
        The only constraint on weights is that sum(weights) must not be 0.
    returned : bool, optional
        Default is False.
        If True, the tuple (average, sum_of_weights) is returned,
        otherwise only the average is returned.
        If weights=None, sum_of_weights is equivalent to
        the number of elements over which the average is taken.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    Returns
    --------
    retval, [sum_of_weights] : ndarray
        Return the average along the specified axis.
        When returned is True, return a tuple with the average as the first element
        and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
        If a is integral, the result dtype will be current default dtype,
        When npx.is_np_default_dtype() returns False, default dtype is float32,
        When npx.is_np_default_dtype() returns True, default dtype is float64;
        otherwise it will be the same as dtype of a.
    Raises
    --------
        MXNetError
        - When all weights along axis sum to zero.
        - When the length of 1D weights is not the same as the shape of a along axis.
        - When given 1D weights, the axis is not specified or is not int.
        - When the shape of weights and a differ, but weights are not 1D.
    See also
    --------
        mean
    Notes
    --------
    This function differs from the original `numpy.average`
    <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
    the following way(s):
    - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
    - Does not support complex dtype
    - The dtypes of a and weights must be the same
    - Integral a results in float32 or float64 returned dtype:
      When npx.is_np_default_dtype() returns False, default dtype is float32,
      When npx.is_np_default_dtype() returns True, default dtype is float64;
    Examples
    --------
    >>> data = np.arange(1, 5)
    >>> data
    array([1., 2., 3., 4.])
    >>> np.average(data)
    array(2.5)
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    array(4.)
    >>> data = np.arange(6).reshape((3,2))
    >>> data
    array([[0., 1.],
           [2., 3.],
           [4., 5.]])
    >>> weights = np.array([0.25, 0.75])
    >>> weights
    array([0.25, 0.75])
    >>> np.average(data, axis=1, weights=weights)
    array([0.75, 2.75, 4.75])
    """
    return _mx_nd_np.average(a, axis=axis, weights=weights, returned=returned, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False):  # pylint: disable=arguments-differ
    """
    Compute the arithmetic mean along the specified axis.

    Returns the average of the array elements. The average is taken over
    the flattened array by default, otherwise over the specified axis.

    Parameters
    ----------
    a : ndarray
        ndarray containing numbers whose mean is desired.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the means are computed. The default is to
        compute the mean of the flattened array. If this is a tuple of ints,
        a mean is performed over multiple axes, instead of a single axis or
        all the axes as before.
    dtype : data-type, optional
        Type to use in computing the mean.
        For integer inputs, the default is of your current default dtype:
        when npx.is_np_default_dtype() returns False, default dtype is float32;
        when npx.is_np_default_dtype() returns True, default dtype is float64.
        For floating point inputs, it is the same as the input dtype.
    out : ndarray, optional
        Alternate output array in which to place the result. The default is
        None; if provided, it must have the same shape and type as the
        expected output.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the input array.

    Returns
    -------
    m : ndarray, see dtype parameter above
        If out=None, returns a new array containing the mean values,
        otherwise a reference to the output array is returned.

    Notes
    -----
    This function differs from the original `numpy.mean
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
    the following way(s):

    - only ndarray is accepted as valid input, python iterables or scalar is not supported
    - default data type for integer input is float32 or float64, which depends on your current default dtype

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.mean(a)
    array(2.5)
    >>> a = np.zeros((2, 512*512), dtype=np.float32)
    >>> a[0,:] = 1.0
    >>> a[1,:] = 0.1
    >>> np.mean(a)
    array(0.55)
    >>> np.mean(a, dtype=np.float64)
    array(0.55, dtype=float64)
    """
    # Route through the _mx_nd_np frontend like the sibling reducers
    # (std, var, min, max) instead of calling the _npi backend directly;
    # the frontend delegates to _npi.mean, so behavior is unchanged.
    return _mx_nd_np.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the standard deviation along the specified axis.

    Returns the standard deviation, a measure of the spread of a
    distribution, of the array elements. It is computed over the flattened
    array by default, otherwise over the specified axis.

    Parameters
    ----------
    a : array_like
        Calculate the standard deviation of these values.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the standard deviation is computed. The
        default is to compute it over the flattened array. If this is a
        tuple of ints, the reduction is performed over multiple axes.
    dtype : dtype, optional
        Type to use in computing the standard deviation. For arrays of
        integer type the default is float64, for arrays of float types it
        is the same as the array type.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape as the expected output but the type (of the
        calculated values) will be cast if necessary.
    ddof : int, optional
        Means Delta Degrees of Freedom. The divisor used in calculations is
        ``N - ddof``, where ``N`` represents the number of elements. By
        default `ddof` is zero.
    keepdims : bool, optional
        If True, the reduced axes are kept in the result as dimensions of
        size one, so the result broadcasts correctly against `a`.

    Returns
    -------
    standard_deviation : ndarray, see dtype parameter above.
        If `out` is None, return a new array containing the standard
        deviation, otherwise return a reference to the output array.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.std(a)
    1.1180339887498949 # may vary
    >>> np.std(a, axis=0)
    array([1., 1.])
    >>> np.std(a, axis=1)
    array([0.5, 0.5])

    In single precision, std() can be inaccurate:

    >>> a = np.zeros((2, 512*512), dtype=np.float32)
    >>> a[0, :] = 1.0
    >>> a[1, :] = 0.1
    >>> np.std(a)
    array(0.45)
    >>> np.std(a, dtype=np.float64)
    array(0.45, dtype=float64)
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.std(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a
    one-dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : slice, int or ndarray of ints
        Indicate indices of sub-arrays to remove along the specified axis.
    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed.
        `delete` does not operate in place. If `axis` is None, `out` is
        a flattened array.

    Examples
    --------
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1.,  2.,  3.,  4.],
           [ 9., 10., 11., 12.]])
    >>> np.delete(arr, slice(None, None, 2), 1)
    array([[ 2.,  4.],
           [ 6.,  8.],
           [10., 12.]])
    >>> np.delete(arr, np.array([1,3,5]), None)
    array([ 1.,  3.,  5.,  7.,  8.,  9., 10., 11., 12.])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.delete(arr, obj, axis=axis)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):  # pylint: disable=too-many-arguments
    """
    Compute the variance along the specified axis.

    Returns the variance of the array elements, a measure of the spread of
    a distribution. The variance is computed for the flattened array by
    default, otherwise over the specified axis.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired. If `a` is not
        an array, a conversion is attempted.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the variance is computed. The default is
        to compute the variance of the flattened array. If this is a tuple
        of ints, the reduction is performed over multiple axes.
    dtype : data-type, optional
        Type to use in computing the variance.
        For arrays of integer type, the default is of your current default
        dtype: when npx.is_np_default_dtype() returns False, default dtype
        is float32; when npx.is_np_default_dtype() returns True, default
        dtype is float64. For arrays of float types it is the same as the
        array type.
    out : ndarray, optional
        Alternate output array in which to place the result. It must have
        the same shape as the expected output, but the type is cast if
        necessary.
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used in the calculation is
        ``N - ddof``, where ``N`` represents the number of elements. By
        default `ddof` is zero.
    keepdims : bool, optional
        If True, the reduced axes are kept in the result as dimensions of
        size one, so the result broadcasts correctly against `a`.

    Returns
    -------
    variance : ndarray, see dtype parameter above
        If ``out=None``, returns a new array containing the variance;
        otherwise, a reference to the output array is returned.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.var(a)
    array(1.25)
    >>> np.var(a, axis=0)
    array([1., 1.])
    >>> np.var(a, axis=1)
    array([0.25, 0.25])

    >>> a = np.zeros((2, 512*512), dtype=np.float32)
    >>> a[0, :] = 1.0
    >>> a[1, :] = 0.1
    >>> np.var(a)
    array(0.2025)
    >>> np.var(a, dtype=np.float64)
    array(0.2025, dtype=float64)
    >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
    0.2025
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.var(a, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def indices(dimensions, dtype=None, ctx=None):
    """Return an array representing the indices of a grid.
    Compute an array where the subarrays contain index values 0,1,...
    varying only along the corresponding axis.
    Parameters
    ----------
    dimensions : sequence of ints
        The shape of the grid.
    dtype : data-type, optional
        The desired data-type for the array. Default is `int64`.
    ctx : device context, optional
        Device context on which the memory is allocated. Default is
        `mxnet.context.current_context()`.
    Returns
    -------
    grid : ndarray
        The array of grid indices,
        ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
    Notes
    -----
    The output shape is obtained by prepending the number of dimensions
    in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
    ``(r0, ..., rN-1)`` of length ``N``, the output shape is
    ``(N,r0,...,rN-1)``.
    The subarrays ``grid[k]`` contains the N-D array of indices along the
    ``k-th`` axis. Explicitly::
        grid[k,i0,i1,...,iN-1] = ik
    Examples
    --------
    >>> grid = np.indices((2, 3))
    >>> grid.shape
    (2, 2, 3)
    >>> grid[0]        # row indices
    array([[0, 0, 0],
           [1, 1, 1]], dtype=int64)
    >>> grid[1]        # column indices
    array([[0, 1, 2],
           [0, 1, 2]], dtype=int64)
    The indices can be used as an index into an array.
    >>> x = np.arange(20).reshape(5, 4)
    >>> row, col = np.indices((2, 3))
    >>> x[row, col]
    array([[0., 1., 2.],
           [4., 5., 6.]])
    Note that it would be more straightforward in the above example to
    extract the required elements directly with ``x[:2, :3]``.
    """
    return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
    r"""
    Change the sign of `x1` to that of `x2`, element-wise.

    If `x2` is a scalar, its sign will be copied to all elements of `x1`.

    Parameters
    ----------
    x1 : ndarray or scalar
        Values to change the sign of.
    x2 : ndarray or scalar
        The sign of `x2` is copied to `x1`.
    out : ndarray or None, optional
        A location into which the result is stored. It must be of the
        right shape and right type to hold the output. If not provided
        or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        The values of `x1` with the sign of `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    This function differs from the original `numpy.copysign
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
    the following aspects:

    - ``where`` param is not supported.

    Examples
    --------
    >>> np.copysign(1.3, -1)
    -1.3
    >>> 1/np.copysign(0, 1)
    inf
    >>> 1/np.copysign(0, -1)
    -inf
    >>> a = np.array([-1, 0, 1])
    >>> np.copysign(a, -1.1)
    array([-1., -0., -1.])
    >>> np.copysign(a, np.arange(3)-1)
    array([-1., 0., 1.])
    """
    # Delegate to the imperative ndarray implementation; extra kwargs are
    # consumed by the wrap_np_binary_func decorator.
    return _mx_nd_np.copysign(x1, x2, out=out)
@set_module('mxnet.numpy')
def ravel(x, order='C'):
    r"""
    ravel(x)

    Return a contiguous flattened array.

    A 1-D array containing the elements of the input is returned. A copy
    is made only if needed.

    Parameters
    ----------
    x : ndarray
        Input array. The elements in `x` are read in row-major, C-style
        order and packed as a 1-D array.
    order : `C`, optional
        Only support row-major, C-style order.

    Returns
    -------
    y : ndarray
        y is an array of the same subtype as `x`, with shape ``(x.size,)``.
        Note that matrices are special cased for backward compatibility;
        if `x` is a matrix, then y is a 1-D ndarray.

    Notes
    -----
    This function differs from the original numpy.arange in the following aspects:

    - Only support row-major, C-style order.

    Examples
    --------
    It is equivalent to ``reshape(x, -1)``.

    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
    >>> print(np.ravel(x))
    [1. 2. 3. 4. 5. 6.]
    >>> print(np.ravel(x.T))
    [1. 4. 2. 5. 3. 6.]
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.ravel(x, order)
@set_module('mxnet.numpy')
def unravel_index(indices, shape, order='C'):  # pylint: disable=redefined-outer-name
    """
    Converts a flat index or array of flat indices into a tuple of
    coordinate arrays.

    Parameters
    ----------
    indices : array_like
        An integer array whose elements are indices into the flattened
        version of an array of dimensions `shape`.
    shape : tuple of ints
        The shape of the array to use for unraveling indices.
    order : Only row-major is supported currently.

    Returns
    -------
    unraveled_coords : ndarray
        Each row in the ndarray has the same shape as the indices array.
        Each column in the ndarray represents the unravelled index.

    Examples
    --------
    >>> np.unravel_index([22, 41, 37], (7,6))
    [[3. 6. 6.]
     [4. 5. 1.]]
    >>> np.unravel_index(1621, (6,7,8,9))
    [3, 1, 4, 1]
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.unravel_index(indices, shape, order=order)
@set_module('mxnet.numpy')
def flatnonzero(a):
    r"""
    Return indices that are non-zero in the flattened version of `a`.

    This is equivalent to ``np.nonzero(np.ravel(a))[0]``.

    Parameters
    ----------
    a : array_like
        Input data.

    Returns
    -------
    res : ndarray
        Output array, containing the indices of the elements of
        ``a.ravel()`` that are non-zero.

    See Also
    --------
    nonzero : Return the indices of the non-zero elements of the input array.
    ravel : Return a 1-D array containing the elements of the input array.

    Examples
    --------
    >>> x = np.arange(-2, 3)
    >>> np.flatnonzero(x)
    array([0, 1, 3, 4])

    Use the returned indices to extract the non-zero elements:

    >>> x.ravel()[np.flatnonzero(x)]
    array([-2, -1,  1,  2])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.flatnonzero(a)
@set_module('mxnet.numpy')
def diag_indices_from(arr):
    """
    Return a tuple of indices that can be used to access the main diagonal
    of an array `a` with ``a.ndim >= 2`` dimensions and shape
    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal; for
    ``a.ndim > 2`` this is the set of indices to access
    ``a[i, i, ..., i]`` for ``i = [0..n-1]``.

    Parameters
    ----------
    arr : ndarray
        Input array for accessing the main diagonal. All dimensions
        should have equal length.

    Returns
    -------
    diag : tuple of ndarray
        Indices of the main diagonal.

    Examples
    --------
    >>> a = np.arange(16).reshape(4, 4)
    >>> idx = np.diag_indices_from(a)
    >>> idx
    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
    >>> a[idx] = 100
    >>> a
    array([[100,   1,   2,   3],
           [  4, 100,   6,   7],
           [  8,   9, 100,  11],
           [ 12,  13,  14, 100]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.diag_indices_from(arr)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hanning(M, dtype=None, ctx=None):
    r"""Return the Hanning window.
    The Hanning window is a taper formed by using a weighted cosine.
    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy dtype, optional
        The data type of the output window. Only numpy.float32 and
        numpy.float64 are supported by this operator; when not given, the
        default depends on ``npx.is_np_default_dtype()`` (see Returns).
    ctx : Context, optional
        An optional device context (default is the current default context).
    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need select numpy.float32 or float64 in this operator.
    See Also
    --------
    blackman, hamming
    Notes
    -----
    The Hanning window is defined as
    .. math::  w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1
    The Hanning was named for Julius von Hann, an Austrian meteorologist.
    It is also known as the Cosine Bell. Some authors prefer that it be
    called a Hann window, to help avoid confusion with the very similar
    Hamming window.
    Most references to the Hanning window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values. It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function.
    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
           The University of Alberta Press, 1975, pp. 106-108.
    .. [3] Wikipedia, "Window function",
           http://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.
    Examples
    --------
    >>> np.hanning(12)
    array([0.        , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
           0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
           0.07937312, 0.        ])
    Plot the window and its frequency response:
    >>> import matplotlib.pyplot as plt
    >>> window = np.hanning(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("Hann window")
    Text(0.5, 1.0, 'Hann window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hamming(M, dtype=None, ctx=None):
    r"""Return the hamming window.

    The hamming window is a taper formed by using a weighted cosine.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy.dtype, optional
        Data type of the returned window. Only numpy.float32 and
        numpy.float64 are supported by this operator; if not given, the
        default dtype described under Returns is used.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is odd).
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need select numpy.float32 or float64 in this operator.

    See Also
    --------
    blackman, hanning

    Notes
    -----
    The Hamming window is defined as

    .. math::  w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
               \qquad 0 \leq n \leq M-1

    The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
    and is described in Blackman and Tukey. It was recommended for
    smoothing the truncated autocovariance function in the time domain.
    Most references to the Hamming window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values. It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function.

    References
    ----------
    .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
           spectra, Dover Publications, New York.
    .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
           University of Alberta Press, 1975, pp. 109-110.
    .. [3] Wikipedia, "Window function",
           https://en.wikipedia.org/wiki/Window_function
    .. [4] W.H. Press,  B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
           "Numerical Recipes", Cambridge University Press, 1986, page 425.

    Examples
    --------
    >>> np.hamming(12)
    array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236  ,
           0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
           0.15302327, 0.08000001])

    Plot the window and its frequency response:

    >>> import matplotlib.pyplot as plt
    >>> window = np.hamming(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("hamming window")
    Text(0.5, 1.0, 'hamming window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def blackman(M, dtype=None, ctx=None):
    r"""Return the Blackman window.

    The Blackman window is a taper formed by using the first three
    terms of a summation of cosines. It was designed to have close to the
    minimal leakage possible.  It is close to optimal, only slightly worse
    than a Kaiser window.

    Parameters
    ----------
    M : int
        Number of points in the output window. If zero or less, an
        empty array is returned.
    dtype : str or numpy.dtype, optional
        Data type of the returned window. Only numpy.float32 and
        numpy.float64 are supported by this operator; if not given, the
        default dtype described under Returns is used.
    ctx : Context, optional
        An optional device context (default is the current default context).

    Returns
    -------
    out : ndarray
        The window, with the maximum value normalized to one (the value one
        appears only if the number of samples is odd).
        When npx.is_np_default_dtype() returns False, default dtype is float32;
        When npx.is_np_default_dtype() returns True, default dtype is float64.
        Note that you need select numpy.float32 or float64 in this operator.

    See Also
    --------
    hamming, hanning

    Notes
    -----
    The Blackman window is defined as

    .. math::  w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})

    Most references to the Blackman window come from the signal processing
    literature, where it is used as one of many windowing functions for
    smoothing values. It is also known as an apodization (which means
    "removing the foot", i.e. smoothing discontinuities at the beginning
    and end of the sampled signal) or tapering function. It is known as a
    "near optimal" tapering function, almost as good (by some measures)
    as the kaiser window.

    References
    ----------
    Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
    Dover Publications, New York.

    Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
    Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.

    Examples
    --------
    >>> np.blackman(12)
    array([-1.4901161e-08,  3.2606423e-02,  1.5990365e-01,  4.1439798e-01,
            7.3604530e-01,  9.6704686e-01,  9.6704674e-01,  7.3604506e-01,
            4.1439781e-01,  1.5990359e-01,  3.2606363e-02, -1.4901161e-08])

    Plot the window and its frequency response:

    >>> import matplotlib.pyplot as plt
    >>> window = np.blackman(51)
    >>> plt.plot(window.asnumpy())
    [<matplotlib.lines.Line2D object at 0x...>]
    >>> plt.title("blackman window")
    Text(0.5, 1.0, 'blackman window')
    >>> plt.ylabel("Amplitude")
    Text(0, 0.5, 'Amplitude')
    >>> plt.xlabel("Sample")
    Text(0.5, 0, 'Sample')
    >>> plt.show()
    """
    return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.numpy')
def flip(m, axis=None, out=None):
    r"""
    flip(m, axis=None, out=None)

    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Parameters
    ----------
    m : ndarray or scalar
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip over. The default,
        axis=None, will flip over all of the axes of the input array.
        If axis is negative it counts from the last to the first axis.
        If axis is a tuple of ints, flipping is performed on all of the axes
        specified in the tuple.
    out : ndarray or scalar, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.

    Returns
    -------
    out : ndarray or scalar
        A view of `m` with the entries of axis reversed.  Since a view is
        returned, this operation is done in constant time.
        NOTE(review): the view/constant-time claim is inherited from the
        NumPy docs; whether the result actually aliases `m` depends on the
        backend implementation of ``_mx_nd_np.flip`` — confirm before
        relying on view semantics.

    Examples
    --------
    >>> A = np.arange(8).reshape((2,2,2))
    >>> A
    array([[[0, 1],
            [2, 3]],
           [[4, 5],
            [6, 7]]])
    >>> np.flip(A, 0)
    array([[[4, 5],
            [6, 7]],
           [[0, 1],
            [2, 3]]])
    >>> np.flip(A, 1)
    array([[[2, 3],
            [0, 1]],
           [[6, 7],
            [4, 5]]])
    >>> np.flip(A)
    array([[[7, 6],
            [5, 4]],
           [[3, 2],
            [1, 0]]])
    >>> np.flip(A, (0, 2))
    array([[[5, 4],
            [7, 6]],
           [[1, 0],
            [3, 2]]])
    """
    return _mx_nd_np.flip(m, axis, out=out)
@set_module('mxnet.numpy')
def flipud(m):
    r"""
    flipud(*args, **kwargs)

    Flip array in the up/down direction.

    Flip the entries in each column in the up/down direction.
    Rows are preserved, but appear in a different order than before.

    Parameters
    ----------
    m : array_like
        Input array.

    Returns
    -------
    out : array_like
        A view of `m` with the rows reversed.  Since a view is
        returned, this operation is :math:`\mathcal O(1)`.
        NOTE(review): claim inherited from NumPy; this wrapper delegates to
        ``flip``, whose backend may copy — confirm before relying on
        aliasing or O(1) behavior.

    See Also
    --------
    fliplr : Flip array in the left/right direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to ``m[::-1,...]``.
    Does not require the array to be two-dimensional.

    Examples
    --------
    >>> A = np.diag(np.array([1.0, 2, 3]))
    >>> A
    array([[1., 0., 0.],
           [0., 2., 0.],
           [0., 0., 3.]])
    >>> np.flipud(A)
    array([[0., 0., 3.],
           [0., 2., 0.],
           [1., 0., 0.]])
    >>> A = np.random.randn(2,3,5)
    >>> np.all(np.flipud(A) == A[::-1,...])
    array(True)
    >>> np.flipud(np.array([1,2]))
    array([2., 1.])
    """
    return flip(m, 0)
@set_module('mxnet.numpy')
def fliplr(m):
    r"""
    fliplr(*args, **kwargs)

    Flip array in the left/right direction.

    Flip the entries in each row in the left/right direction.
    Columns are preserved, but appear in a different order than before.

    Parameters
    ----------
    m : array_like
        Input array, must be at least 2-D.

    Returns
    -------
    f : ndarray
        A view of `m` with the columns reversed.  Since a view
        is returned, this operation is :math:`\mathcal O(1)`.
        NOTE(review): claim inherited from NumPy; this wrapper delegates to
        ``flip``, whose backend may copy — confirm before relying on
        aliasing or O(1) behavior.

    See Also
    --------
    flipud : Flip array in the up/down direction.
    rot90 : Rotate array counterclockwise.

    Notes
    -----
    Equivalent to m[:,::-1]. Requires the array to be at least 2-D.

    Examples
    --------
    >>> A = np.diag([1.,2.,3.])
    >>> A
    array([[1., 0., 0.],
           [0., 2., 0.],
           [0., 0., 3.]])
    >>> np.fliplr(A)
    array([[0., 0., 1.],
           [0., 2., 0.],
           [3., 0., 0.]])
    >>> A = np.random.randn(2,3,5)
    >>> np.all(np.fliplr(A) == A[:,::-1,...])
    array(True)
    """
    return flip(m, 1)
@set_module('mxnet.numpy')
def around(x, decimals=0, out=None, **kwargs):
    r"""
    around(x, decimals=0, out=None)

    Evenly round to the given number of decimals.

    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0).  If
        decimals is negative, it specifies the number of positions to
        the left of the decimal point.
    out : ndarray, optional
        Alternative output array in which to place the result. It must have
        the same shape and type as the expected output.

    Returns
    -------
    rounded_array : ndarray or scalar
        An array of the same type as `x`, containing the rounded values.
        A reference to the result is returned.

    Notes
    -----
    For values exactly halfway between rounded decimal values, NumPy
    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
    -0.5 and 0.5 round to 0.0, etc.

    This function differs from the original numpy.around in the following aspects:

    - Cannot cast type automatically. Dtype of `out` must be same as the expected one.
    - Cannot support complex-valued number.

    Examples
    --------
    >>> np.around([0.37, 1.64])
    array([ 0.,  2.])
    >>> np.around([0.37, 1.64], decimals=1)
    array([ 0.4,  1.6])
    >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
    array([ 0.,  2.,  2.,  4.,  4.])
    >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
    array([ 1,  2,  3, 11])
    >>> np.around([1, 2, 3, 11], decimals=-1)
    array([ 0,  0,  0, 10])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.around(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round(x, decimals=0, out=None, **kwargs):
    r"""
    round(x, decimals=0, out=None)

    Round an array to the given number of decimals.

    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0).
    out : ndarray, optional
        Alternative output array in which to place the result.

    See Also
    --------
    around : equivalent function; see for details.
    """
    return _mx_nd_np.round(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round_(x, decimals=0, out=None, **kwargs):
    r"""
    round_(x, decimals=0, out=None)

    Round an array to the given number of decimals.

    Parameters
    ----------
    x : ndarray or scalar
        Input data.
    decimals : int, optional
        Number of decimal places to round to (default: 0).
    out : ndarray, optional
        Alternative output array in which to place the result.

    See Also
    --------
    around : equivalent function; see for details.
    """
    return _mx_nd_np.round_(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
    r"""
    Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.

    The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
    the signed angle in radians between the ray ending at the origin and
    passing through the point (1,0), and the ray ending at the origin and
    passing through the point (`x2`, `x1`).  (Note the role reversal: the
    "`y`-coordinate" is the first function parameter, the "`x`-coordinate"
    is the second.)  By IEEE convention, this function is defined for
    `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
    Notes for specific values).

    This function is not defined for complex-valued arguments; for the
    so-called argument of complex values, use `angle`.

    Parameters
    ----------
    x1 : ndarray or scalar
        `y`-coordinates.
    x2 : ndarray or scalar
        `x`-coordinates. `x2` must be broadcastable to match the shape of
        `x1` or vice versa.
    out : ndarray or None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
        `x1` and `x2` are scalars.

    Notes
    -----
    *arctan2* is identical to the `atan2` function of the underlying
    C library.  The following special values are defined in the C
    standard: [1]_

    ====== ====== ================
    `x1`   `x2`   `arctan2(x1,x2)`
    ====== ====== ================
    +/- 0  +0     +/- 0
    +/- 0  -0     +/- pi
     > 0   +/-inf +0 / +pi
     < 0   +/-inf -0 / -pi
    +/-inf +inf   +/- (pi/4)
    +/-inf -inf   +/- (3*pi/4)
    ====== ====== ================

    Note that +0 and -0 are distinct floating point numbers, as are +inf
    and -inf.

    This function differs from the original numpy.arctan2 in the following aspects:

    - Only support float16, float32 and float64.

    References
    ----------
    .. [1] ISO/IEC standard 9899:1999, "Programming language C."

    Examples
    --------
    Consider four points in different quadrants:

    >>> x = np.array([-1, +1, +1, -1])
    >>> y = np.array([-1, -1, +1, +1])
    >>> np.arctan2(y, x) * 180 / np.pi
    array([-135.,  -45.,   45.,  135.])

    Note the order of the parameters. `arctan2` is defined also when `x2` = 0
    and at several other special points, obtaining values in
    the range ``[-pi, pi]``:

    >>> x = np.array([1, -1])
    >>> y = np.array([0, 0])
    >>> np.arctan2(x, y)
    array([ 1.5707964, -1.5707964])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.arctan2(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
    r"""
    Given the "legs" of a right triangle, return its hypotenuse.

    Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise.  If `x1` or
    `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
    it is broadcast for use with each element of the other argument.

    Parameters
    ----------
    x1, x2 : array_like
        Leg of the triangle(s).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.

    Returns
    -------
    z : ndarray
        The hypotenuse of the triangle(s).
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    This function differs from the original numpy.hypot in the following aspects:

    - Only support float16, float32 and float64.

    Examples
    --------
    >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])

    Example showing broadcast of scalar_like argument:

    >>> np.hypot(3*np.ones((3, 3)), [4])
    array([[ 5.,  5.,  5.],
           [ 5.,  5.,  5.],
           [ 5.,  5.,  5.]])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.hypot(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise AND of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Result.

    Examples
    --------
    >>> np.bitwise_and(13, 17)
    1

    >>> np.bitwise_and(14, 13)
    12
    >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
    array([12,  1], dtype=int32)

    >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
    array([0, 1], dtype=int32)
    >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
    array([ 2,  4, 16], dtype=int32)
    >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([False,  True])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.bitwise_and(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise XOR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Result.

    Examples
    --------
    >>> np.bitwise_xor(13, 17)
    28

    >>> np.bitwise_xor(31, 5)
    26
    >>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)
    array([26,  6], dtype=int32)

    >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([26,  5], dtype=int32)
    >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True, False])
    """
    return _mx_nd_np.bitwise_xor(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
    r"""
    Compute the bit-wise OR of two arrays element-wise.

    Parameters
    ----------
    x1, x2 : ndarray or scalar
        Only integer and boolean types are handled. If x1.shape != x2.shape,
        they must be broadcastable to a common shape (which becomes the shape of the output).
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have a shape that the
        inputs broadcast to. If not provided or None, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Result.

    Examples
    --------
    >>> np.bitwise_or(13, 17)
    29

    >>> np.bitwise_or(31, 5)
    31
    >>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)
    array([31,  7])

    >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
    array([31,  7])
    >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True,  True])
    """
    return _mx_nd_np.bitwise_or(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
    """
    Returns x1 * 2**x2, element-wise.

    The mantissas `x1` and twos exponents `x2` are used to construct
    floating point numbers ``x1 * 2**x2``.

    Parameters
    ----------
    x1 : ndarray or scalar
        Array of multipliers.
    x2 : ndarray or scalar, int
        Array of twos exponents.
    out : ndarray, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or scalar
        The result of ``x1 * 2**x2``.
        This is a scalar if both `x1` and `x2` are scalars.

    Notes
    -----
    Complex dtypes are not supported, they will raise a TypeError.
    Different from numpy, we allow x2 to be float besides int.

    `ldexp` is useful as the inverse of `frexp`, if used by itself it is
    more clear to simply use the expression ``x1 * 2**x2``.

    Examples
    --------
    >>> np.ldexp(5, np.arange(4))
    array([  5.,  10.,  20.,  40.])
    """
    return _mx_nd_np.ldexp(x1, x2, out)
@set_module('mxnet.numpy')
def vdot(a, b):
    r"""
    Return the dot product of two vectors.

    Note that `vdot` handles multidimensional arrays differently than `dot`:
    it does *not* perform a matrix product, but flattens input arguments
    to 1-D vectors first. Consequently, it should only be used for vectors.

    Parameters
    ----------
    a : ndarray
        First argument to the dot product.
    b : ndarray
        Second argument to the dot product.

    Returns
    -------
    output : ndarray
        Dot product of `a` and `b`.

    See Also
    --------
    dot : Return the dot product without using the complex conjugate of the
        first argument.

    Examples
    --------
    Note that higher-dimensional arrays are flattened!

    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    array(30.)
    >>> np.vdot(b, a)
    array(30.)
    >>> 1*4 + 4*1 + 5*2 + 6*2
    30
    """
    # Flatten both operands and take the 1-axis tensordot (ordinary inner product).
    return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.numpy')
def inner(a, b):
    r"""Inner product of two arrays.

    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation), in higher dimensions a sum product over the last axes.

    Parameters
    ----------
    a, b : ndarray
        If `a` and `b` are nonscalar, their last dimensions must match.

    Returns
    -------
    out : ndarray
        `out.shape = a.shape[:-1] + b.shape[:-1]`

    Raises
    ------
    ValueError
        If the last dimension of `a` and `b` has different size.

    See Also
    --------
    tensordot : Sum products over arbitrary axes.
    dot : Generalised matrix product, using second last dimension of `b`.
    einsum : Einstein summation convention.

    Notes
    -----
    For vectors (1-D arrays) it computes the ordinary inner-product::

        np.inner(a, b) = sum(a[:]*b[:])

    More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::

        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))

    or explicitly::

        np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
            = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])

    In addition `a` or `b` may be scalars, in which case::

        np.inner(a,b) = a*b

    Examples
    --------
    Ordinary inner product for vectors:

    >>> a = np.array([1,2,3])
    >>> b = np.array([0,1,0])
    >>> np.inner(a, b)
    array(2.)

    A multidimensional example:

    >>> a = np.arange(24).reshape((2,3,4))
    >>> b = np.arange(4)
    >>> np.inner(a, b)
    array([[ 14.,  38.,  62.],
           [ 86., 110., 134.]])
    """
    # Sum-product over the last axis of each operand.
    return tensordot(a, b, [-1, -1])
@set_module('mxnet.numpy')
def outer(a, b):
    r"""Compute the outer product of two vectors.

    Given two vectors, ``a = [a0, a1, ..., aM]`` and
    ``b = [b0, b1, ..., bN]``,
    the outer product [1]_ is::

        [[a0*b0  a0*b1 ... a0*bN ]
         [a1*b0    .
         [ ...          .
         [aM*b0            aM*bN ]]

    Parameters
    ----------
    a : (M,) ndarray
        First input vector.  Input is flattened if
        not already 1-dimensional.
    b : (N,) ndarray
        Second input vector.  Input is flattened if
        not already 1-dimensional.

    Returns
    -------
    out : (M, N) ndarray
        ``out[i, j] = a[i] * b[j]``

    See also
    --------
    inner
    einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
    ufunc.outer : A generalization to N dimensions and other operations.
        ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.

    References
    ----------
    .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
             ed., Baltimore, MD, Johns Hopkins University Press, 1996,
             pg. 8.

    Examples
    --------
    Make a (*very* coarse) grid for computing a Mandelbrot set:

    >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
    >>> rl
    array([[-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.],
           [-2., -1.,  0.,  1.,  2.]])
    """
    # Flatten both operands and take the 0-axis tensordot (outer product).
    return tensordot(a.flatten(), b.flatten(), 0)
@set_module('mxnet.numpy')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):  # pylint: disable=too-many-arguments
    """
    Return the cross product of two (arrays of) vectors.

    The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
    to both `a` and `b`.  If `a` and `b` are arrays of vectors, the vectors
    are defined by the last axis of `a` and `b` by default, and these axes
    can have dimensions 2 or 3.  Where the dimension of either `a` or `b` is
    2, the third component of the input vector is assumed to be zero and the
    cross product calculated accordingly.  In cases where both input vectors
    have dimension 2, the z-component of the cross product is returned.

    Parameters
    ----------
    a : ndarray
        Components of the first vector(s).
    b : ndarray
        Components of the second vector(s).
    axisa : int, optional
        Axis of `a` that defines the vector(s).  By default, the last axis.
    axisb : int, optional
        Axis of `b` that defines the vector(s).  By default, the last axis.
    axisc : int, optional
        Axis of `c` containing the cross product vector(s).  Ignored if
        both input vectors have dimension 2, as the return is scalar.
        By default, the last axis.
    axis : int, optional
        If defined, the axis of `a`, `b` and `c` that defines the vector(s)
        and cross product(s).  Overrides `axisa`, `axisb` and `axisc`.

    Returns
    -------
    c : ndarray
        Vector cross product(s).

    Raises
    ------
    ValueError
        When the dimension of the vector(s) in `a` and/or `b` does not
        equal 2 or 3.

    Notes
    -----
    Supports full broadcasting of the inputs.

    Examples
    --------
    Vector cross-product.

    >>> x = np.array([1., 2., 3.])
    >>> y = np.array([4., 5., 6.])
    >>> np.cross(x, y)
    array([-3.,  6., -3.])

    One vector with dimension 2.

    >>> x = np.array([1., 2.])
    >>> y = np.array([4., 5., 6.])
    >>> np.cross(x, y)
    array([12., -6., -3.])

    Equivalently:

    >>> x = np.array([1., 2., 0.])
    >>> y = np.array([4., 5., 6.])
    >>> np.cross(x, y)
    array([12., -6., -3.])

    Both vectors with dimension 2.

    >>> x = np.array([1., 2.])
    >>> y = np.array([4., 5.])
    >>> np.cross(x, y)
    array(-3.)

    Multiple vector cross-products. Note that the direction of the cross
    product vector is defined by the `right-hand rule`.

    >>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
    >>> y = np.array([[4., 5., 6.], [1., 2., 3.]])
    >>> np.cross(x, y)
    array([[-3.,  6., -3.],
           [ 3., -6.,  3.]])

    The orientation of `c` can be changed using the `axisc` keyword.

    >>> np.cross(x, y, axisc=0)
    array([[-3.,  3.],
           [ 6., -6.],
           [-3.,  3.]])

    Change the vector definition of `x` and `y` using `axisa` and `axisb`.

    >>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
    >>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])
    >>> np.cross(x, y)
    array([[ -6.,  12.,  -6.],
           [  0.,   0.,   0.],
           [  6., -12.,   6.]])
    >>> np.cross(x, y, axisa=0, axisb=0)
    array([[-24.,  48., -24.],
           [-30.,  60., -30.],
           [-36.,  72., -36.]])
    """
    return _mx_nd_np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
@set_module('mxnet.numpy')
def kron(a, b):
    r"""
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : ndarray

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*SN)`.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b,  a[0,1]*b,  ... , a[0,-1]*b  ],
         [  ...                              ...  ],
         [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([  5,  50, 500,   6,  60, 600,   7,  70, 700])
    """
    return _mx_nd_np.kron(a, b)
@set_module('mxnet.numpy')
def equal(x1, x2, out=None):
    """
    Return (x1 == x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less

    Examples
    --------
    >>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.equal(1, np.ones(1))
    array([ True])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.equal(x1, x2, out)
@set_module('mxnet.numpy')
def not_equal(x1, x2, out=None):
    """
    Return (x1 != x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, greater, greater_equal, less, less_equal

    Examples
    --------
    >>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.not_equal(1, np.ones(1))
    array([False])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.not_equal(x1, x2, out)
@set_module('mxnet.numpy')
def greater(x1, x2, out=None):
    """
    Return the truth value of (x1 > x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater_equal, less, less_equal

    Examples
    --------
    >>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater(1, np.ones(1))
    array([False])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.greater(x1, x2, out)
@set_module('mxnet.numpy')
def less(x1, x2, out=None):
    """
    Return the truth value of (x1 < x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, greater_equal, less_equal

    Examples
    --------
    >>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less(1, np.ones(1))
    array([False])
    """
    # Thin wrapper: all computation is delegated to the imperative backend.
    return _mx_nd_np.less(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_and(x1, x2, out=None):
    r"""
    Compute the truth value of x1 AND x2 element-wise.

    Parameters
    ----------
    x1, x2 : array_like
        Logical AND is applied to the elements of `x1` and `x2`.
        If ``x1.shape != x2.shape``, they must be broadcastable to a common
        shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.

    Returns
    -------
    y : ndarray or bool
        Boolean result of the logical AND operation applied to the elements
        of `x1` and `x2`; the shape is determined by broadcasting.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    logical_or, logical_not, logical_xor, bitwise_or

    Examples
    --------
    >>> np.logical_and(True, False)
    False
    >>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([False,  True])
    """
    return _mx_nd_np.logical_and(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_or(x1, x2, out=None):
    r"""
    Compute the truth value of x1 OR x2 element-wise.

    Parameters
    ----------
    x1, x2 : array_like
        Logical OR is applied to the elements of `x1` and `x2`.
        If ``x1.shape != x2.shape``, they must be broadcastable to a common
        shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.

    Returns
    -------
    y : ndarray or bool
        Boolean result of the logical OR operation applied to the elements
        of `x1` and `x2`; the shape is determined by broadcasting.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    logical_and, logical_not, logical_xor, bitwise_or

    Examples
    --------
    >>> np.logical_or(True, False)
    True
    >>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([True,  True])
    """
    return _mx_nd_np.logical_or(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_xor(x1, x2, out=None):
    r"""
    Compute the truth value of x1 XOR x2 element-wise.

    Parameters
    ----------
    x1, x2 : array_like
        Logical XOR is applied to the elements of `x1` and `x2`.
        If ``x1.shape != x2.shape``, they must be broadcastable to a common
        shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned. A tuple (possible only as a
        keyword argument) must have length equal to the number of outputs.

    Returns
    -------
    y : ndarray or bool
        Boolean result of the logical XOR operation applied to the elements
        of `x1` and `x2`; the shape is determined by broadcasting.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    logical_and, logical_not, logical_or, bitwise_or

    Examples
    --------
    >>> np.logical_xor(True, False)
    True
    >>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
    array([ True, False])
    """
    return _mx_nd_np.logical_xor(x1, x2, out)
@set_module('mxnet.numpy')
def greater_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 >= x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, less, less_equal

    Examples
    --------
    >>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[ True,  True,  True],
           [ True,  True,  True]])
    >>> np.greater_equal(1, np.ones(1))
    array([True])
    """
    return _mx_nd_np.greater_equal(x1, x2, out)
@set_module('mxnet.numpy')
def less_equal(x1, x2, out=None):
    """
    Return the truth value of (x1 <= x2) element-wise.

    Parameters
    ----------
    x1, x2 : ndarrays or scalars
        Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
        a common shape (which becomes the shape of the output).
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If provided, it must have
        a shape that the inputs broadcast to. If not provided or `None`,
        a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray or scalar
        Output array of type bool, element-wise comparison of `x1` and `x2`.
        This is a scalar if both `x1` and `x2` are scalars.

    See Also
    --------
    equal, not_equal, greater, greater_equal, less

    Examples
    --------
    >>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
    array([[False, False, False],
           [False, False, False]])
    >>> np.less_equal(1, np.ones(1))
    array([True])
    """
    return _mx_nd_np.less_equal(x1, x2, out)
@set_module('mxnet.numpy')
def roll(a, shift, axis=None):
    """
    Roll array elements along a given axis.

    Elements that roll beyond the last position are re-introduced at the
    first. Rolling over multiple dimensions simultaneously is supported.

    Parameters
    ----------
    a : ndarray
        Input array.
    shift : int or tuple of ints
        The number of places by which elements are shifted. If a tuple,
        then `axis` must be a tuple of the same size, and each of the given
        axes is shifted by the corresponding number. If an int while `axis`
        is a tuple of ints, the same value is used for all given axes.
    axis : int or tuple of ints, optional
        Axis or axes along which elements are shifted. By default, the
        array is flattened before shifting, after which the original shape
        is restored.

    Returns
    -------
    res : ndarray
        Output array, with the same shape as `a`.

    Examples
    --------
    >>> x = np.arange(10)
    >>> np.roll(x, 2)
    array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])
    >>> np.roll(x, -2)
    array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])
    >>> x2 = np.reshape(x, (2, 5))
    >>> np.roll(x2, 1, axis=0)
    array([[5., 6., 7., 8., 9.],
           [0., 1., 2., 3., 4.]])
    >>> np.roll(x2, 1, axis=1)
    array([[4., 0., 1., 2., 3.],
           [9., 5., 6., 7., 8.]])
    """
    # Delegate to the imperative ndarray implementation.
    rolled = _mx_nd_np.roll(a, shift, axis=axis)
    return rolled
@set_module('mxnet.numpy')
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.

    Rotation direction is from the first towards the second axis.

    Parameters
    ----------
    m : ndarray
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.

    Returns
    -------
    y : ndarray
        A rotated view of `m`.

    Notes
    -----
    ``rot90(m, k=1, axes=(1, 0))`` is the reverse of
    ``rot90(m, k=1, axes=(0, 1))`` and is equivalent to
    ``rot90(m, k=-1, axes=(0, 1))``.

    Examples
    --------
    >>> m = np.array([[1,2],[3,4]], 'int')
    >>> np.rot90(m)
    array([[2, 4],
           [1, 3]], dtype=int64)
    >>> np.rot90(m, 2)
    array([[4, 3],
           [2, 1]], dtype=int64)
    >>> m = np.arange(8).reshape((2,2,2))
    >>> np.rot90(m, 1, (1,2))
    array([[[1., 3.],
            [0., 2.]],
           [[5., 7.],
            [4., 6.]]])
    """
    # Delegate to the imperative ndarray implementation.
    rotated = _mx_nd_np.rot90(m, k=k, axes=axes)
    return rotated
@set_module('mxnet.numpy')
def hsplit(ary, indices_or_sections):
    """Split an array into multiple sub-arrays horizontally (column-wise).

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int, list of ints or tuple of ints
        If an integer, N, the array will be divided into N equal arrays
        along the split axis; if such a split is not possible, an error is
        raised. If a list of sorted integers, the entries indicate where
        along the axis the array is split. Every index must be less than
        or equal to the dimension of the array along the split axis,
        otherwise an error is raised.

    Returns
    -------
    sub-arrays : list of ndarrays
        A list of sub-arrays.

    Notes
    -----
    - If `indices_or_sections` is an integer that does not result in an
      equal division, a ValueError is raised.
    - If `indices_or_sections` is the integer 1, an error is raised,
      because a single output from split is not supported yet.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)
    [array([[ 0.,  1.],
           [ 4.,  5.],
           [ 8.,  9.],
           [12., 13.]]),
    array([[ 2.,  3.],
           [ 6.,  7.],
           [10., 11.],
           [14., 15.]])]

    With a higher dimensional array the split is still along the second axis.

    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> np.hsplit(x, 2)
    [array([[[ 0.,  1.]],
           [[ 4.,  5.]]]),
    array([[[ 2.,  3.]],
           [[ 6.,  7.]]])]

    If ``ary`` has one dimension, 'axis' = 0.

    >>> x = np.arange(4)
    >>> np.hsplit(x, 2)
    [array([0., 1.]), array([2., 3.])]

    Producing an empty sub-array:

    >>> np.hsplit(x, [2, 2])
    [array([0., 1.]), array([], dtype=float32), array([2., 3.])]
    """
    # Delegate to the imperative ndarray implementation.
    sub_arrays = _mx_nd_np.hsplit(ary, indices_or_sections)
    return sub_arrays
@set_module('mxnet.numpy')
def einsum(*operands, **kwargs):
    r"""
    einsum(subscripts, *operands, out=None, optimize=False)

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional,
    linear algebraic array operations can be represented in a simple
    fashion. In *implicit* mode `einsum` computes these values. In
    *explicit* mode, `einsum` provides further flexibility to compute other
    array operations that might not be considered classical Einstein
    summation operations, by disabling, or forcing summation over specified
    subscript labels.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation as comma separated list of
        subscript labels. An implicit (classical Einstein summation)
        calculation is performed unless the explicit indicator '->' is
        included as well as subscript labels of the precise output form.
    operands : list of ndarray
        These are the arrays for the operation.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    optimize : {False, True}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False. Defaults to False.

    Returns
    -------
    output : ndarray
        The calculation based on the Einstein summation convention.

    Notes
    -----
    Repeated subscript labels are summed over, so ``np.einsum('i,i', a, b)``
    is equivalent to ``np.inner(a, b)``, ``np.einsum('ij,jk', a, b)`` is
    matrix multiplication, and ``np.einsum('ii', a)`` is ``np.trace(a)``.
    A label that appears only once is not summed.

    In *implicit mode*, output axes are reordered alphabetically, so
    ``np.einsum('ij,jh', a, b)`` returns the transpose of the matrix
    product because 'h' precedes 'i'. In *explicit mode* the identifier
    '->' and the output subscript labels directly control the result:
    ``np.einsum('i->', a)`` is like ``np.sum(a, axis=-1)`` and
    ``np.einsum('ii->i', a)`` is like ``np.diag(a)``. Broadcasting is
    enabled with an ellipsis, e.g. ``np.einsum('...ii->...i', a)``.

    The ``optimize`` argument enables a 'greedy' contraction-order
    optimization; for a contraction with three or more operands this can
    greatly increase computational efficiency at the cost of a larger
    memory footprint during computation.

    This function differs from the original `numpy.einsum
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
    the following way(s):

    - Does not support 'optimal' strategy
    - Does not support the alternative subscript like
      `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
    - Does not produce view in any cases

    Examples
    --------
    >>> a = np.arange(25).reshape(5,5)
    >>> b = np.arange(5)
    >>> np.einsum('ii', a)             # trace
    array(60.)
    >>> np.einsum('ii->i', a)          # diagonal
    array([ 0.,  6., 12., 18., 24.])
    >>> np.einsum('ij->i', a)          # row sums
    array([ 10.,  35.,  60.,  85., 110.])
    >>> np.einsum('i,i', b, b)         # inner product
    array(30.)
    >>> np.einsum('ij,j', a, b)        # matrix-vector product
    array([ 30.,  80., 130., 180., 230.])
    >>> np.einsum('ji', np.arange(6).reshape(2,3))   # transpose
    array([[0., 3.],
           [1., 4.],
           [2., 5.]])
    """
    # Positional operands (the subscripts string plus the input arrays)
    # and keyword options are forwarded unchanged.
    return _mx_nd_np.einsum(*operands, **kwargs)
@set_module('mxnet.numpy')
def insert(arr, obj, values, axis=None):
    """
    Insert values along the given axis before the given indices.

    Parameters
    ----------
    arr : ndarray
        Input array.
    obj : int, slice or ndarray of int64
        Object that defines the index or indices before which `values` is
        inserted. Multiple insertions are supported when `obj` is a single
        scalar or a sequence with one element (only int32 and int64
        elements are supported).
    values : ndarray
        Values to insert into `arr`. If the type of `values` differs from
        that of `arr`, `values` is converted to the type of `arr`.
    axis : int, optional
        Axis along which to insert `values`. If `axis` is None then `arr`
        is flattened first.

    Returns
    -------
    out : ndarray
        A copy of `arr` with `values` inserted. Note that `insert` does
        not occur in-place: a new array is returned. If `axis` is None,
        `out` is a flattened array.

    Notes
    -----
    - For higher dimensional inserts ``obj=0`` behaves very differently
      from ``obj=[0]``, just like ``arr[:,0,:] = values`` is different
      from ``arr[:,[0],:] = values``.
    - If `obj` is a ndarray, its dtype only supports int64.

    Examples
    --------
    >>> a = np.array([[1, 1], [2, 2], [3, 3]])
    >>> np.insert(a, 1, np.array(5))
    array([1., 5., 1., 2., 2., 3., 3.])
    >>> np.insert(a, 1, np.array(5), axis=1)
    array([[1., 5., 1.],
           [2., 5., 2.],
           [3., 5., 3.]])

    Difference between sequence and scalars:

    >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
    array([[1., 1., 1.],
           [2., 2., 2.],
           [3., 3., 3.]])
    >>> b = a.flatten()
    >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
    array([1., 1., 5., 6., 2., 2., 3., 3.])
    >>> np.insert(b, slice(2, 4), np.array([5, 6]))
    array([1., 1., 5., 2., 6., 2., 3., 3.])
    """
    # Delegate to the imperative ndarray implementation.
    result = _mx_nd_np.insert(arr, obj, values, axis=axis)
    return result
@set_module('mxnet.numpy')
def nonzero(a):
    """
    Return the indices of the elements that are non-zero.

    Returns a tuple of arrays, one for each dimension of `a`, containing
    the indices of the non-zero elements in that dimension. The values in
    `a` are always returned in row-major, C-style order.

    To group the indices by element, rather than dimension, use `argwhere`,
    which returns a row for each non-zero element.

    Parameters
    ----------
    a : ndarray
        Input array.

    Returns
    -------
    tuple_of_arrays : tuple
        Indices of elements that are non-zero.

    See Also
    --------
    ndarray.nonzero : Equivalent ndarray method.

    Notes
    -----
    While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
    recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead,
    which will correctly handle 0-d arrays.

    Examples
    --------
    >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
    >>> np.nonzero(x)
    (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
    >>> x[np.nonzero(x)]
    array([3, 4, 5, 6])

    A common use of ``nonzero`` is to find the indices of an array where a
    condition is True; since False is interpreted as 0, ``np.nonzero(a > 3)``
    yields the indices where the condition holds:

    >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
    >>> np.nonzero(a > 3)
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    >>> a[np.nonzero(a > 3)]
    array([4, 5, 6, 7, 8, 9], dtype=int32)

    ``nonzero`` can also be called as a method of the array:

    >>> (a > 3).nonzero()
    (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.nonzero(a)
@set_module('mxnet.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like
        Input array
    q : array_like
        Percentile or sequence of percentiles to compute.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The default
        is to compute the percentile(s) along a flattened version of the
        array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output, but
        the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional (Not supported yet)
        If True, then allow the input array `a` to be modified by
        intermediate calculations, to save memory. In this case, the
        contents of the input `a` after this function completes is
        undefined.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired percentile lies
        between two data points ``i < j``:

        - 'linear': ``i + (j - i) * fraction``, where ``fraction`` is the
          fractional part of the index surrounded by ``i`` and ``j``.
        - 'lower': ``i``.
        - 'higher': ``j``.
        - 'nearest': ``i`` or ``j``, whichever is nearest.
        - 'midpoint': ``(i + j) / 2``.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array `a`.

    Returns
    -------
    percentile : scalar or ndarray
        Output array.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, np.array(50))
    array(3.5)
    >>> np.percentile(a, np.array(50), axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.percentile(a, np.array(50), axis=1)
    array([7., 2.])
    >>> np.percentile(a, np.array(50), axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.percentile(
        a, q, axis=axis, out=out, overwrite_input=overwrite_input,
        interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def median(a, axis=None, out=None, overwrite_input=None, keepdims=False):
    r"""
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default
        is to compute the median along a flattened version of the array.
        A sequence of axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional (Not supported yet)
        If True, allow the input array `a` to be modified by intermediate
        calculations, to save memory.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

    Returns
    -------
    median : ndarray
        A new array holding the result. If the input contains integers
        or floats smaller than ``float32``, then the output data-type is
        ``np.float32``. Otherwise, the data-type of the output is the
        same as that of the input. If `out` is specified, that array is
        returned instead.

    See Also
    --------
    mean, percentile

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> a
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> np.median(a)
    3.5
    >>> np.median(a, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.median(a, axis=1)
    array([7., 2.])
    """
    return _mx_nd_np.median(a, axis=axis, overwrite_input=overwrite_input,
                            keepdims=keepdims, out=out)
@set_module('mxnet.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
    """
    Compute the q-th quantile of the data along the specified axis.

    New in version 1.15.0.

    Parameters
    ----------
    a : ndarray
        Input array or object that can be converted to an array.
    q : ndarray
        Quantile or sequence of quantiles to compute, which must be
        between 0 and 1 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. The default
        is to compute the quantile(s) along a flattened version of the
        array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output, but
        the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional (Not supported yet)
        Accepted for API compatibility only.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        Interpolation method to use when the desired quantile lies between
        two data points ``i < j``:

        - linear: ``i + (j - i) * fraction``, where ``fraction`` is the
          fractional part of the index surrounded by ``i`` and ``j``.
        - lower: ``i``.
        - higher: ``j``.
        - nearest: ``i`` or ``j``, whichever is nearest.
        - midpoint: ``(i + j) / 2``.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array `a`.

    Returns
    -------
    quantile : ndarray
        If `q` is a single quantile and ``axis=None``, then the result is
        a scalar. If multiple quantiles are given, the first axis of the
        result corresponds to the quantiles. The other axes are the axes
        that remain after the reduction of `a`. If `out` is specified,
        that array is returned instead.

    See also
    --------
    mean

    Notes
    -----
    Given a vector ``V`` of length ``N``, the q-th quantile of ``V`` is
    the value ``q`` of the way from the minimum to the maximum in a sorted
    copy of ``V``. The values and distances of the two nearest neighbors
    as well as the interpolation parameter will determine the quantile if
    the normalized ranking does not match the location of ``q`` exactly.
    This function is the same as the median if ``q=0.5``, the same as the
    minimum if ``q=0.0`` and the same as the maximum if ``q=1.0``.

    This function differs from the original `numpy.quantile
    <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
    the following aspects:

    - q must be ndarray type even if it is a scalar
    - do not support overwrite_input

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> q = np.array(0.5)
    >>> np.quantile(a, q)
    array(3.5)
    >>> np.quantile(a, q, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.quantile(a, q, axis=1)
    array([7., 2.])
    >>> np.quantile(a, q, axis=1, keepdims=True)
    array([[7.],
           [2.]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.quantile(
        a, q, axis=axis, out=out, overwrite_input=overwrite_input,
        interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def shares_memory(a, b, max_work=None):
    """
    Determine if two arrays share memory

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : optional
        Dummy argument accepted for API compatibility; it is ignored.

    Returns
    -------
    out : bool

    See Also
    --------
    may_share_memory

    Examples
    --------
    >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
    False

    This function differs from the original `numpy.shares_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
    the following way(s):
    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `may_share_memory` in MXNet DeepNumPy
    """
    return _mx_nd_np.shares_memory(a, b, max_work)
@set_module('mxnet.numpy')
def may_share_memory(a, b, max_work=None):
    """
    Determine if two arrays might share memory

    A return of True does not necessarily mean that the two arrays share
    any element. It just means that they *might*. Only the memory bounds
    of `a` and `b` are checked by default.

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : optional
        Dummy argument accepted for API compatibility; it is ignored.

    Returns
    -------
    out : bool

    See Also
    --------
    shares_memory

    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True

    This function differs from the original `numpy.may_share_memory
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
    the following way(s):
    - Does not support `max_work`, it is a dummy argument
    - Actually it is same as `shares_memory` in MXNet DeepNumPy
    """
    # `max_work` is forwarded but has no effect in the backend.
    return _mx_nd_np.may_share_memory(a, b, max_work)
@set_module('mxnet.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
    r"""
    Calculate the n-th discrete difference along the given axis.

    Parameters
    ----------
    a : ndarray
        Input array
    n : int, optional
        The number of times values are differenced. If zero, the input is
        returned as-is.
    axis : int, optional
        The axis along which the difference is taken, default is the last
        axis.
    prepend, append : ndarray, optional
        Not supported yet

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`. The
        type of the output is the same as the type of the difference
        between any two elements of `a`. This is the same as the type of
        `a` in most cases.

    Raises
    ------
    NotImplementedError
        If `prepend` or `append` is given.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])
    >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> np.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> np.diff(x, axis=0)
    array([[-1, 2, 0, -2]])

    Notes
    -----
    Optional inputs `prepend` and `append` are not supported yet
    """
    # Compare against None explicitly: the previous `if (prepend or append)`
    # raised an ambiguous-truth-value error when an ndarray was passed, and
    # silently ignored falsy values such as 0 or an empty array instead of
    # reporting that the option is unsupported.
    if prepend is not None or append is not None:
        raise NotImplementedError('prepend and append options are not supported yet')
    return _mx_nd_np.diff(a, n=n, axis=axis)
@set_module('mxnet.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : ndarray
        If necessary, will be flattened before the differences are taken.
    to_end : ndarray or scalar, optional
        Number(s) to append at the end of the returned differences.
    to_begin : ndarray or scalar, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ediff1d : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1.,  2.,  3., -7.])
    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99.,   1.,   2.,   3.,  -7.,  88.,  99.])

    The returned array is always 1D.

    >>> y = np.array([[1, 2, 4], [1, 6, 24]])
    >>> np.ediff1d(y)
    array([ 1.,  2., -3.,  5., 18.])
    >>> np.ediff1d(x, to_begin=y)
    array([ 1.,  2.,  4.,  1.,  6., 24.,  1.,  2.,  3., -7.])
    """
    return _mx_nd_np.ediff1d(ary, to_end=to_end, to_begin=to_begin)
@set_module('mxnet.numpy')
def resize(a, new_shape):
    """
    Return a new array with the specified shape.

    If the new array is larger than the original array, then the new array
    is filled with repeated copies of `a`. Note that this behavior is
    different from ``a.resize(new_shape)`` which fills with zeros instead
    of repeated copies of `a`.

    Parameters
    ----------
    a : ndarray
        Array to be resized.
    new_shape : int or tuple of int
        Shape of resized array.

    Returns
    -------
    reshaped_array : ndarray
        The new array is formed from the data in the old array, repeated
        if necessary to fill out the required number of elements. The data
        are repeated in the order that they are stored in memory.

    See Also
    --------
    ndarray.resize : resize an array in-place.

    Notes
    -----
    Warning: This functionality does **not** consider axes separately,
    i.e. it does not apply interpolation/extrapolation. It fills the
    return array with the required number of elements, taken from `a` as
    they are laid out in memory, disregarding strides and axes. (This is
    in case the new shape is smaller. For larger, see above.) This
    functionality is therefore not suitable to resize images, or data
    where each axis represents a separate and distinct entity.

    Examples
    --------
    >>> a = np.array([[0, 1], [2, 3]])
    >>> np.resize(a, (2, 3))
    array([[0., 1., 2.],
           [3., 0., 1.]])
    >>> np.resize(a, (1, 4))
    array([[0., 1., 2., 3.]])
    >>> np.resize(a,(2, 4))
    array([[0., 1., 2., 3.],
           [0., 1., 2., 3.]])
    """
    # Delegate to the imperative ndarray implementation.
    resized = _mx_nd_np.resize(a, new_shape)
    return resized
@set_module('mxnet.numpy')
def interp(x, xp, fp, left=None, right=None, period=None):  # pylint: disable=too-many-arguments
    """One-dimensional linear interpolation.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given values at discrete data points.

    Parameters
    ----------
    x : ndarray
        The x-coordinates of the interpolated values.
    xp : 1-D array of floats
        The x-coordinates of the data points, must be increasing unless
        `period` is given, in which case `xp` is internally sorted after
        normalizing the periodic boundaries with ``xp = xp % period``.
    fp : 1-D array of floats
        The y-coordinates of the data points, same length as `xp`.
    left : optional float corresponding to fp
        Value to return for ``x < xp[0]``; defaults to ``fp[0]``.
    right : optional float corresponding to fp
        Value to return for ``x > xp[-1]``; defaults to ``fp[-1]``.
    period : None or float, optional
        A period for the x-coordinates, enabling proper interpolation of
        angular x-coordinates.  `left` and `right` are ignored when
        `period` is specified.

    Returns
    -------
    y : float (corresponding to fp) or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different lengths, if `xp` or `fp` are not
        1-D sequences, or if ``period == 0``.

    Notes
    -----
    This function does not verify that `xp` is increasing; if it is not,
    the results are nonsense.  A simple check is::

        np.all(np.diff(xp) > 0)

    Examples
    --------
    >>> xp = [1, 2, 3]
    >>> fp = [3, 2, 0]
    >>> np.interp(2.5, xp, fp)
    1.0
    >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
    array([ 3.  ,  3.  ,  2.5 ,  0.56,  0.  ])
    >>> np.interp(3.14, xp, fp, right=-99.0)
    -99.0

    Interpolation with periodic x-coordinates:

    >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
    >>> xp = [190, -190, 350, -350]
    >>> fp = [5, 10, 3, 4]
    >>> np.interp(x, xp, fp, period=360)
    array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
    """
    # Delegate directly to the imperative ndarray implementation.
    return _mx_nd_np.interp(x, xp, fp, left=left, right=right,
                            period=period)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None):  # pylint: disable=too-many-arguments
    """Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        The shape and data-type of `a` define these same attributes of
        the returned array.
    fill_value : scalar
        Fill value.
    dtype : data-type, optional
        Overrides the data type of the result.  Boolean type is
        temporarily not supported.
    order : {'C'}, optional
        Memory layout of the result; only C order is currently supported.
    ctx : Context, optional
        Device on which to create the array, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it must
        have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of `fill_value` with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full : Return a new array of given shape filled with value.

    Examples
    --------
    >>> x = np.arange(6, dtype=int)
    >>> np.full_like(x, 1)
    array([1, 1, 1, 1, 1, 1], dtype=int64)
    >>> np.full_like(x, 0.1, dtype=np.float64)
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
    >>> y = np.arange(6, dtype=np.float32)
    >>> np.full_like(y, 0.1)
    array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    """
    # Thin wrapper: forward everything to the imperative implementation.
    return _mx_nd_np.full_like(a, fill_value=fill_value, dtype=dtype,
                               order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
    """
    Return an array of zeros with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.
        Temporarily do not support boolean type.
    order : {'C'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device on which to create the array, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of zeros with the same shape and type as a.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    ones_like : Return an array of ones with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    zeros : Return a new array setting values to zero.

    Examples
    --------
    >>> x = np.arange(6)
    >>> x = x.reshape((2, 3))
    >>> x
    array([[0., 1., 2.],
           [3., 4., 5.]])
    >>> np.zeros_like(x)
    array([[0., 0., 0.],
           [0., 0., 0.]])
    >>> np.zeros_like(x, int)
    array([[0, 0, 0],
           [0, 0, 0]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> y
    array([0., 1., 2.], dtype=float64)
    >>> np.zeros_like(y)
    array([0., 0., 0.], dtype=float64)
    """
    # Implemented as full_like with fill_value=0, matching ones_like below.
    # BUG FIX: the out-parameter was previously forwarded as ``out=ctx``,
    # which dropped any user-supplied output buffer and passed the context
    # object where an ndarray is expected; it must be ``out=out``.
    return _mx_nd_np.full_like(a, fill_value=0, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
    """Return an array of ones with the same shape and type as a given array.

    Parameters
    ----------
    a : ndarray
        The shape and data-type of `a` define these same attributes of
        the returned array.
    dtype : data-type, optional
        Overrides the data type of the result.  Boolean type is
        temporarily not supported.
    order : {'C'}, optional
        Memory layout of the result; only C order is currently supported.
    ctx : Context, optional
        Device on which to create the array, e.g. the i-th GPU.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it must
        have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    out : ndarray
        Array of ones with the same shape and type as `a`.

    See Also
    --------
    empty_like : Return an empty array with shape and type of input.
    zeros_like : Return an array of zeros with shape and type of input.
    full_like : Return a new array with shape of input filled with value.
    ones : Return a new array setting values to one.

    Examples
    --------
    >>> x = np.arange(6).reshape((2, 3))
    >>> np.ones_like(x)
    array([[1., 1., 1.],
           [1., 1., 1.]])
    >>> np.ones_like(x, int)
    array([[1, 1, 1],
           [1, 1, 1]], dtype=int64)
    >>> y = np.arange(3, dtype=float)
    >>> np.ones_like(y)
    array([1., 1., 1.], dtype=float64)
    """
    # ones_like is full_like with a fixed fill value of 1.
    return _mx_nd_np.full_like(a, fill_value=1, dtype=dtype, order=order,
                               ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def fill_diagonal(a, val, wrap=False):
    """Fill the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
    locations with indices ``a[i, ..., i]`` all identical.  This function
    modifies the input array in-place; it does not return a value.

    Parameters
    ----------
    a : array, at least 2-D.
        Array whose diagonal is to be filled; it gets modified in-place.
    val : scalar
        Value to be written on the diagonal; its type must be compatible
        with that of the array `a`.
    wrap : bool
        For tall matrices in NumPy versions up to 1.6.2, the diagonal
        "wrapped" after N columns.  This option restores that behavior;
        it affects only tall matrices.

    Examples
    --------
    >>> a = np.zeros((3, 3), int)
    >>> np.fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])

    The wrap option affects only tall matrices:

    >>> a = np.zeros((5, 3), int)
    >>> np.fill_diagonal(a, 4, wrap=True)
    >>> a
    array([[4, 0, 0],
           [0, 4, 0],
           [0, 0, 4],
           [0, 0, 0],
           [4, 0, 0]])

    The anti-diagonal can be filled by reversing the order of elements
    using either `numpy.flipud` or `numpy.fliplr`:

    >>> a = np.zeros((3, 3), int);
    >>> np.fill_diagonal(np.fliplr(a), [1,2,3])  # Horizontal flip
    >>> a
    array([[0, 0, 1],
           [0, 2, 0],
           [3, 0, 0]])

    Note that the order in which the diagonal is filled varies depending
    on the flip function.
    """
    # In-place operation: deliberately no return value, mirroring NumPy.
    _mx_nd_np.fill_diagonal(a, val=val, wrap=wrap)
@set_module('mxnet.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
    """Replace NaN with zero and infinity with large finite numbers
    (default behaviour) or with the numbers defined by the user via the
    `nan`, `posinf` and/or `neginf` keywords.

    If `x` is inexact: NaN is replaced by zero or by the value given in
    `nan`; positive infinity is replaced by the largest finite value
    representable by ``x.dtype`` or by the value given in `posinf`;
    negative infinity is replaced by the most negative finite value
    representable by ``x.dtype`` or by the value given in `neginf`.
    If `x` is not inexact, no replacements are made.

    Parameters
    ----------
    x : scalar or ndarray
        Input data.
    copy : bool, optional
        Whether to create a copy of `x` (True) or to replace values
        in-place (False).  The in-place operation only occurs if casting
        to an array does not require a copy.  Default is True.
        Gluon does not support ``copy=False``.
    nan : int, float, optional
        Value used to fill NaN values; defaults to 0.0.
    posinf : int, float, optional
        Value used to fill positive infinity values; defaults to a very
        large number.
    neginf : int, float, optional
        Value used to fill negative infinity values; defaults to a very
        small (or negative) number.

    Returns
    -------
    out : ndarray
        `x` with the non-finite values replaced.  If `copy` is False,
        this may be `x` itself.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754): Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.nan_to_num(np.inf)
    1.7976931348623157e+308
    >>> np.nan_to_num(-np.inf)
    -1.7976931348623157e+308
    >>> np.nan_to_num(np.nan)
    0.0
    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
    >>> np.nan_to_num(x)
    array([ 3.4028235e+38, -3.4028235e+38,  0.0000000e+00, -1.2800000e+02,
            1.2800000e+02])
    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
    array([ 3.3333332e+07,  3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
            1.2800000e+02])
    """
    # Forward all replacement values to the imperative implementation.
    return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf,
                                neginf=neginf)
@set_module('mxnet.numpy')
def squeeze(x, axis=None):
    """Remove single-dimensional entries from the shape of an array.

    Parameters
    ----------
    x : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Selects a subset of the single-dimensional entries in the shape.
        If an axis is selected with shape entry greater than one, an
        error is raised.

    Returns
    -------
    squeezed : ndarray
        The input array, but with all or a subset of the dimensions of
        length 1 removed.  This is always `x` itself or a view into `x`.

    Raises
    ------
    ValueError
        If `axis` is not `None` and an axis being squeezed is not of
        length 1.

    See Also
    --------
    expand_dims : The inverse operation, adding singleton dimensions
    reshape : Insert, remove, and combine dimensions, and resize existing ones

    Examples
    --------
    >>> x = np.array([[[0], [1], [2]]])
    >>> x.shape
    (1, 3, 1)
    >>> np.squeeze(x).shape
    (3,)
    >>> np.squeeze(x, axis=0).shape
    (3, 1)
    >>> np.squeeze(x, axis=2).shape
    (1, 3)
    """
    # Delegate to the imperative ndarray implementation.
    result = _mx_nd_np.squeeze(x, axis=axis)
    return result
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
    """Test element-wise for NaN and return result as a boolean array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it
        must have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where `x` is NaN, false otherwise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).  This function differs from the original `numpy.isnan
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isnan(np.nan)
    True
    >>> np.isnan(np.inf)
    False
    >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
    array([ True, False, False])
    """
    # Forward to the imperative implementation, preserving out/kwargs.
    return _mx_nd_np.isnan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
    """Test element-wise for positive or negative infinity.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it
        must have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where `x` is positive or negative infinity, false otherwise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754), so Not a Number is not equivalent to infinity.
    This function differs from the original `numpy.isinf
    <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
    the following aspects:

    - Does not support complex number for now
    - Input type does not support Python native iterables(list, tuple, ...).
    - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output.
    - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output.
    - ``out`` param does not support scalar input case.

    Examples
    --------
    >>> np.isinf(np.inf)
    True
    >>> np.isinf(np.nan)
    False
    >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
    array([ True,  True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isinf(x, y)
    array([ True, False,  True])
    """
    # Forward to the imperative implementation, preserving out/kwargs.
    return _mx_nd_np.isinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.
        If provided, it must have the same shape and dtype as input ndarray.
        If not provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where x is positive infinity, false otherwise.
        This is a scalar if x is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
    This means that Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isposinf(np.inf)
    True
    >>> np.isposinf(-np.inf)
    False
    >>> np.isposinf(np.nan)
    False
    >>> np.isposinf(np.array([-np.inf, 0., np.inf]))
    array([False, False,  True])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isposinf(x, y)
    array([False, False,  True])
    >>> y
    array([False, False,  True])
    """
    # CONSISTENCY FIX: this function lives in the mxnet.numpy front-end,
    # so it must register under 'mxnet.numpy' like every sibling here,
    # not 'mxnet.ndarray.numpy' (which belongs to the imperative module).
    return _mx_nd_np.isposinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
    """Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it
        must have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where `x` is negative infinity, false otherwise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754), so Not a Number is not equivalent to infinity.

    Examples
    --------
    >>> np.isneginf(-np.inf)
    True
    >>> np.isneginf(np.inf)
    False
    >>> np.isneginf(float('-inf'))
    True
    >>> np.isneginf(np.array([-np.inf, 0., np.inf]))
    array([ True, False, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isneginf(x, y)
    array([ True, False, False])
    """
    # Forward to the imperative implementation, preserving out/kwargs.
    return _mx_nd_np.isneginf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
    """Test element-wise for finiteness (not infinity and not Not a Number).

    Parameters
    ----------
    x : ndarray
        Input array.
    out : ndarray or None, optional
        A location into which the result is stored.  If provided, it
        must have the same shape and dtype as the input ndarray.  If not
        provided or `None`, a freshly-allocated array is returned.

    Returns
    -------
    y : ndarray or bool
        True where `x` is finite, false otherwise.
        This is a scalar if `x` is a scalar.

    Notes
    -----
    Not a Number, positive infinity and negative infinity are considered
    to be non-finite.

    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
    (IEEE 754).  Not a Number is not equivalent to infinity, and positive
    infinity is not equivalent to negative infinity, but infinity is
    equivalent to positive infinity.  Errors result if the second
    argument is also supplied when `x` is a scalar input, or if the first
    and second arguments have different shapes.

    Examples
    --------
    >>> np.isfinite(1)
    True
    >>> np.isfinite(np.nan)
    False
    >>> np.isfinite(np.inf)
    False
    >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
    array([False,  True, False])
    >>> x = np.array([-np.inf, 0., np.inf])
    >>> y = np.array([True, True, True], dtype=np.bool_)
    >>> np.isfinite(x, y)
    array([False,  True, False])
    """
    # Forward to the imperative implementation, preserving out/kwargs.
    return _mx_nd_np.isfinite(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def where(condition, x=None, y=None):
    """where(condition, [x, y])
    Return elements chosen from `x` or `y` depending on `condition`.

    .. note::
        When only `condition` is provided, this function is a shorthand
        for ``np.asarray(condition).nonzero()``.  The rest of this
        documentation covers only the case where all three arguments are
        provided.

    Parameters
    ----------
    condition : ndarray
        Where True, yield `x`, otherwise yield `y`.
    x, y : ndarray
        Values from which to choose.  `x`, `y` and `condition` need to
        be broadcastable to some shape.  `x` and `y` must have the same
        dtype.

    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    Notes
    -----
    If all the arrays are 1-D, `where` is equivalent to::

        [xv if c else yv
         for c, xv, yv in zip(condition, x, y)]

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.where(a < 5, a, 10*a)
    array([ 0.,  1.,  2.,  3.,  4., 50., 60., 70., 80., 90.])

    This can be used on multidimensional arrays too:

    >>> cond = np.array([[True, False], [True, True]])
    >>> x = np.array([[1, 2], [3, 4]])
    >>> y = np.array([[9, 8], [7, 6]])
    >>> np.where(cond, x, y)
    array([[1., 8.],
           [3., 4.]])

    >>> a = np.array([[0, 1, 2],
    ...               [0, 2, 4],
    ...               [0, 3, 6]])
    >>> np.where(a < 4, a, -1)  # -1 is broadcast
    array([[ 0.,  1.,  2.],
           [ 0.,  2., -1.],
           [ 0.,  3., -1.]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.where(condition, x, y)
@set_module('mxnet.numpy')
def polyval(p, x):
    """Evaluate a polynomial at specific values.

    If `p` is of length N, this function returns the value::

        p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]

    If `x` is a sequence, then ``p(x)`` is returned for each element of
    `x`.  If `x` is another polynomial, the composite polynomial
    ``p(x(t))`` is returned.

    Parameters
    ----------
    p : ndarray
        1D array of polynomial coefficients (including coefficients equal
        to zero), from highest degree to the constant term.
    x : ndarray
        An array of numbers at which to evaluate `p`.

    Returns
    -------
    values : ndarray
        Result array of polynomials.

    Notes
    -----
    This function differs from the original `numpy.polyval
    <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
    the following way(s):

    - Does not support poly1d.
    - `x` should be ndarray type even if it contains only one element.

    Examples
    --------
    >>> p = np.array([3, 0, 1])
    >>> x = np.array([5])
    >>> np.polyval(p, x)  # 3 * 5**2 + 0 * 5**1 + 1
    array([76.])
    >>> np.polyval(p, np.array([5, 4]))
    array([76., 49.])
    """
    # Delegate to the imperative ndarray implementation.
    values = _mx_nd_np.polyval(p, x)
    return values
@set_module('mxnet.numpy')
def bincount(x, weights=None, minlength=0):
    """Count number of occurrences of each value in array of non-negative ints.

    Parameters
    ----------
    x : ndarray
        Input array, 1 dimension, non-negative ints.
    weights : ndarray, optional
        Input weights, same shape as `x`.
    minlength : int, optional
        A minimum number of bins for the output.

    Returns
    -------
    out : ndarray
        The result of binning the input array.  The length of `out` is
        equal to ``amax(x)+1``.

    Raises
    ------
    ValueError
        If the input is not 1-dimensional, or contains elements with
        negative values, or if `minlength` is negative.
    TypeError
        If the type of the input is float or complex.

    Examples
    --------
    >>> np.bincount(np.arange(5))
    array([1, 1, 1, 1, 1])
    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
    array([1, 3, 1, 1, 0, 0, 0, 1])
    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
    >>> np.bincount(x).size == np.amax(x)+1
    True
    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])  # weights
    >>> x = np.array([0, 1, 1, 2, 2, 2])
    >>> np.bincount(x, weights=w)
    array([ 0.3,  0.7,  1.1])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.bincount(x, weights=weights, minlength=minlength)
@set_module('mxnet.numpy')
def atleast_1d(*arys):
    """Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])
    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(np.array(1), np.array([3, 4]))
    [array([1.]), array([3., 4.])]
    """
    # Forward the variadic arguments unchanged.
    return _mx_nd_np.atleast_1d(*arys)
@set_module('mxnet.numpy')
def atleast_2d(*arys):
    """Convert inputs to arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are made only if necessary.

    See also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])
    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))
    [array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]
    """
    # Forward the variadic arguments unchanged.
    return _mx_nd_np.atleast_2d(*arys)
@set_module('mxnet.numpy')
def atleast_3d(*arys):
    """Convert inputs to arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : ndarray
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``.
        For example, a 1-D array of shape ``(N,)`` becomes a view of
        shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes
        a view of shape ``(M, N, 1)``.

    See also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])
    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)
    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    """
    # Forward the variadic arguments unchanged.
    return _mx_nd_np.atleast_3d(*arys)
@set_module('mxnet.numpy')
def pad(x, pad_width=None, mode="constant", **kwargs):  # pylint: disable=too-many-arguments
    # pylint: disable=too-many-return-statements
    """Pad an array.

    Parameters
    ----------
    x : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), ... (before_N, after_N))`` gives unique
        pad widths for each axis; ``((before, after),)`` yields the same
        before and after pad for each axis; ``(pad,)`` or ``int`` is a
        shortcut for ``before = after = pad`` for all axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.

        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of the array.
        'linear_ramp'
            not supported yet
        'maximum'
            Pads with the maximum value of all of the vector along each
            axis.
        'mean'
            not supported yet
        'median'
            not supported yet
        'minimum'
            Pads with the minimum value of all of the vector along each
            axis.
        'reflect'
            Pads with the reflection of the vector mirrored on the first
            and last values of the vector along each axis.
        'symmetric'
            Pads with the reflection of the vector mirrored along the
            edge of the array.
        'wrap'
            not supported yet.
        'empty'
            not supported yet.
        <function>
            not supported yet.
    stat_length : not supported yet
    constant_values : scalar, optional
        Used in 'constant'.  The values to set the padded values for
        each axis.  Default is 0.
    end_values : not supported yet
    reflect_type : {'even', 'odd'}, optional
        Only 'even' is supported for now.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `x`, with shape increased
        according to `pad_width`.

    Examples
    --------
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])
    >>> np.pad(a, (2, 2), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])
    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
    """
    # Forward everything (including mode-specific kwargs such as
    # constant_values) to the imperative implementation.
    return _mx_nd_np.pad(x, pad_width=pad_width, mode=mode, **kwargs)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None):  # pylint: disable=too-many-arguments
    """Return the product of array elements over a given axis.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : None or int or tuple of ints, optional
        Axis or axes along which the product is performed.  ``None`` (the
        default) multiplies every element of the input array; negative
        values count from the last axis.  A tuple reduces over all the
        listed axes at once.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are multiplied.  Defaults to the dtype of `a`, except
        that integer inputs of lower precision than the platform integer
        are promoted to the platform (or unsigned platform) integer.
    out : ndarray, optional
        Alternative output array; must have the expected output shape.
        Values are cast if necessary.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts correctly against the input array.
    initial : scalar, optional
        Starting value for the product.

    Returns
    -------
    product_along_axis : ndarray, see `dtype` parameter above.
        An array shaped like `a` with the reduced axes removed, or a
        reference to `out` if it was supplied.

    Examples
    --------
    >>> np.prod([1., 2.])
    2.0
    >>> np.prod([[1., 2.], [3., 4.]])
    24.0
    >>> np.prod([[1., 2.], [3., 4.]], axis=1)
    array([ 2., 12.])
    >>> np.prod([1, 2], initial=5)
    10
    """
    # Thin front-end: all work happens in the imperative ndarray module.
    return _mx_nd_np.prod(a, axis=axis, dtype=dtype, out=out,
                          keepdims=keepdims, initial=initial)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def cumsum(a, axis=None, dtype=None, out=None):
    """Return the cumulative sum of the elements along a given axis.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed; ``None`` (the
        default) computes the cumsum over the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, except that integer inputs of lower precision
        than the platform integer are promoted to the platform integer.
    out : ndarray, optional
        Alternative output array with the same shape and buffer length as
        the expected output; the type is cast if necessary.

    Returns
    -------
    cumsum_along_axis : ndarray.
        A new array holding the result (or a reference to `out` when it
        is given).  Same size as `a`, and the same shape as `a` when
        `axis` is not None or `a` is 1-D.

    Examples
    --------
    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> np.cumsum(a)
    array([ 1,  3,  6, 10, 15, 21])
    >>> np.cumsum(a, dtype=float)
    array([ 1.,  3.,  6., 10., 15., 21.])
    >>> np.cumsum(a, axis=0)
    array([[1, 2, 3],
           [5, 7, 9]])
    >>> np.cumsum(a, axis=1)
    array([[ 1,  3,  6],
           [ 4,  9, 15]])
    """
    # Delegate straight to the imperative ndarray implementation.
    return _mx_nd_np.cumsum(a, axis=axis, dtype=dtype, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def rollaxis(a, axis, start=0):
    """Roll the specified axis backwards until it lies at position `start`.

    Parameters
    ----------
    a : ndarray
        Input array.
    axis : integer
        The axis to roll backwards.  The relative order of the remaining
        axes is unchanged.
    start : int, optional
        The axis is rolled until it lies before this position.  The
        default, 0, results in a "complete" roll.

    Returns
    -------
    res : ndarray
        A view of `a` with the axis moved.

    Examples
    --------
    >>> a = np.ones((3, 4, 5, 6))
    >>> np.rollaxis(a, 3, 1).shape
    (3, 6, 4, 5)
    >>> np.rollaxis(a, 2).shape
    (5, 3, 4, 6)
    >>> np.rollaxis(a, 1, 4).shape
    (3, 5, 6, 4)
    """
    # Pure pass-through to the imperative ndarray implementation.
    return _mx_nd_np.rollaxis(a, axis, start)
@set_module('mxnet.numpy')
def diag(v, k=0):
    """Extract a diagonal or construct a diagonal array.

    With a 1-D input, build a 2-D array carrying the input on its `k`-th
    diagonal (all other elements zero); with a 2-D input, return the
    `k`-th diagonal.

    Parameters
    ----------
    v : ndarray
        Input array.
    k : int, optional
        Diagonal offset.  0 (the default) selects the main diagonal;
        positive values select diagonals above it, negative values
        diagonals below it.

    Returns
    -------
    out : ndarray
        The extracted diagonal or the constructed diagonal array.

    Examples
    --------
    >>> x = np.arange(9).reshape((3, 3))
    >>> np.diag(x)
    array([0, 4, 8])
    >>> np.diag(x, k=1)
    array([1, 5])
    >>> np.diag(x, k=-1)
    array([3, 7])
    >>> np.diag(np.diag(x))
    array([[0, 0, 0],
           [0, 4, 0],
           [0, 0, 8]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.diag(v, k=k)
@set_module('mxnet.numpy')
def diagflat(v, k=0):
    """Create a two-dimensional array with the flattened input on a diagonal.

    Parameters
    ----------
    v : array_like
        Input data; it is flattened and written to the `k`-th diagonal of
        the output.
    k : int, optional
        Diagonal to set.  0 (the default) is the "main" diagonal; a
        positive (negative) `k` selects the diagonal that many positions
        above (below) the main one.

    Returns
    -------
    out : ndarray
        The 2-D output array.

    See Also
    --------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
    diagonal : Return specified diagonals.
    trace : Sum along diagonals.

    Examples
    --------
    >>> np.diagflat([[1, 2], [3, 4]])
    array([[1, 0, 0, 0],
           [0, 2, 0, 0],
           [0, 0, 3, 0],
           [0, 0, 0, 4]])
    >>> np.diagflat([1, 2], 1)
    array([[0, 1, 0],
           [0, 0, 2],
           [0, 0, 0]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.diagflat(v, k=k)
@set_module('mxnet.numpy')
def diagonal(a, offset=0, axis1=0, axis2=1):
    """Return the specified diagonals of an array.

    For a 2-D input this is the collection of elements ``a[i, i + offset]``.
    For inputs with more than two dimensions, `axis1` and `axis2` choose
    the 2-D sub-arrays whose diagonals are taken; the output shape is the
    input shape with those two axes removed and a trailing axis of the
    diagonal length appended.

    Parameters
    ----------
    a : ndarray
        Input data from which the diagonals are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal.
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays.
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays.

    Returns
    -------
    out : ndarray
        Output result.

    Raises
    ------
    ValueError
        If the dimension of `a` is less than 2.

    Examples
    --------
    >>> a = np.arange(4).reshape(2, 2)
    >>> np.diagonal(a)
    array([0, 3])
    >>> np.diagonal(a, 1)
    array([1])
    >>> a = np.arange(8).reshape(2, 2, 2)
    >>> np.diagonal(a, 0, 0, 1)
    array([[0, 6],
           [1, 7]])
    """
    # Delegate to the imperative ndarray implementation.
    return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
# pylint: disable=redefined-outer-name, too-many-arguments
@set_module('mxnet.numpy')
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
    r"""Sum of array elements over a given axis.

    Parameters
    ----------
    a : ndarray
        Input data.
    axis : None or int, optional
        Axis or axes along which the sum is performed.  ``None`` (the
        default) sums every element of the input array; negative values
        count from the last axis.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  The default
        type is float32.
    out : ndarray or None, optional
        Alternative output array; it must have the same shape and dtype
        as the expected output.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts correctly against the input array.
    initial : Currently only supports None as input, optional
        Starting value for the sum.  Not implemented yet; pass ``None``
        or skip this argument.
    where : optional
        Forwarded to the backend implementation.

    Returns
    -------
    sum_along_axis : ndarray
        An ndarray shaped like `a` with the specified axis removed, or a
        reference to `out` if an output array was supplied.

    Notes
    -----
    - Python native iterables (list, tuple, ...) are not supported inputs.
    - ``out`` cannot trigger an automatic type change; its dtype must be
      the same as the expected output.
    - Arithmetic is modular for integer types; no error is raised on
      overflow.
    - The sum of an empty array is the neutral element 0.
    - This differs from the original `numpy.sum
      <https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_
      in the aspects listed above and in defaulting to float32.

    Examples
    --------
    >>> np.sum(np.array([0.5, 1.5]))
    array(2.)
    >>> np.sum(np.array([0.5, 0.7, 0.2, 1.5]), dtype=np.int32)
    array(2, dtype=int32)
    >>> a = np.array([[0, 1], [0, 5]])
    >>> np.sum(a)
    array(6.)
    >>> np.sum(a, axis=0)
    array([0., 6.])
    >>> np.sum(a, axis=1)
    array([1., 5.])

    With an output ndarray:

    >>> b = np.ones((2,), dtype=np.float32)
    >>> np.sum(a, axis=0, out=b)
    array([0., 6.])

    If the accumulator is too small, overflow occurs:

    >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
    array(-128, dtype=int8)
    """
    # Thin front-end over the imperative ndarray implementation.
    return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out,
                         keepdims=keepdims, initial=initial, where=where)
# pylint: enable=redefined-outer-name, too-many-arguments
| apache-2.0 | 568,628,752,666,648,770 | 32.522831 | 152 | 0.5828 | false |
Azure/azure-sdk-for-python | sdk/servicefabric/azure-mgmt-servicefabric/azure/mgmt/servicefabric/aio/operations/_managed_cluster_versions_operations.py | 1 | 4986 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional 'cls' continuation popped from kwargs: it receives
# the pipeline response, the deserialized result, and an extra dict (passed as
# {} in this module) and may transform the final return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClusterVersionsOperations:
    """ManagedClusterVersionsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.servicefabric.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def list_by_os(
        self,
        location: str,
        os_type: Union[str, "_models.Enum23"],
        **kwargs: Any
    ) -> List["_models.ManagedClusterVersionDetails"]:
        """Gets the list of Service Fabric cluster code versions available for the specified OS type.
        Gets all available code versions for Service Fabric cluster resources by OS type.
        :param location: The location for the cluster code versions. This is different from cluster
         location.
        :type location: str
        :param os_type: The operating system of the cluster.
        :type os_type: str or ~azure.mgmt.servicefabric.models.Enum23
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of ManagedClusterVersionDetails, or the result of cls(response)
        :rtype: list[~azure.mgmt.servicefabric.models.ManagedClusterVersionDetails]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.ManagedClusterVersionDetails"]]
        # Map auth/not-found/conflict status codes to the standard azure-core
        # exceptions; callers may override or extend via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01-preview"
        accept = "application/json"
        # Construct URL: fill the path template from list_by_os.metadata below.
        url = self.list_by_os.metadata['url']  # type: ignore
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'osType': self._serialize.url("os_type", os_type, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Send the GET through the client pipeline and await the response.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Raise the mapped exception if one applies, otherwise a generic
            # HttpResponseError carrying the deserialized service error body.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorModel, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('[ManagedClusterVersionDetails]', pipeline_response)
        if cls:
            # Let the caller-supplied continuation shape the final result.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_by_os.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceFabric/managedclusters/locations/{location}/osType/{osType}/clusterVersions'}  # type: ignore
| mit | 6,898,372,519,886,849,000 | 47.407767 | 187 | 0.67509 | false |
Luxapodular/processing.py | examples.py/3D/Form/CubicGrid.py | 7 | 1157 | """
Cubic Grid
by Ira Greenberg.
3D translucent colored grid uses nested pushMatrix()
and popMatrix() functions.
"""
boxSize = 40  # edge length of each cube; also the step between lattice points
margin = boxSize*2  # inset applied to the loop bounds on every axis
depth = 400  # z-extent of the grid (also how far the camera is pushed back)
def setup():
    """One-time sketch setup: 640x360 3D canvas, boxes drawn without outlines."""
    size(640, 360, P3D)
    noStroke()
def draw():
    """Render one frame: a slowly spinning lattice of translucent boxes."""
    background(255)
    # Move the origin to the screen centre, push it back, and spin the
    # whole grid a little more each frame.
    translate(width/2, height/2, -depth)
    rotateY(frameCount * 0.01)
    rotateX(frameCount * 0.01)
    # Walk the three lattice axes with nested matrix stacks so each box
    # gets its own translation without disturbing its neighbours.
    z = -depth/2+margin
    while z <= depth/2-margin:
        pushMatrix()
        y = -height+margin
        while y <= height-margin:
            pushMatrix()
            x = -width + margin
            while x <= width-margin:
                # Colour tracks position; abs() keeps every channel in
                # the legal 0..255 range. Alpha 50 makes boxes translucent.
                boxTint = color(abs(z), abs(y), abs(x), 50)
                pushMatrix()
                translate(x, y, z)
                fill(boxTint)
                box(boxSize, boxSize, boxSize)
                popMatrix()
                x += boxSize
            popMatrix()
            y += boxSize
        popMatrix()
        z += boxSize
| apache-2.0 | 3,391,533,236,897,459,700 | 24.711111 | 66 | 0.521175 | false |
Acehaidrey/incubator-airflow | airflow/contrib/operators/s3_to_sftp_operator.py | 7 | 1179 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.s3_to_sftp`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.transfers.s3_to_sftp import S3ToSFTPOperator # noqa
# Emitted at import time so any DAG still referencing this legacy module path
# surfaces the deprecation; stacklevel=2 attributes the warning to the
# importing module rather than to this shim.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.transfers.s3_to_sftp`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 | 2,654,155,200,695,928,000 | 39.655172 | 97 | 0.7676 | false |
googleinterns/ddsp-docker | mvp/trainer/ddsp_run_hypertune.py | 1 | 7760 | # Copyright 2020 The DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train, evaluate, or sample (from) a ddsp model.
Usage:
================================================================================
For training, you need to specify --gin_file for both the model and the dataset.
You can optionally specify additional params with --gin_param.
The pip install installs a `ddsp_run` script that can be called directly.
================================================================================
ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir=~/tmp/$USER-ddsp-0 \
--gin_file=models/ae.gin \
--gin_file=datasets/nsynth.gin \
--gin_param=batch_size=16
================================================================================
For evaluation and sampling, only the dataset file is required.
================================================================================
ddsp_run \
--mode=eval \
--alsologtostderr \
--save_dir=~/tmp/$USER-ddsp-0 \
--gin_file=datasets/nsynth.gin
ddsp_run \
--mode=sample \
--alsologtostderr \
--save_dir=~/tmp/$USER-ddsp-0 \
--gin_file=datasets/nsynth.gin
================================================================================
The directory `gin/papers/` stores configs that give the specific models and
datasets used for a paper's experiments, so only require one gin file to train.
================================================================================
ddsp_run \
--mode=train \
--alsologtostderr \
--save_dir=~/tmp/$USER-ddsp-0 \
--gin_file=papers/iclr2020/nsynth_ae.gin
"""
import os
import time
from absl import app
from absl import flags
from absl import logging
from ddsp.training import eval_util
from ddsp.training import models
import gin
import pkg_resources
import tensorflow.compat.v2 as tf
import helper_functions
import magenta_ddsp_internals.train_util as train_util
import magenta_ddsp_internals.trainers as trainers
FLAGS = flags.FLAGS
# Program flags.
flags.DEFINE_enum('mode', 'train', ['train', 'eval', 'sample'],
                  'Whether to train, evaluate, or sample from the model.')
flags.DEFINE_string('save_dir', '~/tmp/ddsp',
                    'Path where checkpoints and summary events will be saved '
                    'during training and evaluation.')
flags.DEFINE_string('restore_dir', '',
                    'Path from which checkpoints will be restored before '
                    'training. Can be different than the save_dir.')
flags.DEFINE_string('tpu', '', 'Address of the TPU. No TPU if left blank.')
flags.DEFINE_multi_string('gpu', [],
                          'Addresses of GPUs for sync data-parallel training.'
                          'Only needs to be specified for using multiple GPUs.')
flags.DEFINE_boolean('allow_memory_growth', False,
                     'Whether to grow the GPU memory usage as is needed by the '
                     'process. Prevents crashes on GPUs with smaller memory.')
# Gin config flags.
flags.DEFINE_multi_string('gin_search_path', [],
                          'Additional gin file search paths.')
flags.DEFINE_multi_string('gin_file', [], 'List of paths to the config files.')
flags.DEFINE_multi_string('gin_param', [],
                          'Newline separated list of Gin parameter bindings.')
# Evaluation/sampling specific flags.
flags.DEFINE_boolean('run_once', False, 'Whether evaluation will run once.')
flags.DEFINE_integer('initial_delay_secs', None,
                     'Time to wait before evaluation starts')
# Gin search path rooted at the config directory bundled with this package.
GIN_PATH = pkg_resources.resource_filename(__name__, 'gin')
# Local scratch path for an operative config copied from cloud storage;
# referenced only by the (currently commented-out) restore block in parse_gin.
LAST_OPERATIVE_CONFIG_PATH = '/root/trainer/gin/last_config.gin'
def delay_start():
  """Sleep for --initial_delay_secs before the run begins, if requested."""
  wait_secs = FLAGS.initial_delay_secs
  if not wait_secs:
    return
  logging.info('Waiting for %i second(s)', wait_secs)
  time.sleep(wait_secs)
def parse_gin(restore_dir):
  """Parse gin config from --gin_file, --gin_param, and the model directory.

  Args:
    restore_dir: Directory a previous run may have written its operative
      config to (only relevant to the disabled block below).
  """
  # Add user folders to the gin search path.
  for gin_search_path in [GIN_PATH] + FLAGS.gin_search_path:
    gin.add_config_file_search_path(gin_search_path)
  # Parse gin configs, later calls override earlier ones.
  with gin.unlock_config():
    # Optimization defaults.
    use_tpu = bool(FLAGS.tpu)
    opt_default = 'base.gin' if not use_tpu else 'base_tpu.gin'
    gin.parse_config_file(os.path.join('optimization', opt_default))
    # NOTE(review): restoring a previous run's operative config is
    # deliberately disabled in this variant (left commented, not deleted,
    # for easy re-enabling).
    # Load operative_config if it exists (model has already trained).
    # operative_config = train_util.get_latest_operative_config(restore_dir)
    # if tf.io.gfile.exists(operative_config):
    #   # Copy the config file from gstorage
    #   helper_functions.copy_config_file_from_gstorage(operative_config, LAST_OPERATIVE_CONFIG_PATH)
    #   logging.info('Using operative config: %s', operative_config)
    #   gin.parse_config_file(LAST_OPERATIVE_CONFIG_PATH, skip_unknown=True)
    #   gin.parse_config_file(operative_config, skip_unknown=True)
    # User gin config and user hyperparameters from flags.
    gin.parse_config_files_and_bindings(
        FLAGS.gin_file, FLAGS.gin_param, skip_unknown=True)
def allow_memory_growth():
  """Turn on on-demand GPU memory allocation for every visible GPU."""
  physical_gpus = tf.config.experimental.list_physical_devices('GPU')
  if not physical_gpus:
    return
  try:
    # Memory growth must be set identically on every GPU, before any of
    # them has been initialized.
    for device in physical_gpus:
      tf.config.experimental.set_memory_growth(device, True)
  except RuntimeError as e:
    # Raised when a GPU was already initialized; report and continue.
    print(e)
def main(unused_argv):
  """Parse gin config and run ddsp training, evaluation, or sampling."""
  restore_dir = os.path.expanduser(FLAGS.restore_dir)
  save_dir = os.path.expanduser(FLAGS.save_dir)
  # If no separate restore directory is given, use the save directory.
  restore_dir = save_dir if not restore_dir else restore_dir
  logging.info('Restore Dir: %s', restore_dir)
  logging.info('Save Dir: %s', save_dir)
  parse_gin(restore_dir)
  if FLAGS.allow_memory_growth:
    allow_memory_growth()
  # Training.
  if FLAGS.mode == 'train':
    # Distribution strategy covering single-/multi-GPU (--gpu) and TPU (--tpu).
    strategy = train_util.get_strategy(tpu=FLAGS.tpu, gpus=FLAGS.gpu)
    with strategy.scope():
      # Build the model and trainer under the strategy scope.
      model = models.get_model()
      trainer = trainers.Trainer(model, strategy)
    # data_provider=gin.REQUIRED: the dataset must be bound via gin config.
    train_util.train(data_provider=gin.REQUIRED,
                     trainer=trainer,
                     save_dir=save_dir,
                     restore_dir=restore_dir)
  # Evaluation.
  elif FLAGS.mode == 'eval':
    model = models.get_model()
    delay_start()  # Optionally wait (--initial_delay_secs) before starting.
    eval_util.evaluate(data_provider=gin.REQUIRED,
                       model=model,
                       save_dir=save_dir,
                       restore_dir=restore_dir,
                       run_once=FLAGS.run_once)
  # Sampling.
  elif FLAGS.mode == 'sample':
    model = models.get_model()
    delay_start()  # Optionally wait (--initial_delay_secs) before starting.
    eval_util.sample(data_provider=gin.REQUIRED,
                     model=model,
                     save_dir=save_dir,
                     restore_dir=restore_dir,
                     run_once=FLAGS.run_once)
def console_entry_point():
  """From pip installed script."""
  app.run(main)
# Also support direct execution (python ddsp_run_hypertune.py ...).
if __name__ == '__main__':
  console_entry_point()
| apache-2.0 | 150,508,015,260,629,540 | 35.261682 | 101 | 0.628737 | false |
jhajek/euca2ools | euca2ools/commands/autoscaling/deleteautoscalinggroup.py | 6 | 2065 | # Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
from euca2ools.commands.autoscaling import AutoScalingRequest
from requestbuilder import Arg
class DeleteAutoScalingGroup(AutoScalingRequest):
    """Request wrapper for the DeleteAutoScalingGroup auto-scaling action."""
    DESCRIPTION = 'Delete an auto-scaling group'
    ARGS = [Arg('AutoScalingGroupName', metavar='ASGROUP',
                help='name of the auto-scaling group to delete (required)'),
            # dest='ForceDelete' / const='true': sends the service's
            # ForceDelete flag only when -d/--force-delete is supplied.
            Arg('-d', '--force-delete', dest='ForceDelete',
                action='store_const', const='true',
                help='''delete the group and all of its instances without
                waiting for all instances to terminate'''),
            # Hidden flag kept for backward CLI compatibility; route_to=None
            # presumably keeps it out of the request parameters -- confirm
            # against requestbuilder's Arg semantics.
            Arg('-f', '--force', action='store_true', route_to=None,
                help=argparse.SUPPRESS)]  # for compatibility
| bsd-2-clause | 1,306,719,623,280,529,700 | 50.625 | 76 | 0.73753 | false |
faush01/plugin.video.embycon | resources/lib/menu_functions.py | 1 | 46090 | # coding=utf-8
# Gnu General Public License - see LICENSE.TXT
import os
import sys
import json
import urllib
import base64
import xbmcplugin
import xbmcaddon
import xbmc
from .downloadutils import DownloadUtils
from .kodi_utils import add_menu_directory_item, HomeWindow
from .simple_logging import SimpleLogging
from .translation import string_load
from .datamanager import DataManager
from .utils import get_art, get_emby_url
from .custom_nodes import CustomNode, load_custom_nodes
log = SimpleLogging(__name__)  # per-module logger
downloadUtils = DownloadUtils()  # module-level DownloadUtils instance
__addon__ = xbmcaddon.Addon()  # handle to this add-on (settings/metadata)
def show_movie_tags(menu_params):
    """Build a Kodi directory listing of movie tags.

    Queries the Emby /Tags endpoint (optionally restricted to one library
    via menu_params["parent_id"]) and adds one folder per tag; opening a
    folder lists the movies carrying that tag.

    :param menu_params: plugin route parameters; "parent_id" (optional)
        restricts both the tag query and the per-tag movie queries.
    """
    log.debug("show_movie_tags: {0}", menu_params)
    parent_id = menu_params.get("parent_id")

    # Query the list of tags used by movies.
    url_params = {}
    url_params["UserId"] = "{userid}"
    url_params["SortBy"] = "SortName"
    url_params["SortOrder"] = "Ascending"
    url_params["CollapseBoxSetItems"] = False
    url_params["GroupItemsIntoCollections"] = False
    url_params["Recursive"] = True
    url_params["IsMissing"] = False
    url_params["EnableTotalRecordCount"] = False
    url_params["EnableUserData"] = False
    url_params["IncludeItemTypes"] = "Movie"
    if parent_id:
        url_params["ParentId"] = parent_id
    url = get_emby_url("{server}/emby/Tags", url_params)

    data_manager = DataManager()
    result = data_manager.get_content(url)
    if not result:
        return

    # Guard against a response with no "Items" key.
    tags = result.get("Items") or []
    log.debug("Tags : {0}", result)

    for tag in tags:
        name = tag["Name"]
        tag_id = tag["Id"]

        url_params = {}
        url_params["IncludeItemTypes"] = "Movie"
        url_params["CollapseBoxSetItems"] = False
        url_params["GroupItemsIntoCollections"] = False
        url_params["Recursive"] = True
        url_params["IsMissing"] = False
        url_params["ImageTypeLimit"] = 1
        url_params["SortBy"] = "Name"
        url_params["SortOrder"] = "Ascending"
        url_params["Fields"] = "{field_filters}"
        url_params["TagIds"] = tag_id
        if parent_id:
            # Bug fix: this previously wrote to menu_params, so the
            # per-tag movie query ignored the selected library view.
            url_params["ParentId"] = parent_id
        item_url = get_emby_url("{server}/emby/Users/{userid}/Items", url_params)

        art = {"thumb": "http://localhost:24276/" + base64.b64encode(item_url)}
        content_url = urllib.quote(item_url)
        url = sys.argv[0] + ("?url=" +
                             content_url +
                             "&mode=GET_CONTENT" +
                             "&media_type=movies")
        log.debug("addMenuDirectoryItem: {0} - {1}", name, url)
        add_menu_directory_item(name, url, art=art)

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_movie_years(menu_params):
    """Build a Kodi directory of movie release years (or decades).

    When menu_params["group"] == "true", years are bucketed into decades
    ("1980-1989", ...) and each folder queries all ten years of its
    decade; otherwise one folder is added per year present on the server.

    :param menu_params: plugin route parameters; honours "parent_id" and
        "group".
    """
    log.debug("show_movie_years: {0}", menu_params)
    parent_id = menu_params.get("parent_id")
    group_into_decades = menu_params.get("group") == "true"

    # Ask the server which years appear in the movie library.
    year_query = {
        "UserId": "{userid}",
        "SortBy": "SortName",
        "SortOrder": "Ascending",
        "CollapseBoxSetItems": False,
        "GroupItemsIntoCollections": False,
        "Recursive": True,
        "IsMissing": False,
        "EnableTotalRecordCount": False,
        "EnableUserData": False,
        "IncludeItemTypes": "Movie",
    }
    if parent_id:
        year_query["ParentId"] = parent_id
    years_url = get_emby_url("{server}/emby/Years", year_query)

    data_manager = DataManager()
    result = data_manager.get_content(years_url)
    if not result:
        return

    # Map each folder label to the list of year strings it should query.
    label_to_years = {}
    for year_item in result.get("Items"):
        year_name = year_item.get("Name")
        if group_into_decades:
            year_int = int(year_name)
            decade_start = year_int - year_int % 10
            label = str(decade_start) + "-" + str(decade_start + 9)
            # Include every year of the decade, present on the server or not.
            label_to_years[label] = [str(y) for y in
                                     range(decade_start, decade_start + 10)]
        else:
            label_to_years[year_name] = [year_name]

    for label in sorted(label_to_years):
        movie_query = {
            "IncludeItemTypes": "Movie",
            "CollapseBoxSetItems": False,
            "GroupItemsIntoCollections": False,
            "Recursive": True,
            "IsMissing": False,
            "ImageTypeLimit": 1,
            "SortBy": "Name",
            "SortOrder": "Ascending",
            "Fields": "{field_filters}",
            "Years": ",".join(label_to_years[label]),
        }
        if parent_id:
            movie_query["ParentId"] = parent_id
        item_url = get_emby_url("{server}/emby/Users/{userid}/Items", movie_query)

        art = {"thumb": "http://localhost:24276/" + base64.b64encode(item_url)}
        content_url = urllib.quote(item_url)
        url = sys.argv[0] + ("?url=" +
                             content_url +
                             "&mode=GET_CONTENT" +
                             "&media_type=movies")
        log.debug("addMenuDirectoryItem: {0} - {1}", label, url)
        add_menu_directory_item(label, url, art=art)

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_movie_pages(menu_params):
    """Show the movie library split into fixed-size pages.

    Performs a count-only query first, then adds one directory entry per
    page of ``moviePageSize`` movies (falling back to 20 when the setting
    is 0/unset).

    :param menu_params: plugin query parameters; "parent_id" (optional)
        restricts the pages to a single library view.
    """
    log.debug("showMoviePages: {0}", menu_params)
    parent_id = menu_params.get("parent_id")
    settings = xbmcaddon.Addon()
    group_movies = settings.getSetting('group_movies') == "true"
    # Count query only - images are not needed, so keep ImageTypeLimit at 0.
    params = {}
    params["IncludeItemTypes"] = "Movie"
    params["CollapseBoxSetItems"] = str(group_movies)
    params["GroupItemsIntoCollections"] = str(group_movies)
    params["Recursive"] = True
    params["IsMissing"] = False
    params["ImageTypeLimit"] = 0
    if parent_id:
        params["ParentId"] = parent_id
    url = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    data_manager = DataManager()
    result = data_manager.get_content(url)
    if result is None:
        return
    total_results = result.get("TotalRecordCount", 0)
    log.debug("showMoviePages TotalRecordCount {0}", total_results)
    # BUGFIX: this used to read "if result == 0", comparing the whole
    # response dict against 0 (always False), so an empty library was
    # never short-circuited here. Check the actual count instead.
    if total_results == 0:
        return
    page_limit = int(settings.getSetting('moviePageSize'))
    if page_limit == 0:
        page_limit = 20
    start_index = 0
    collections = []
    while start_index < total_results:
        params = {}
        params["IncludeItemTypes"] = "Movie"
        params["CollapseBoxSetItems"] = str(group_movies)
        params["GroupItemsIntoCollections"] = str(group_movies)
        params["Recursive"] = True
        params["IsMissing"] = False
        params["ImageTypeLimit"] = 1
        params["SortBy"] = "Name"
        params["SortOrder"] = "Ascending"
        params["Fields"] = "{field_filters}"
        params["StartIndex"] = start_index
        params["Limit"] = page_limit
        if parent_id:
            params["ParentId"] = parent_id
        item_url = get_emby_url("{server}/emby/Users/{userid}/Items", params)
        # Clamp the displayed range of the last (possibly short) page.
        page_upper = min(start_index + page_limit, total_results)
        item_data = {}
        item_data['title'] = "Page (" + str(start_index + 1) + " - " + str(page_upper) + ")"
        item_data['path'] = item_url
        item_data['media_type'] = 'movies'
        # Thumb is served by the local image proxy with the item URL embedded.
        item_data["art"] = {"thumb": "http://localhost:24276/" + base64.b64encode(item_url)}
        collections.append(item_data)
        start_index = start_index + page_limit
    for collection in collections:
        content_url = urllib.quote(collection['path'])
        url = sys.argv[0] + ("?url=" + content_url +
                             "&mode=GET_CONTENT" +
                             "&media_type=" + collection["media_type"])
        log.debug("addMenuDirectoryItem: {0} - {1} - {2}", collection.get('title'), url, collection.get("art"))
        add_menu_directory_item(collection.get('title', string_load(30250)), url, art=collection.get("art"))
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_genre_list(menu_params):
    """List the genres of a movie or TV-show library as browsable entries.

    ``menu_params`` may carry "parent_id" (restrict to one view) and
    "item_type" ("tvshow" switches the query from movies to series).
    """
    log.debug("showGenreList: {0}", menu_params)

    server = downloadUtils.get_server()
    if server is None:
        return

    parent_id = menu_params.get("parent_id")
    item_type = menu_params.get("item_type")

    if item_type is not None and item_type == "tvshow":
        emby_type, kodi_type = "Series", "tvshows"
    else:
        emby_type, kodi_type = "Movie", "Movies"

    genre_query = {}
    genre_query["IncludeItemTypes"] = emby_type
    genre_query["UserId"] = "{userid}"
    genre_query["Recursive"] = True
    genre_query["SortBy"] = "Name"
    genre_query["SortOrder"] = "Ascending"
    genre_query["ImageTypeLimit"] = 1
    if parent_id is not None:
        genre_query["ParentId"] = parent_id
    genre_url = get_emby_url("{server}/emby/Genres", genre_query)

    response = DataManager().get_content(genre_url)
    genres = response.get("Items") if response is not None else []

    settings = xbmcaddon.Addon()
    group_movies = settings.getSetting('group_movies') == "true"

    xbmcplugin.setContent(int(sys.argv[1]), 'genres')

    for genre in genres:
        title = genre.get("Name")

        item_query = {}
        item_query["Recursive"] = True
        item_query["CollapseBoxSetItems"] = str(group_movies)
        item_query["GroupItemsIntoCollections"] = str(group_movies)
        item_query["GenreIds"] = genre.get("Id")
        item_query["IncludeItemTypes"] = emby_type
        item_query["ImageTypeLimit"] = 1
        item_query["Fields"] = "{field_filters}"
        if parent_id is not None:
            item_query["ParentId"] = parent_id
        path = get_emby_url("{server}/emby/Users/{userid}/Items", item_query)

        # Thumb is served by the local image proxy with the item URL embedded.
        art = {"thumb": "http://localhost:24276/" + base64.b64encode(path)}

        url = sys.argv[0] + ("?url=" + urllib.quote(path) +
                             "&mode=GET_CONTENT" +
                             "&media_type=" + kodi_type)
        log.debug("addMenuDirectoryItem: {0} - {1} - {2}", title, url, art)
        add_menu_directory_item(title if title is not None else title, url, art=art)

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_movie_alpha_list(menu_params):
    """Build an alphabetical (title-prefix) picker for the movie library."""
    log.debug("== ENTER: showMovieAlphaList() ==")
    xbmcplugin.setContent(int(sys.argv[1]), 'movies')

    settings = xbmcaddon.Addon()
    server = downloadUtils.get_server()
    if server is None:
        return

    group_movies = settings.getSetting('group_movies') == "true"
    parent_id = menu_params.get("parent_id")

    prefix_query = {}
    prefix_query["IncludeItemTypes"] = "Movie"
    prefix_query["Recursive"] = True
    prefix_query["GroupItemsIntoCollections"] = group_movies
    prefix_query["UserId"] = "{userid}"
    prefix_query["SortBy"] = "Name"
    prefix_query["SortOrder"] = "Ascending"
    if parent_id is not None:
        prefix_query["ParentId"] = parent_id
    prefix_url = get_emby_url("{server}/emby/Items/Prefixes", prefix_query)

    prefixes = DataManager().get_content(prefix_url)
    if not prefixes:
        return

    for prefix in prefixes:
        alpha_name = prefix.get("Name")

        item_query = {}
        item_query["Fields"] = "{field_filters}"
        item_query["CollapseBoxSetItems"] = group_movies
        item_query["GroupItemsIntoCollections"] = group_movies
        item_query["Recursive"] = True
        item_query["IncludeItemTypes"] = "Movie"
        item_query["SortBy"] = "Name"
        item_query["SortOrder"] = "Ascending"
        item_query["ImageTypeLimit"] = 1
        if parent_id is not None:
            item_query["ParentId"] = parent_id
        if alpha_name == "#":
            # Non-alphabetic titles: everything that sorts before "A".
            item_query["NameLessThan"] = "A"
        else:
            item_query["NameStartsWith"] = alpha_name
        path = get_emby_url("{server}/emby/Users/{userid}/Items", item_query)

        art = {"thumb": "http://localhost:24276/" + base64.b64encode(path)}
        url = (sys.argv[0] + "?url=" + urllib.quote(path) +
               "&mode=GET_CONTENT&media_type=Movies")
        log.debug("addMenuDirectoryItem: {0} ({1})", alpha_name, url)
        add_menu_directory_item(alpha_name, url, art=art)

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_tvshow_alpha_list(menu_params):
    """Build an alphabetical (title-prefix) picker for the TV show library.

    :param menu_params: plugin query parameters; "parent_id" (optional)
        restricts both the prefix query and the per-letter listings to a
        single library view.
    """
    log.debug("== ENTER: showTvShowAlphaList() ==")
    server = downloadUtils.get_server()
    if server is None:
        return
    parent_id = menu_params.get("parent_id")
    url_params = {}
    url_params["IncludeItemTypes"] = "Series"
    url_params["Recursive"] = True
    url_params["UserId"] = "{userid}"
    url_params["SortBy"] = "Name"
    url_params["SortOrder"] = "Ascending"
    if parent_id is not None:
        # BUGFIX: this was written into menu_params instead of url_params,
        # so the prefix query ignored the selected library view and always
        # returned prefixes for the whole server.
        url_params["ParentId"] = parent_id
    prefix_url = get_emby_url("{server}/emby/Items/Prefixes", url_params)
    data_manager = DataManager()
    result = data_manager.get_content(prefix_url)
    if not result:
        return
    alpha_list = []
    for prefix in result:
        alpha_list.append(prefix.get("Name"))
    collections = []
    for alpha_name in alpha_list:
        item_data = {}
        item_data['title'] = alpha_name
        item_data['media_type'] = "tvshows"
        params = {}
        params["Fields"] = "{field_filters}"
        params["ImageTypeLimit"] = 1
        params["IncludeItemTypes"] = "Series"
        params["SortBy"] = "Name"
        params["SortOrder"] = "Ascending"
        params["Recursive"] = True
        params["IsMissing"] = False
        if parent_id is not None:
            params["ParentId"] = parent_id
        if alpha_name == "#":
            # Non-alphabetic titles: everything that sorts before "A".
            params["NameLessThan"] = "A"
        else:
            params["NameStartsWith"] = alpha_name
        path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
        item_data['path'] = path
        # Thumb is served by the local image proxy with the item URL embedded.
        art = {"thumb": "http://localhost:24276/" + base64.b64encode(path)}
        item_data['art'] = art
        collections.append(item_data)
    for collection in collections:
        url = (sys.argv[0] + "?url=" + urllib.quote(collection['path']) +
               "&mode=GET_CONTENT&media_type=" + collection["media_type"])
        log.debug("addMenuDirectoryItem: {0} ({1})", collection.get('title'), url)
        add_menu_directory_item(collection.get('title', string_load(30250)), url, art=collection.get("art"))
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def display_main_menu():
    """Render the top-level add-on menu (library, global lists, widgets...)."""
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')

    base = "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type="
    entries = [
        (string_load(30406), "library"),
        (string_load(30407), "show_global_types"),
        (string_load(30408), "show_custom_widgets"),
        (string_load(30409), "addon_items"),
        ("Custom Nodes", "custom_nodes"),
    ]
    for label, menu_type in entries:
        add_menu_directory_item(label, base + menu_type)

    xbmcplugin.endOfDirectory(handle)
def display_menu(params):
    """Dispatch an add-on menu request to its handler based on the "type"
    query parameter; unknown types are silently ignored."""
    handlers = {
        "library": display_library_views,
        "library_item": display_library_view,
        "show_global_types": show_global_types,
        "global_list_movies": lambda p: display_movies_type(p, None),
        "global_list_tvshows": lambda p: display_tvshow_type(p, None),
        "show_custom_widgets": lambda p: show_widgets(),
        "addon_items": display_addon_menu,
        "show_movie_years": show_movie_years,
        "show_movie_tags": show_movie_tags,
        "custom_nodes": show_custom_nodes,
        "create_new_node": create_new_node,
    }
    handler = handlers.get(params.get("type"))
    if handler is not None:
        handler(params)
def create_new_node(params):
    """Open the custom-node editor dialog (CustomNode.xml from this add-on)."""
    log.debug("Create New Custom Node")
    addon = xbmcaddon.Addon()
    skin_path = xbmc.translatePath(addon.getAddonInfo('path'))
    editor = CustomNode("CustomNode.xml", skin_path, "default", "720p")
    editor.doModal()
def get_node_url(node_info):
    """Translate a saved custom-node definition into an Emby items URL.

    Each recognised key in ``node_info`` maps onto one server query
    parameter; keys that are absent or falsy are omitted from the URL.
    """
    log.debug("get_node_url : {0}", node_info)

    query = {}
    query["Fields"] = "{field_filters}"
    query["ImageTypeLimit"] = 1
    query["IsMissing"] = False

    if node_info.get("item_parent"):
        query["ParentId"] = node_info["item_parent"]
    if node_info.get("recursive"):
        query["Recursive"] = node_info["recursive"]
    if node_info.get("item_type"):
        query["IncludeItemTypes"] = node_info["item_type"]
    if node_info.get("item_limit"):
        query["Limit"] = node_info["item_limit"]
    if node_info.get("group"):
        # Grouping drives two server-side switches at once.
        query["GroupItemsIntoCollections"] = node_info["group"]
        query["CollapseBoxSetItems"] = node_info["group"]
    if node_info.get("watched"):
        query["IsPlayed"] = node_info["watched"]
    if node_info.get("inprogress") == "True":
        query["Filters"] = "IsResumable"
    if node_info.get("sortby"):
        query["SortBy"] = node_info["sortby"]
    if node_info.get("sortorder"):
        query["SortOrder"] = node_info["sortorder"]

    return get_emby_url("{server}/emby/Users/{userid}/Items", query)
def show_custom_nodes(params):
    """List saved custom nodes alphabetically, preceded by an editor entry."""
    log.debug("Show Custom Nodes")

    add_menu_directory_item("[Edit Nodes]",
                            "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type=create_new_node")

    # Saved nodes, sorted by name.
    custom_nodes = load_custom_nodes()
    for node_name in sorted(custom_nodes):
        encoded_name = urllib.quote_plus(node_name)
        add_menu_directory_item(node_name,
                                "plugin://plugin.video.embycon/?mode=SHOW_NODE_CONTENT&node_name=" + encoded_name)

    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_global_types(params):
    """Show the server-wide (all-library) movie and TV show menus."""
    handle = int(sys.argv[1])
    base = "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type="
    for label_id, menu_type in ((30256, "global_list_movies"),
                                (30261, "global_list_tvshows")):
        add_menu_directory_item(string_load(label_id), base + menu_type)
    xbmcplugin.endOfDirectory(handle)
def display_homevideos_type(menu_params, view):
    """Build the menu entries (all / in-progress / recently added) for a
    home-videos library view."""
    handle = int(sys.argv[1])
    view_name = view.get("Name")

    settings = xbmcaddon.Addon()
    show_x_filtered_items = settings.getSetting("show_x_filtered_items")
    hide_watched = settings.getSetting("hide_watched") == "true"

    # Shared query fragment; copied into every filtered query below.
    base_params = {
        "ParentId": view.get("Id"),
        "Recursive": False,
        "IsMissing": False,
        "Fields": "{field_filters}",
        "ImageTypeLimit": 1,
    }

    # All home videos (top level of the view only).
    path = get_emby_url("{server}/emby/Users/{userid}/Items", base_params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=homevideos"
    add_menu_directory_item(view_name + string_load(30405), url)

    # In-progress home videos.
    params = dict(base_params)
    params["Filters"] = "IsResumable"
    params["Recursive"] = True
    params["Limit"] = "{ItemLimit}"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=homevideos"
    add_menu_directory_item(view_name + string_load(30267) + " (" + show_x_filtered_items + ")", url)

    # Recently added home videos, optionally filtering out watched items.
    params = dict(base_params)
    params["Recursive"] = True
    params["SortBy"] = "DateCreated"
    params["SortOrder"] = "Descending"
    params["Filters"] = "IsNotFolder"
    if hide_watched:
        params["IsPlayed"] = False
    params["Limit"] = "{ItemLimit}"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=homevideos"
    add_menu_directory_item(view_name + string_load(30268) + " (" + show_x_filtered_items + ")", url)

    xbmcplugin.endOfDirectory(handle)
def display_addon_menu(params):
    """Show the add-on utility actions (search, sessions, user switching,
    settings and cache maintenance)."""
    entries = (
        (30246, "SEARCH"),
        (30017, "SHOW_SERVER_SESSIONS"),
        (30012, "CHANGE_USER"),
        (30011, "DETECT_SERVER_USER"),
        (30435, "DETECT_CONNECTION_SPEED"),
        (30254, "SHOW_SETTINGS"),
        (30395, "CLEAR_CACHE"),
        (30293, "CACHE_ARTWORK"),
    )
    for label_id, mode in entries:
        add_menu_directory_item(string_load(label_id),
                                "plugin://plugin.video.embycon/?mode=" + mode)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def display_tvshow_type(menu_params, view):
    """Build the menu entries for a TV show library view.
    When *view* is None the entries cover every TV show on the server;
    otherwise they are restricted to the given view via ParentId.
    Adds: all shows, favourites, shows with unplayed episodes, in-progress
    episodes, latest episodes, recently added episodes, next-up episodes,
    a genre list and an A-Z picker.
    """
    handle = int(sys.argv[1])
    # Fallback label when no specific view was given ("TV Shows").
    view_name = string_load(30261)
    if view is not None:
        view_name = view.get("Name")
    settings = xbmcaddon.Addon()
    # Item-count label appended to the filtered lists below, e.g. "(20)".
    show_x_filtered_items = settings.getSetting("show_x_filtered_items")
    # All TV Shows
    # base_params is the shared query fragment copied into every list below.
    base_params = {}
    if view is not None:
        base_params["ParentId"] = view.get("Id")
    base_params["Fields"] = "{field_filters}"
    base_params["ImageTypeLimit"] = 1
    base_params["IsMissing"] = False
    base_params["IncludeItemTypes"] = "Series"
    base_params["Recursive"] = True
    path = get_emby_url("{server}/emby/Users/{userid}/Items", base_params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=tvshows"
    add_menu_directory_item(view_name + string_load(30405), url)
    # Favorite TV Shows
    params = {}
    params.update(base_params)
    params["Filters"] = "IsFavorite"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=tvshows"
    add_menu_directory_item(view_name + string_load(30414), url)
    # Tv Shows with unplayed
    params = {}
    params.update(base_params)
    params["IsPlayed"] = False
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=tvshows"
    add_menu_directory_item(view_name + string_load(30285), url)
    # In progress episodes
    # Note the switch to IncludeItemTypes=Episode for the episode lists below.
    params = {}
    params.update(base_params)
    params["Limit"] = "{ItemLimit}"
    params["SortBy"] = "DatePlayed"
    params["SortOrder"] = "Descending"
    params["Filters"] = "IsResumable"
    params["IncludeItemTypes"] = "Episode"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=Episodes&sort=none"
    # name_format controls how episode titles are rendered in the list.
    url += "&name_format=" + urllib.quote('Episode|episode_name_format')
    add_menu_directory_item(view_name + string_load(30267) + " (" + show_x_filtered_items + ")", url)
    # Latest Episodes
    params = {}
    params.update(base_params)
    params["Limit"] = "{ItemLimit}"
    params["SortBy"] = "DateCreated"
    params["SortOrder"] = "Descending"
    params["IncludeItemTypes"] = "Episode"
    path = get_emby_url("{server}/emby/Users/{userid}/Items/Latest", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=tvshows&sort=none"
    add_menu_directory_item(view_name + string_load(30288) + " (" + show_x_filtered_items + ")", url)
    # Recently Added
    params = {}
    params.update(base_params)
    params["Limit"] = "{ItemLimit}"
    params["SortBy"] = "DateCreated"
    params["SortOrder"] = "Descending"
    params["Filters"] = "IsNotFolder"
    params["IncludeItemTypes"] = "Episode"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=Episodes&sort=none"
    url += "&name_format=" + urllib.quote('Episode|episode_name_format')
    add_menu_directory_item(view_name + string_load(30268) + " (" + show_x_filtered_items + ")", url)
    # Next Up Episodes
    params = {}
    params.update(base_params)
    params["Limit"] = "{ItemLimit}"
    # NOTE(review): "Userid" casing differs from the "UserId" used elsewhere
    # in this file - the server appears to accept it, but confirm.
    params["Userid"] = "{userid}"
    params["SortBy"] = "DateCreated"
    params["SortOrder"] = "Descending"
    params["Filters"] = "IsNotFolder"
    params["IncludeItemTypes"] = "Episode"
    # NOTE(review): "Legacynextup" casing - presumably LegacyNextUp on the
    # server side; verify the parameter actually takes effect.
    params["Legacynextup"] = "true"
    path = get_emby_url("{server}/emby/Shows/NextUp", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=Episodes&sort=none"
    url += "&name_format=" + urllib.quote('Episode|episode_name_format')
    add_menu_directory_item(view_name + string_load(30278) + " (" + show_x_filtered_items + ")", url)
    # TV Show Genres
    path = "plugin://plugin.video.embycon/?mode=GENRES&item_type=tvshow"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30325), path)
    # TV Show Alpha picker
    path = "plugin://plugin.video.embycon/?mode=TVSHOW_ALPHA"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30404), path)
    xbmcplugin.endOfDirectory(handle)
def display_music_type(menu_params, view):
    """Build the menu entries for a music library view: all albums,
    recently added, recently played, most played and album artists.
    """
    handle = int(sys.argv[1])
    view_name = view.get("Name")
    settings = xbmcaddon.Addon()
    # Item-count label appended to the filtered lists below, e.g. "(20)".
    show_x_filtered_items = settings.getSetting("show_x_filtered_items")
    # all albums
    params = {}
    params["ParentId"] = view.get("Id")
    params["Recursive"] = True
    params["ImageTypeLimit"] = 1
    params["IncludeItemTypes"] = "MusicAlbum"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=MusicAlbums"
    add_menu_directory_item(view_name + string_load(30320), url)
    # recently added
    # Queries individual tracks (Audio) via the /Latest endpoint.
    params = {}
    params["ParentId"] = view.get("Id")
    params["ImageTypeLimit"] = 1
    params["IncludeItemTypes"] = "Audio"
    params["Limit"] = "{ItemLimit}"
    path = get_emby_url("{server}/emby/Users/{userid}/Items/Latest", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=MusicAlbums"
    add_menu_directory_item(view_name + string_load(30268) + " (" + show_x_filtered_items + ")", url)
    # recently played
    params = {}
    params["ParentId"] = view.get("Id")
    params["Recursive"] = True
    params["ImageTypeLimit"] = 1
    params["IncludeItemTypes"] = "Audio"
    params["Limit"] = "{ItemLimit}"
    params["IsPlayed"] = True
    params["SortBy"] = "DatePlayed"
    params["SortOrder"] = "Descending"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    # NOTE(review): media_type here is "MusicAlbum" (singular) unlike the
    # "MusicAlbums" used above - confirm whether the difference is intentional.
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=MusicAlbum"
    add_menu_directory_item(view_name + string_load(30349) + " (" + show_x_filtered_items + ")", url)
    # most played
    params = {}
    params["ParentId"] = view.get("Id")
    params["Recursive"] = True
    params["ImageTypeLimit"] = 1
    params["IncludeItemTypes"] = "Audio"
    params["Limit"] = "{ItemLimit}"
    params["IsPlayed"] = True
    params["SortBy"] = "PlayCount"
    params["SortOrder"] = "Descending"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    # NOTE(review): singular "MusicAlbum" again - see note above.
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=MusicAlbum"
    add_menu_directory_item(view_name + string_load(30353) + " (" + show_x_filtered_items + ")", url)
    # artists
    params = {}
    params["ParentId"] = view.get("Id")
    params["Recursive"] = True
    params["ImageTypeLimit"] = 1
    path = get_emby_url("{server}/emby/Artists/AlbumArtists", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=MusicArtists"
    add_menu_directory_item(view_name + string_load(30321), url)
    xbmcplugin.endOfDirectory(handle)
def display_musicvideos_type(params, view):
    """Build the menu for a music-videos library view (single "all" entry)."""
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')
    view_name = view.get("Name")

    query = {
        "ParentId": view.get("Id"),
        "Recursive": False,
        "ImageTypeLimit": 1,
        "IsMissing": False,
        "Fields": "{field_filters}",
    }
    path = get_emby_url("{server}/emby/Users/{userid}/Items", query)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=musicvideos"
    add_menu_directory_item(view_name + string_load(30405), url)

    xbmcplugin.endOfDirectory(handle)
def display_livetv_type(menu_params, view):
    """Build the menu entries (channels, what's on now, recordings) for a
    Live TV view."""
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')
    view_name = view.get("Name")

    def add_entry(path, label_id):
        # All Live TV entries share the same plugin URL shape.
        url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=livetv"
        add_menu_directory_item(view_name + string_load(label_id), url)

    # Channels
    channel_query = {
        "UserId": "{userid}",
        "Recursive": False,
        "ImageTypeLimit": 1,
        "Fields": "{field_filters}",
    }
    add_entry(get_emby_url("{server}/emby/LiveTv/Channels", channel_query), 30360)

    # Currently airing programs
    program_query = {
        "UserId": "{userid}",
        "IsAiring": True,
        "ImageTypeLimit": 1,
        "Fields": "ChannelInfo,{field_filters}",
        "EnableTotalRecordCount": False,
    }
    add_entry(get_emby_url("{server}/emby/LiveTv/Programs/Recommended", program_query), 30361)

    # Recordings
    recording_query = {
        "UserId": "{userid}",
        "Recursive": False,
        "ImageTypeLimit": 1,
        "Fields": "{field_filters}",
        "EnableTotalRecordCount": False,
    }
    add_entry(get_emby_url("{server}/emby/LiveTv/Recordings", recording_query), 30362)

    xbmcplugin.endOfDirectory(handle)
def display_movies_type(menu_params, view):
    """Build the menu entries for a movie library view.
    When *view* is None the entries cover every movie on the server;
    otherwise they are restricted to the given view via ParentId.
    Adds: all, favourites, unwatched, recently watched, resumable and
    recently added movies, collections (all/favourite), genres, pages,
    A-Z picker, years, decades and tags.
    """
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')
    # Fallback label when no specific view was given ("Movies").
    view_name = string_load(30256)
    if view is not None:
        view_name = view.get("Name")
    settings = xbmcaddon.Addon()
    # Item-count label appended to the filtered lists below, e.g. "(20)".
    show_x_filtered_items = settings.getSetting("show_x_filtered_items")
    group_movies = settings.getSetting('group_movies') == "true"
    hide_watched = settings.getSetting("hide_watched") == "true"
    # base_params is the shared query fragment copied into every list below.
    base_params = {}
    if view is not None:
        base_params["ParentId"] = view.get("Id")
    base_params["IncludeItemTypes"] = "Movie"
    base_params["CollapseBoxSetItems"] = str(group_movies)
    base_params["GroupItemsIntoCollections"] = str(group_movies)
    base_params["Recursive"] = True
    base_params["IsMissing"] = False
    base_params["Fields"] = "{field_filters}"
    base_params["ImageTypeLimit"] = 1
    # All Movies
    path = get_emby_url("{server}/emby/Users/{userid}/Items", base_params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies"
    add_menu_directory_item(view_name + string_load(30405), url)
    # Favorite Movies
    # Grouping is forced off so individual favourites inside box sets show up.
    params = {}
    params.update(base_params)
    params["CollapseBoxSetItems"] = False
    params["GroupItemsIntoCollections"] = False
    params["Filters"] = "IsFavorite"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies"
    add_menu_directory_item(view_name + string_load(30414), url)
    # Unwatched Movies
    params = {}
    params.update(base_params)
    params["CollapseBoxSetItems"] = False
    params["GroupItemsIntoCollections"] = False
    params["IsPlayed"] = False
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies"
    add_menu_directory_item(view_name + string_load(30285), url)
    # Recently Watched Movies
    params = {}
    params.update(base_params)
    params["IsPlayed"] = True
    params["SortBy"] = "DatePlayed"
    params["SortOrder"] = "Descending"
    params["CollapseBoxSetItems"] = False
    params["GroupItemsIntoCollections"] = False
    params["Limit"] = "{ItemLimit}"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies&sort=none"
    add_menu_directory_item(view_name + string_load(30349) + " (" + show_x_filtered_items + ")", url)
    # Resumable Movies
    params = {}
    params.update(base_params)
    params["Filters"] = "IsResumable"
    params["SortBy"] = "DatePlayed"
    params["SortOrder"] = "Descending"
    params["Limit"] = "{ItemLimit}"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies&sort=none"
    add_menu_directory_item(view_name + string_load(30267) + " (" + show_x_filtered_items + ")", url)
    # Recently Added Movies
    params = {}
    params.update(base_params)
    if hide_watched:
        params["IsPlayed"] = False
    params["SortBy"] = "DateCreated"
    params["SortOrder"] = "Descending"
    params["Filters"] = "IsNotFolder"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=movies&sort=none"
    add_menu_directory_item(view_name + string_load(30268) + " (" + show_x_filtered_items + ")", url)
    # Collections
    params = {}
    if view is not None:
        params["ParentId"] = view.get("Id")
    params["Fields"] = "{field_filters}"
    params["ImageTypeLimit"] = 1
    params["IncludeItemTypes"] = "Boxset"
    params["Recursive"] = True
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=boxsets"
    add_menu_directory_item(view_name + string_load(30410), url)
    # Favorite Collections
    # Deliberately reuses the Collections params dict, just adding a filter.
    params["Filters"] = "IsFavorite"
    path = get_emby_url("{server}/emby/Users/{userid}/Items", params)
    url = sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=boxsets"
    add_menu_directory_item(view_name + string_load(30415), url)
    # Genres
    path = "plugin://plugin.video.embycon/?mode=GENRES&item_type=movie"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30325), path)
    # Pages
    path = "plugin://plugin.video.embycon/?mode=MOVIE_PAGES"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30397), path)
    # Alpha Picker
    path = "plugin://plugin.video.embycon/?mode=MOVIE_ALPHA"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30404), path)
    # Years
    path = "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type=show_movie_years"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30411), path)
    # Decades
    path = "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type=show_movie_years&group=true"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30412), path)
    # Tags
    path = "plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU&type=show_movie_tags"
    if view is not None:
        path += "&parent_id=" + view.get("Id")
    add_menu_directory_item(view_name + string_load(30413), path)
    xbmcplugin.endOfDirectory(handle)
def display_library_views(params):
    """List the user's library views (movies, tvshows, music...) as
    top-level menu entries, skipping unsupported collection types."""
    handle = int(sys.argv[1])
    xbmcplugin.setContent(handle, 'files')

    server = downloadUtils.get_server()
    if server is None:
        return

    data_manager = DataManager()
    views = data_manager.get_content("{server}/emby/Users/{userid}/Views?format=json")
    if not views:
        return []

    supported_types = ["movies", "tvshows", "homevideos", "boxsets", "playlists",
                       "music", "musicvideos", "livetv", "Channel"]

    for view in views.get("Items"):
        collection_type = view.get('CollectionType', None)
        item_type = view.get('Type', None)
        if collection_type not in supported_types and item_type != "Channel":
            continue

        art = get_art(item=view, server=server)
        art['landscape'] = downloadUtils.get_artwork(view, "Primary", server=server)

        # Playlist, box set and channel views resolve straight to a content
        # listing; everything else opens a per-view sub menu.
        if collection_type == "playlists":
            plugin_path = get_playlist_path(view)
        elif collection_type == "boxsets":
            plugin_path = get_collection_path(view)
        elif collection_type is None and item_type == "Channel":
            plugin_path = get_channel_path(view)
        else:
            plugin_path = ("plugin://plugin.video.embycon/?mode=SHOW_ADDON_MENU"
                           "&type=library_item&view_id=" + view.get("Id"))

        add_menu_directory_item(view.get("Name"), plugin_path, art=art)

    xbmcplugin.endOfDirectory(handle)
def get_playlist_path(view_info):
    """Return the plugin URL that lists the contents of a playlists view."""
    query = {
        "ParentId": view_info.get("Id"),
        "Fields": "{field_filters}",
        "ImageTypeLimit": 1,
    }
    path = get_emby_url("{server}/emby/Users/{userid}/Items", query)
    return sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=playlists"
def get_collection_path(view_info):
    """Return the plugin URL that lists the box sets of a collections view."""
    query = {
        "ParentId": view_info.get("Id"),
        "Fields": "{field_filters}",
        "ImageTypeLimit": 1,
        "IncludeItemTypes": "Boxset",
        "CollapseBoxSetItems": True,
        "GroupItemsIntoCollections": True,
        "Recursive": True,
        "IsMissing": False,
    }
    path = get_emby_url("{server}/emby/Users/{userid}/Items", query)
    return sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=boxsets"
def get_channel_path(view):
    """Return the plugin URL that lists the contents of a channel view."""
    query = {
        "ParentId": view.get("Id"),
        "IsMissing": False,
        "ImageTypeLimit": 1,
        "Fields": "{field_filters}",
    }
    path = get_emby_url("{server}/emby/Users/{userid}/Items", query)
    return sys.argv[0] + "?url=" + urllib.quote(path) + "&mode=GET_CONTENT&media_type=files"
def display_library_view(params):
    """Resolve a library view by its id and dispatch to the per-type menu
    builder; unknown collection types are silently ignored."""
    node_id = params.get("view_id")
    view_info = DataManager().get_content("{server}/emby/Users/{userid}/Items/" + node_id)
    log.debug("VIEW_INFO : {0}", view_info)

    handlers = {
        "movies": display_movies_type,
        "tvshows": display_tvshow_type,
        "homevideos": display_homevideos_type,
        "music": display_music_type,
        "musicvideos": display_musicvideos_type,
        "livetv": display_livetv_type,
    }
    handler = handlers.get(view_info.get("CollectionType", None))
    if handler is not None:
        handler(params, view_info)
def show_widgets():
    """List the widget entry points (recent/in-progress/random rows) as a directory."""
    settings = xbmcaddon.Addon()
    # User-configured item count, shown in each label, e.g. "Recent Movies (20)".
    show_x_filtered_items = settings.getSetting("show_x_filtered_items")
    base = 'plugin://plugin.video.embycon/'
    add_menu_directory_item("All Movies", base + 'library/movies')
    # (localized-string id, WIDGET_CONTENT type) for each widget row.
    widget_rows = [
        (30257, 'recent_movies'),
        (30258, 'inprogress_movies'),
        (30269, 'random_movies'),
        (30403, 'movie_recommendations'),
        (30287, 'recent_tvshows'),
        (30263, 'recent_episodes'),
        (30264, 'inprogress_episodes'),
        (30265, 'nextup_episodes'),
    ]
    for string_id, widget_type in widget_rows:
        label = string_load(string_id) + " (" + show_x_filtered_items + ")"
        add_menu_directory_item(label, base + '?mode=WIDGET_CONTENT&type=' + widget_type)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def show_search():
    """List one NEW_SEARCH entry per searchable Emby item type."""
    # (localized-string id, Emby item type) pairs, in display order.
    search_types = [
        (30231, 'Movie'),
        (30229, 'Series'),
        (30235, 'Episode'),
        (30337, 'Audio'),
        (30338, 'MusicAlbum'),
        (30339, 'Person'),
    ]
    for string_id, item_type in search_types:
        add_menu_directory_item(
            string_load(string_id),
            'plugin://plugin.video.embycon/?mode=NEW_SEARCH&item_type=' + item_type)
    xbmcplugin.endOfDirectory(int(sys.argv[1]))
def set_library_window_values(force=False):
    """Publish up to 20 library views as Kodi home-window properties.

    Skins read ``view_item.<n>.{name,id,type,thumb}`` to build home-screen
    shortcuts.  Unless *force* is True, this is a no-op when the properties
    are already populated (checked via ``view_item.0.name``).
    """
    log.debug("set_library_window_values Called forced={0}", force)
    home_window = HomeWindow()
    already_set = home_window.get_property("view_item.0.name")
    if not force and already_set:
        return
    # Clear all 20 slots first so stale entries never survive a refresh.
    for index in range(0, 20):
        home_window.clear_property("view_item.%i.name" % index)
        home_window.clear_property("view_item.%i.id" % index)
        home_window.clear_property("view_item.%i.type" % index)
        home_window.clear_property("view_item.%i.thumb" % index)
    data_manager = DataManager()
    url = "{server}/emby/Users/{userid}/Views"
    result = data_manager.get_content(url)
    if result is None:
        return
    result = result.get("Items")
    server = downloadUtils.get_server()
    index = 0
    for item in result:
        collection_type = item.get("CollectionType")
        # Only these collection types get a home-screen slot.
        if collection_type in ["movies", "boxsets", "music", "tvshows"]:
            name = item.get("Name")
            item_id = item.get("Id")
            # Properties are namespaced by the skin as plugin.video.embycon-<prop>.
            prop_name = "view_item.%i.name" % index
            home_window.set_property(prop_name, name)
            log.debug("set_library_window_values: plugin.video.embycon-{0}={1}", prop_name, name)
            prop_name = "view_item.%i.id" % index
            home_window.set_property(prop_name, item_id)
            log.debug("set_library_window_values: plugin.video.embycon-{0}={1}", prop_name, item_id)
            prop_name = "view_item.%i.type" % index
            home_window.set_property(prop_name, collection_type)
            log.debug("set_library_window_values: plugin.video.embycon-{0}={1}", prop_name, collection_type)
            thumb = downloadUtils.get_artwork(item, "Primary", server=server)
            prop_name = "view_item.%i.thumb" % index
            home_window.set_property(prop_name, thumb)
            log.debug("set_library_window_values: plugin.video.embycon-{0}={1}", prop_name, thumb)
            index += 1
| gpl-2.0 | -4,794,939,632,383,615,000 | 36.199354 | 123 | 0.62263 | false |
bzero/bitex | apps/pyblinktrade/pyblinktrade/project_options.py | 2 | 1297 | class ProjectOptions(object):
def __init__(self, config, section):
self.config = config
self.section = section
def make_getters(tag):
@property
def _getter(self):
raw_str = self.config.get(self.section, tag)
try:
return self.config.getint(self.section, tag)
except Exception:
pass
try:
return self.config.getfloat(self.section, tag)
except Exception:
pass
try:
return self.config.getboolean(self.section, tag)
except Exception:
pass
return raw_str
return _getter
for k,v in self.items():
_getter = make_getters(k)
setattr(ProjectOptions, k ,_getter)
def has_option(self, attribute):
return self.config.has_option(self.section, attribute)
def get(self, attribute):
return self.config.get(self.section, attribute)
def getint(self, attribute):
return self.config.getint(self.section, attribute)
def getfloat(self, attribute):
return self.config.getfloat(self.section, attribute)
def getboolean(self, attribute):
return self.config.getboolean(self.section, attribute)
def items(self):
return self.config.items(self.section)
def options(self):
return self.config.options(self.section) | gpl-3.0 | 8,143,751,695,777,203,000 | 29.186047 | 58 | 0.650732 | false |
phil-lopreiato/the-blue-alliance | tests/test_match_suggestion_accepter.py | 5 | 2158 | import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.event_type import EventType
from helpers.suggestions.match_suggestion_accepter import MatchSuggestionAccepter
from models.account import Account
from models.event import Event
from models.match import Match
from models.suggestion import Suggestion
class TestMatchSuggestionAccepter(unittest2.TestCase):
    """Verify that accepting a match video suggestion merges the suggested
    YouTube id into the match's existing video list."""

    def setUp(self):
        # Standard App Engine testbed bootstrap: datastore + memcache stubs,
        # plus a taskqueue stub rooted at the project directory.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests
        self.testbed.init_taskqueue_stub(root_path=".")
        # Fixture: the suggesting user.
        self.account = Account(
            email="[email protected]",
        )
        self.account.put()
        # Fixture: a suggestion adding video "123456" to match 2012ct_qm1.
        self.suggestion = Suggestion(
            author=self.account.key,
            contents_json="{\"youtube_videos\":[\"123456\"]}",
            target_key="2012ct_qm1",
            target_model="match"
        )
        self.suggestion.put()
        self.event = Event(
            id="2012ct",
            event_short="ct",
            year=2012,
            event_type_enum=EventType.REGIONAL
        )
        self.event.put()
        # Fixture: the target match, which already has video "abcdef".
        self.match = Match(
            id="2012ct_qm1",
            alliances_json="""{"blue": {"score": -1, "teams": ["frc3464", "frc20", "frc1073"]}, "red": {"score": -1, "teams": ["frc69", "frc571", "frc176"]}}""",
            comp_level="qm",
            event=self.event.key,
            year=2012,
            set_number=1,
            match_number=1,
            team_key_names=[u'frc69', u'frc571', u'frc176', u'frc3464', u'frc20', u'frc1073'],
            youtube_videos=["abcdef"]
        )
        self.match.put()

    def tearDown(self):
        self.testbed.deactivate()

    def test_accept_suggestions(self):
        """Accepting the suggestion keeps the old video and adds the new one."""
        MatchSuggestionAccepter.accept_suggestion(self.match, self.suggestion)
        match = Match.get_by_id("2012ct_qm1")
        self.assertTrue("abcdef" in match.youtube_videos)
        self.assertTrue("123456" in match.youtube_videos)
| mit | 347,125,214,505,883,700 | 31.69697 | 161 | 0.606117 | false |
lalinsky/picard | picard/ui/sortablecheckboxlist.py | 2 | 4958 | # -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2015 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import sys
from functools import partial
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import pyqtSignal
class SortableCheckboxListWidget(QtGui.QWidget):
    """A vertical list of checkboxes, each with up/down buttons to reorder it.

    Rows are backed by SortableCheckboxListItem objects; any reorder or
    check/uncheck emits ``changed`` with the full item list.
    """
    # Grid-layout column indices for the three widgets of each row.
    _CHECKBOX_POS = 0
    _BUTTON_UP = 1
    _BUTTON_DOWN = 2
    # Guard flag: suppresses `changed` while updateRow() programmatically
    # toggles a checkbox (which would otherwise fire checkbox_toggled).
    __no_emit = False
    changed = pyqtSignal(list)

    def __init__(self, parent=None):
        super(SortableCheckboxListWidget, self).__init__(parent)
        layout = QtGui.QGridLayout()
        layout.setHorizontalSpacing(5)
        layout.setVerticalSpacing(2)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(layout)
        self.__items = []

    def addItems(self, items):
        """Append several items; see addItem()."""
        for item in items:
            self.addItem(item)

    def setSignals(self, row):
        """Wire up the checkbox and the up/down buttons of *row*."""
        layout = self.layout()
        checkbox = layout.itemAtPosition(row, self._CHECKBOX_POS).widget()
        up = layout.itemAtPosition(row, self._BUTTON_UP).widget()
        down = layout.itemAtPosition(row, self._BUTTON_DOWN).widget()
        # partial() binds the row index at connect time; note the widgets stay
        # in place on reorder — only the item list is permuted.
        checkbox.stateChanged.connect(partial(self.checkbox_toggled, row))
        up.clicked.connect(partial(self.move_button_clicked, row, up=True))
        down.clicked.connect(partial(self.move_button_clicked, row, up=False))

    def moveItem(self, from_row, to_row):
        """Swap the items at the two rows (to_row wraps around) and notify."""
        to_row = to_row % len(self.__items)
        self.__items[to_row], self.__items[from_row] = \
            self.__items[from_row], self.__items[to_row]
        self.updateRow(to_row)
        self.updateRow(from_row)
        self._emit_changed()

    def checkbox_toggled(self, row, state):
        """Slot: mirror a user toggle into the backing item, then notify."""
        self.__items[row].setChecked(state == QtCore.Qt.Checked)
        self._emit_changed()

    def move_button_clicked(self, row, up):
        """Slot: move *row* one position up or down (with wrap-around)."""
        if up:
            to = row - 1
        else:
            to = row + 1
        self.moveItem(row, to)

    def updateRow(self, row):
        """Refresh the row's checkbox from its item, without emitting changed."""
        self.__no_emit = True
        item = self.__items[row]
        layout = self.layout()
        checkbox = layout.itemAtPosition(row, self._CHECKBOX_POS).widget()
        checkbox.setText(item.text)
        checkbox.setChecked(item.checked)
        self.__no_emit = False

    def addItem(self, item):
        """Append *item* as a new row (checkbox + up/down arrow buttons)."""
        self.__items.append(item)
        row = len(self.__items) - 1
        layout = self.layout()
        layout.addWidget(QtGui.QCheckBox(), row, self._CHECKBOX_POS)
        self.updateRow(row)
        up_button = QtGui.QToolButton()
        up_button.setArrowType(QtCore.Qt.UpArrow)
        up_button.setMaximumSize(QtCore.QSize(16, 16))
        down_button = QtGui.QToolButton()
        down_button.setArrowType(QtCore.Qt.DownArrow)
        down_button.setMaximumSize(QtCore.QSize(16, 16))
        layout.addWidget(up_button, row, self._BUTTON_UP)
        layout.addWidget(down_button, row, self._BUTTON_DOWN)
        self.setSignals(row)

    def _emit_changed(self):
        # Honors the __no_emit guard set by updateRow().
        if not self.__no_emit:
            self.changed.emit(self.__items)

    def clear(self):
        """Remove all rows and forget all items."""
        # Remove from the end so earlier grid positions stay valid.
        for i in reversed(range(len(self.__items))):
            self._remove(i)
        self.__items = []

    def _remove(self, row):
        # Reparenting to None detaches the widgets from the layout/widget tree.
        self.layout().itemAtPosition(row, self._CHECKBOX_POS).widget().setParent(None)
        self.layout().itemAtPosition(row, self._BUTTON_UP).widget().setParent(None)
        self.layout().itemAtPosition(row, self._BUTTON_DOWN).widget().setParent(None)
class SortableCheckboxListItem(object):
    """Plain value object backing one row of SortableCheckboxListWidget.

    Pairs a display label with a checked state and an optional opaque
    ``data`` payload.  Read access goes through properties; mutation uses
    the Qt-style setX() methods.
    """

    def __init__(self, text=u'', checked=False, data=None):
        self._text = text
        self._checked = checked
        self._data = data

    @property
    def text(self):
        """The row's display label."""
        return self._text

    def setText(self, text):
        self._text = text

    @property
    def checked(self):
        """Whether the row's checkbox is ticked."""
        return self._checked

    def setChecked(self, state):
        self._checked = state

    @property
    def data(self):
        """Arbitrary payload attached by the caller (None when unset)."""
        return self._data

    def setData(self, data):
        self._data = data

    def __repr__(self):
        fields = ['text=' + repr(self.text), 'checked=' + repr(self.checked)]
        if self.data is not None:
            fields.append('data=' + repr(self.data))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(fields))
| gpl-2.0 | -1,136,673,766,724,476,900 | 32.275168 | 86 | 0.629689 | false |
bh107/bohrium | bridge/npbackend/bohrium/blas.py | 6 | 3140 | """
Basic Linear Algebra Subprograms (BLAS)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Utilize BLAS directly from Python
"""
import bohrium as np
from sys import stderr
from . import ufuncs
def __blas(name, a, b, alpha=1.0, c=None, beta=0.0, shape_matters=True):
    """Common driver for the BLAS level-3 extmethod wrappers below.

    Validates/normalizes the operands, applies the alpha/beta scaling in
    Python, then dispatches the Bohrium extmethod *name*, which writes its
    result into *c*.

    :param name: extmethod identifier, e.g. "blas_gemm".
    :param a: left matrix (must be 2-D when *b* is given).
    :param b: right matrix, or None for single-operand routines (syrk/herk),
              in which case a scratch array shaped like *a* is substituted.
    :param alpha: scalar applied to *a* before dispatch.
    :param c: output matrix; allocated as (a.rows, b.cols) when omitted.
    :param beta: scalar applied to the incoming *c* before dispatch.
    :param shape_matters: skip the inner-dimension check when False
                          (used by gemmt, which multiplies A^T).
    :returns: the result matrix *c*, or None on a shape error (an error
              message is written to stderr).
    """
    if b is not None:  # idiom fix: was "if not b is None"
        if not (a.ndim == 2 and b.ndim == 2):
            stderr.write("[ext] Matrices need to be two-dimensional.\n")
            return None
        if a.shape[1] != b.shape[0] and shape_matters:
            stderr.write(
                "[ext] Wrong shape of matrices: first argument has shape {} and second has shape {}.\n".format(a.shape,
                                                                                                               b.shape))
            return None
        # The extmethod expects contiguous operands.
        if not b.flags['C_CONTIGUOUS']:
            b = b.copy()
    else:
        # Single-operand routine: pass a dummy second operand of a's shape.
        b = np.empty(shape=(a.shape[0], a.shape[1]), dtype=a.dtype)
    if not a.flags['C_CONTIGUOUS']:
        a = a.copy()
    if c is None:
        c = np.empty(shape=(a.shape[0], b.shape[1]), dtype=a.dtype)
    elif not c.flags['C_CONTIGUOUS']:
        c = c.copy()
    # alpha/beta are folded in here so the extmethod only has to multiply.
    if alpha != 1.0:
        a = a * alpha
    if beta != 0.0:
        c = c * beta
    ufuncs.extmethod(name, c, a, b)  # modifies 'c'
    return c
# --- Routines using all of A, B, and C ---
def gemm(a, b, alpha=1.0, c=None, beta=0.0):
    """General matrix-matrix product: C := alpha * A * B + beta * C."""
    return __blas("blas_gemm", a, b, alpha, c, beta)
def gemmt(a, b, alpha=1.0, c=None, beta=0.0):
    """Transposed product: C := alpha * A^T * B + beta * C.

    The usual inner-dimension check is skipped (shape_matters=False) since
    A is used transposed.
    """
    return __blas("blas_gemmt", a, b, alpha, c, beta, shape_matters=False)
def symm(a, b, alpha=1.0, c=None, beta=0.0):
    """C := alpha * A * B + beta * C, where A is a symmetric matrix."""
    return __blas("blas_symm", a, b, alpha, c, beta)
def hemm(a, b, alpha=1.0, c=None, beta=0.0):
    """C := alpha * A * B + beta * C, where A is a hermitian matrix."""
    return __blas("blas_hemm", a, b, alpha, c, beta)
def syr2k(a, b, alpha=1.0, c=None, beta=0.0):
    """Symmetric rank-2k update:
    C := alpha * A * B**T + alpha * B * A**T + beta * C, C symmetric."""
    return __blas("blas_syr2k", a, b, alpha, c, beta)
def her2k(a, b, alpha=1.0, c=None, beta=0.0):
    """Hermitian rank-2k update:
    C := alpha * A * B**H + conjg(alpha) * B * A**H + beta * C, C hermitian."""
    return __blas("blas_her2k", a, b, alpha, c, beta)
# --- Routines using only A and C (B is a dummy inside __blas) ---
def syrk(a, alpha=1.0, c=None, beta=0.0):
    """Symmetric rank-k update: C := alpha * A * A**T + beta * C, C symmetric."""
    return __blas("blas_syrk", a, None, alpha, c, beta)
def herk(a, alpha=1.0, c=None, beta=0.0):
    """Hermitian rank-k update: C := alpha * A * A**H + beta * C, C hermitian."""
    return __blas("blas_herk", a, None, alpha, c, beta)
# --- Routines using only A and B ---
def trmm(a, b, alpha=1.0):
    """Triangular matrix product: B := alpha * A * B, with A unit upper
    triangular.  B is updated in place and also returned."""
    __blas("blas_trmm", a, b, alpha)
    return b
def trsm(a, b):
    """Triangular solve: finds X with A * X = B, A unit upper triangular.
    B is overwritten with the solution and also returned."""
    __blas("blas_trsm", a, b)
    return b
| apache-2.0 | 5,975,971,097,379,497,000 | 28.345794 | 120 | 0.504777 | false |
xian123/azure-linux-automation | remote-scripts/ConfigureDnsServer.py | 3 | 1357 | #!/usr/bin/python
import argparse
import sys
from azuremodules import *
import paramiko
parser = argparse.ArgumentParser()
parser.add_argument('-D', '--vnetDomain_db_filepath', help='VNET Domain db filepath', required=True)
parser.add_argument('-r', '--vnetDomain_rev_filepath', help='VNET rev filepath',required=True)
parser.add_argument('-v', '--HostnameDIP', help='hosts filepath',required = True)
args = parser.parse_args()
vnetDomain_db_filepath = str(args.vnetDomain_db_filepath)
vnetDomain_rev_filepath = str(args.vnetDomain_rev_filepath)
HostnameDIP=str(args.HostnameDIP)
vnetDomain=(vnetDomain_db_filepath.split("/"))[len((vnetDomain_db_filepath.split("/")))-1].replace(".db","")
#SAMPLE INPUT FOR --vms
#HostnameDIP = 'ICA-VNETVM-Ubuntu1210PL-4-16-2013-1-2-0-role-0:192.168.4.196^ICA-VNETVM-Ubuntu1210PL-4-16-2013-1-2-0-role-1:192.168.4.132^ICA-VNETVM-Ubuntu1210PL-4-16-2013-1-2-1-role-0:192.168.4.133^ICA-VNETVM-Ubuntu1210PL-4-16-2013-1-2-1-role-1:192.168.4.197'
#SETTING THE GLOBAL PARAMS..
#SetVnetGlobalParameters()
#CONFIGURIG DNS SERVER CONFIGURATIONS FILES..
DNSServerStatus = AddICAVMsToDnsServer(HostnameDIP,vnetDomain_db_filepath,vnetDomain_rev_filepath)
#RESTARTING BIND9 SERVICE..
output = JustRun('service bind9 restart')
if DNSServerStatus == 0:
print("CONFIGURATION_SUCCESSFUL")
else:
print("CONFIGURATION_FAILED") | apache-2.0 | 5,368,585,750,515,196,000 | 49.296296 | 260 | 0.757553 | false |
UnoYakshi/DebtCollector | app/modules/auth/forms.py | 1 | 4398 | #! ~DebtCollector/app/modules/auth/forms.py
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, DateField
from wtforms.validators import DataRequired, Length, Email, EqualTo, Regexp, ValidationError
from wtforms.fields.html5 import DateField
from wtforms_components import DateRange
from datetime import datetime, date, timedelta
from .models import Users
from app import db
# Validator that checks if the field is not in the model yet...
class Unique(object):
    """WTForms validator: reject a value that already exists in *field* of *model*.

    Usage: ``Unique(Users, Users.login)`` on a form field raises
    ValidationError when a row with the submitted value is found.
    """

    def __init__(self, model, field, message='Is taken already.'):
        self.model = model
        self.field = field
        self.message = message

    def __call__(self, form, field):
        existing = self.model.query.filter(self.field == field.data).first()
        if existing:
            raise ValidationError(self.message)
class NContains(object):
    """Validator raising ValidationError when the value exists in *field* of *model*.

    NOTE(review): this is a byte-for-byte copy of Unique — despite the name
    suggesting a "does not contain" check, it performs the exact same
    exists-in-database lookup.  Presumably a copy-paste placeholder; confirm
    the intended semantics before using it.
    """
    def __init__(self, model, field, message='Is taken already.'):
        self.model = model
        self.field = field
        self.message = message
    def __call__(self, form, field):
        check = self.model.query.filter(self.field == field.data).first()
        if check:
            raise ValidationError(self.message)
class LoginForm(FlaskForm):
    """Simple login form: a required login and password pair."""
    login = StringField('Login', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    def get_user(self):
        """Return the Users row matching the submitted login, or None."""
        return db.session.query(Users).filter_by(login=self.login.data).first()
class SignUpForm(LoginForm):
    """Registration form: extends LoginForm with profile fields, a password
    policy, and cross-field duplicate checks in validate()."""

    login = StringField(
        'Login',
        validators=[DataRequired(),
                    Unique(Users, Users.login),
                    Length(max=32)],
        render_kw={"placeholder": "JoDo316"})
    first_name = StringField(
        'First Name',
        validators=[DataRequired(),
                    Regexp('((^[A-Z][a-z]+$)|(^[А-Я][а-я]+$))',
                           message='Either cyrrilic, or latin. Start with the capital.'),
                    Length(min=2, max=32)],
        render_kw={"placeholder": "John"})
    last_name = StringField(
        'Last Name',
        validators=[DataRequired(),
                    Regexp('((^[A-Z][a-z]+$)|(^[А-Я][а-я]+$))',
                           message='Either cyrrilic, or latin. Start with the capital.'),
                    Length(min=2, max=32)],
        render_kw={"placeholder": "Doe"})
    email = StringField(
        'Email',
        validators=[DataRequired(),
                    Email(),
                    Unique(Users, Users.email),
                    Length(min=3, max=40)],
        render_kw={"placeholder": "[email protected]"})
    # BUG FIX: the old pattern interpolated str(login), which stringifies the
    # *unbound field object* (its repr), not the submitted login, and its
    # "(^...$)" group followed by ".{8,32}" made the whole regex unmatchable,
    # so every signup failed password validation.  The "don't use your login"
    # rule is now enforced in validate(), where the submitted value exists.
    password = PasswordField(
        'Password',
        validators=[DataRequired(),
                    Regexp(r'((?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[@#$%]).{8,32})',
                           message='Use at least once: a-z, A-Z, 0-9, [@#$%]. Don\'t use login somehow.'),
                    EqualTo('confirm', message='Passwords must match!')])
    confirm = PasswordField('Confirm')
    birthdate = DateField(
        'Birthdate',
        format='%d.%m.%Y',
        validators=[DataRequired(),
                    # Must be at least ~18 years old (365-day years).
                    DateRange(max=date.today() - 18 * timedelta(days=365))],
        render_kw={"placeholder": "14.02.1990"})

    def validate(self):
        """Run field validators plus cross-field checks.

        Returns False (with field errors appended) when the password embeds
        the login, or when the email/login is already registered.
        """
        if not FlaskForm.validate(self):
            return False
        # The field-level regex cannot see another field's data, so the
        # login-in-password rule lives here.
        if self.login.data and self.login.data.lower() in (self.password.data or '').lower():
            self.password.errors.append('Don\'t use your login inside the password.')
            return False
        # Check for email...
        user = db.session.query(Users).filter_by(email=self.email.data).first()
        if user:
            self.email.errors.append('That email is already taken.')
            return False
        # Check for login/username.  BUG FIX: the old code filtered by
        # *email* here, so a clashing login was never actually detected.
        user = db.session.query(Users).filter_by(login=self.login.data).first()
        if user:
            self.login.errors.append('That login/username is already taken.')
            return False
        return True
| mit | 6,308,245,974,784,622,000 | 42.465347 | 132 | 0.5082 | false |
sergiohgz/incubator-airflow | tests/www/test_views.py | 3 | 27962 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import copy
import logging.config
import os
import shutil
import tempfile
import unittest
import sys
import json
from urllib.parse import quote_plus
from werkzeug.test import Client
from airflow import models, configuration, settings
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG
from airflow.models import DAG, DagRun, TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.settings import Session
from airflow.utils.timezone import datetime
from airflow.www import app as application
from airflow import configuration as conf
class TestChartModelView(unittest.TestCase):
    """Exercises the /admin/chart Flask-Admin view: create a chart and list
    charts sorted by owner."""

    CREATE_ENDPOINT = '/admin/chart/new/?url=/admin/chart/'

    @classmethod
    def setUpClass(cls):
        # Fresh DB state plus the 'airflow' user that charts are owned by.
        super(TestChartModelView, cls).setUpClass()
        session = Session()
        session.query(models.Chart).delete()
        session.query(models.User).delete()
        session.commit()
        user = models.User(username='airflow')
        session.add(user)
        session.commit()
        session.close()

    def setUp(self):
        super(TestChartModelView, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        # Disable CSRF so the test client can POST forms directly.
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()
        self.session = Session()
        self.chart = {
            'label': 'chart',
            'owner': 'airflow',
            'conn_id': 'airflow_ci',
        }

    def tearDown(self):
        self.session.query(models.Chart).delete()
        self.session.commit()
        self.session.close()
        super(TestChartModelView, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        super(TestChartModelView, cls).tearDownClass()

    def test_create_chart(self):
        """POSTing the create form persists exactly one Chart row."""
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.chart,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.session.query(models.Chart).count(), 1)

    def test_get_chart(self):
        """Listing with sort=3 renders the owner-sorted view."""
        response = self.app.get(
            '/admin/chart?sort=3',
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn('Sort by Owner', response.data.decode('utf-8'))
class TestVariableView(unittest.TestCase):
    """Exercises /admin/variable: graceful handling of undecryptable values
    and XSS-safe rendering of the variables endpoint."""

    CREATE_ENDPOINT = '/admin/variable/new/?url=/admin/variable/'

    @classmethod
    def setUpClass(cls):
        super(TestVariableView, cls).setUpClass()
        session = Session()
        session.query(models.Variable).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestVariableView, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        # Disable CSRF so the test client can POST forms directly.
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()
        self.session = Session()
        self.variable = {
            'key': 'test_key',
            'val': 'text_val',
            'is_encrypted': True
        }

    def tearDown(self):
        self.session.query(models.Variable).delete()
        self.session.commit()
        self.session.close()
        super(TestVariableView, self).tearDown()

    def test_can_handle_error_on_decrypt(self):
        """A variable whose stored value can't be decrypted must not break
        the list page."""
        # create valid variable
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.variable,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        # update the variable with a wrong value, given that is encrypted
        Var = models.Variable
        (self.session.query(Var)
            .filter(Var.key == self.variable['key'])
            .update({
                'val': 'failed_value_not_encrypted'
            }, synchronize_session=False))
        self.session.commit()
        # retrieve Variables page, should not fail and contain the Invalid
        # label for the variable
        response = self.app.get('/admin/variable', follow_redirects=True)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.session.query(models.Variable).count(), 1)

    def test_xss_prevention(self):
        """A script-injecting URL must 404 without echoing the payload."""
        xss = "/admin/airflow/variables/asdf<img%20src=''%20onerror='alert(1);'>"
        response = self.app.get(
            xss,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 404)
        self.assertNotIn("<img src='' onerror='alert(1);'>",
                         response.data.decode("utf-8"))
class TestKnownEventView(unittest.TestCase):
    """Exercises /admin/knownevent: creating events and the start/end date
    ordering validation."""

    CREATE_ENDPOINT = '/admin/knownevent/new/?url=/admin/knownevent/'

    @classmethod
    def setUpClass(cls):
        super(TestKnownEventView, cls).setUpClass()
        session = Session()
        session.query(models.KnownEvent).delete()
        session.query(models.User).delete()
        session.commit()
        user = models.User(username='airflow')
        session.add(user)
        session.commit()
        # Keep the reporter's id for the form payload below.
        cls.user_id = user.id
        session.close()

    def setUp(self):
        super(TestKnownEventView, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        # Disable CSRF so the test client can POST forms directly.
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()
        self.session = Session()
        self.known_event = {
            'label': 'event-label',
            'event_type': '1',
            'start_date': '2017-06-05 12:00:00',
            'end_date': '2017-06-05 13:00:00',
            'reported_by': self.user_id,
            'description': '',
        }

    def tearDown(self):
        self.session.query(models.KnownEvent).delete()
        self.session.commit()
        self.session.close()
        super(TestKnownEventView, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        super(TestKnownEventView, cls).tearDownClass()

    def test_create_known_event(self):
        """A well-formed event POST persists one KnownEvent row."""
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.known_event,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.session.query(models.KnownEvent).count(), 1)

    def test_create_known_event_with_end_data_earlier_than_start_date(self):
        """end_date < start_date is rejected with a validation message and
        nothing is persisted."""
        self.known_event['end_date'] = '2017-06-05 11:00:00'
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.known_event,
            follow_redirects=True,
        )
        self.assertIn(
            'Field must be greater than or equal to Start Date.',
            response.data.decode('utf-8'),
        )
        self.assertEqual(self.session.query(models.KnownEvent).count(), 0)
class TestPoolModelView(unittest.TestCase):
    """Exercises /admin/pool: creation, duplicate-name rejection, and the
    required-name validation."""

    CREATE_ENDPOINT = '/admin/pool/new/?url=/admin/pool/'

    @classmethod
    def setUpClass(cls):
        super(TestPoolModelView, cls).setUpClass()
        session = Session()
        session.query(models.Pool).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestPoolModelView, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        # Disable CSRF so the test client can POST forms directly.
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()
        self.session = Session()
        self.pool = {
            'pool': 'test-pool',
            'slots': 777,
            'description': 'test-pool-description',
        }

    def tearDown(self):
        self.session.query(models.Pool).delete()
        self.session.commit()
        self.session.close()
        super(TestPoolModelView, self).tearDown()

    def test_create_pool(self):
        """A valid pool POST persists one Pool row."""
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.pool,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.session.query(models.Pool).count(), 1)

    def test_create_pool_with_same_name(self):
        """A second pool with the same name is rejected ('Already exists.')."""
        # create test pool
        self.app.post(
            self.CREATE_ENDPOINT,
            data=self.pool,
            follow_redirects=True,
        )
        # create pool with the same name
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.pool,
            follow_redirects=True,
        )
        self.assertIn('Already exists.', response.data.decode('utf-8'))
        self.assertEqual(self.session.query(models.Pool).count(), 1)

    def test_create_pool_with_empty_name(self):
        """An empty pool name fails the required-field validation."""
        self.pool['pool'] = ''
        response = self.app.post(
            self.CREATE_ENDPOINT,
            data=self.pool,
            follow_redirects=True,
        )
        self.assertIn('This field is required.', response.data.decode('utf-8'))
        self.assertEqual(self.session.query(models.Pool).count(), 0)
class TestLogView(unittest.TestCase):
    """Exercises the task-log endpoints (/admin/airflow/log and
    /admin/airflow/get_logs_with_metadata) against a custom file-task-handler
    logging configuration pointing at tests/test_logs fixtures."""

    DAG_ID = 'dag_for_testing_log_view'
    TASK_ID = 'task_for_testing_log_view'
    DEFAULT_DATE = datetime(2017, 9, 1)
    ENDPOINT = '/admin/airflow/log?dag_id={dag_id}&task_id={task_id}&execution_date={execution_date}'.format(
        dag_id=DAG_ID,
        task_id=TASK_ID,
        execution_date=DEFAULT_DATE,
    )

    @classmethod
    def setUpClass(cls):
        super(TestLogView, cls).setUpClass()
        session = Session()
        session.query(TaskInstance).filter(
            TaskInstance.dag_id == cls.DAG_ID and
            TaskInstance.task_id == cls.TASK_ID and
            TaskInstance.execution_date == cls.DEFAULT_DATE).delete()
        session.commit()
        session.close()

    def setUp(self):
        super(TestLogView, self).setUp()
        # Create a custom logging configuration whose task handler reads the
        # pre-baked log files under tests/test_logs.
        configuration.load_test_config()
        logging_config = copy.deepcopy(DEFAULT_LOGGING_CONFIG)
        current_dir = os.path.dirname(os.path.abspath(__file__))
        logging_config['handlers']['task']['base_log_folder'] = os.path.normpath(
            os.path.join(current_dir, 'test_logs'))
        logging_config['handlers']['task']['filename_template'] = \
            '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts | replace(":", ".") }}/{{ try_number }}.log'
        # Write the custom logging configuration to a file so Airflow can
        # import it via the logging_config_class setting.
        self.settings_folder = tempfile.mkdtemp()
        settings_file = os.path.join(self.settings_folder, "airflow_local_settings.py")
        new_logging_file = "LOGGING_CONFIG = {}".format(logging_config)
        with open(settings_file, 'w') as handle:
            handle.writelines(new_logging_file)
        sys.path.append(self.settings_folder)
        conf.set('core', 'logging_config_class', 'airflow_local_settings.LOGGING_CONFIG')
        app = application.create_app(testing=True)
        self.app = app.test_client()
        self.session = Session()
        # Register the DAG with the webserver's dagbag and create a matching
        # TaskInstance row on its first try.
        from airflow.www.views import dagbag
        dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE)
        task = DummyOperator(task_id=self.TASK_ID, dag=dag)
        dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag)
        ti = TaskInstance(task=task, execution_date=self.DEFAULT_DATE)
        ti.try_number = 1
        self.session.merge(ti)
        self.session.commit()

    def tearDown(self):
        # Restore default logging, remove the DB row, and undo the sys.path /
        # tempdir / config mutations made in setUp.
        logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
        self.session.query(TaskInstance).filter(
            TaskInstance.dag_id == self.DAG_ID and
            TaskInstance.task_id == self.TASK_ID and
            TaskInstance.execution_date == self.DEFAULT_DATE).delete()
        self.session.commit()
        self.session.close()
        sys.path.remove(self.settings_folder)
        shutil.rmtree(self.settings_folder)
        conf.set('core', 'logging_config_class', '')
        super(TestLogView, self).tearDown()

    def test_get_file_task_log(self):
        """The HTML log page renders with the attempts UI."""
        response = self.app.get(
            TestLogView.ENDPOINT,
            follow_redirects=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn('Log by attempts',
                      response.data.decode('utf-8'))

    def test_get_logs_with_metadata(self):
        """The JSON endpoint returns message + metadata for a try number."""
        url_template = "/admin/airflow/get_logs_with_metadata?dag_id={}&" \
                       "task_id={}&execution_date={}&" \
                       "try_number={}&metadata={}"
        response = \
            self.app.get(url_template.format(self.DAG_ID,
                                             self.TASK_ID,
                                             quote_plus(self.DEFAULT_DATE.isoformat()),
                                             1,
                                             json.dumps({})))
        self.assertIn('"message":', response.data.decode('utf-8'))
        self.assertIn('"metadata":', response.data.decode('utf-8'))
        self.assertIn('Log for testing.', response.data.decode('utf-8'))
        self.assertEqual(200, response.status_code)

    def test_get_logs_with_null_metadata(self):
        """metadata=null (literal) is tolerated and still returns the log."""
        url_template = "/admin/airflow/get_logs_with_metadata?dag_id={}&" \
                       "task_id={}&execution_date={}&" \
                       "try_number={}&metadata=null"
        response = \
            self.app.get(url_template.format(self.DAG_ID,
                                             self.TASK_ID,
                                             quote_plus(self.DEFAULT_DATE.isoformat()),
                                             1))
        self.assertIn('"message":', response.data.decode('utf-8'))
        self.assertIn('"metadata":', response.data.decode('utf-8'))
        self.assertIn('Log for testing.', response.data.decode('utf-8'))
        self.assertEqual(200, response.status_code)
class TestVarImportView(unittest.TestCase):
    """Exercises /admin/airflow/varimport: uploading a JSON file of variables
    and checking they are rendered back."""

    IMPORT_ENDPOINT = '/admin/airflow/varimport'

    @classmethod
    def setUpClass(cls):
        super(TestVarImportView, cls).setUpClass()
        session = Session()
        session.query(models.User).delete()
        session.commit()
        user = models.User(username='airflow')
        session.add(user)
        session.commit()
        session.close()

    def setUp(self):
        super(TestVarImportView, self).setUp()
        configuration.load_test_config()
        app = application.create_app(testing=True)
        # Disable CSRF so the test client can POST multipart forms directly.
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()

    def tearDown(self):
        super(TestVarImportView, self).tearDown()

    @classmethod
    def tearDownClass(cls):
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        super(TestVarImportView, cls).tearDownClass()

    def test_import_variables(self):
        """Uploading a JSON document imports every key and echoes the values."""
        content = ('{"str_key": "str_value", "int_key": 60,'
                   '"list_key": [1, 2], "dict_key": {"k_a": 2, "k_b": 3}}')
        try:
            # python 3+
            bytes_content = io.BytesIO(bytes(content, encoding='utf-8'))
        except TypeError:
            # python 2.7
            bytes_content = io.BytesIO(bytes(content))
        response = self.app.post(
            self.IMPORT_ENDPOINT,
            data={'file': (bytes_content, 'test.json')},
            follow_redirects=True
        )
        self.assertEqual(response.status_code, 200)
        body = response.data.decode('utf-8')
        self.assertIn('str_key', body)
        self.assertIn('int_key', body)
        self.assertIn('list_key', body)
        self.assertIn('dict_key', body)
        self.assertIn('str_value', body)
        self.assertIn('60', body)
        self.assertIn('[1, 2]', body)
        # As dicts are not ordered, we may get any of the following cases.
        case_a_dict = '{"k_a": 2, "k_b": 3}'
        case_b_dict = '{"k_b": 3, "k_a": 2}'
        try:
            self.assertIn(case_a_dict, body)
        except AssertionError:
            self.assertIn(case_b_dict, body)
class TestMountPoint(unittest.TestCase):
    """Check that the webserver can be mounted under a non-root base_url."""
    def setUp(self):
        super(TestMountPoint, self).setUp()
        configuration.load_test_config()
        configuration.conf.set("webserver", "base_url", "http://localhost:8080/test")
        # Drop the cached application so base_url is re-applied on creation.
        application.app = None
        flask_app = application.cached_app(
            config={'WTF_CSRF_METHODS': []}, testing=True)
        self.client = Client(flask_app)
    def test_mount(self):
        # The root path lies outside the mount point: it must answer with
        # the "not here" stub.
        body, _, _ = self.client.get('/', follow_redirects=True)
        self.assertEqual(b"Apache Airflow is not at this location", b''.join(body))
        # The configured prefix serves the real UI (DAGs listing page).
        body, _, _ = self.client.get('/test', follow_redirects=True)
        self.assertIn(b"DAGs", b''.join(body))
class ViewWithDateTimeAndNumRunsAndDagRunsFormTester:
    """Reusable tester (not itself a TestCase) for views that share the
    base-date / number-of-runs / dag-run selection form (graph, gantt).

    A concrete TestCase instantiates this with itself and the endpoint to
    hit, and delegates its test methods to the ``test_with_*`` methods
    below.  Assertions are routed through ``self.test`` so failures are
    reported against the owning TestCase.
    """
    DAG_ID = 'dag_for_testing_dt_nr_dr_form'
    DEFAULT_DATE = datetime(2017, 9, 1)
    # (run_id, execution_date) pairs, newest first.
    RUNS_DATA = [
        ('dag_run_for_testing_dt_nr_dr_form_4', datetime(2018, 4, 4)),
        ('dag_run_for_testing_dt_nr_dr_form_3', datetime(2018, 3, 3)),
        ('dag_run_for_testing_dt_nr_dr_form_2', datetime(2018, 2, 2)),
        ('dag_run_for_testing_dt_nr_dr_form_1', datetime(2018, 1, 1)),
    ]
    def __init__(self, test, endpoint):
        # test: the owning unittest.TestCase; endpoint: URL to exercise.
        self.test = test
        self.endpoint = endpoint
    def setUp(self):
        configuration.load_test_config()
        app = application.create_app(testing=True)
        app.config['WTF_CSRF_METHODS'] = []
        self.app = app.test_client()
        self.session = Session()
        from airflow.www.views import dagbag
        from airflow.utils.state import State
        # Register a fresh DAG in the webserver's dagbag and create the
        # four dag runs listed in RUNS_DATA (self.runs keeps their order).
        dag = DAG(self.DAG_ID, start_date=self.DEFAULT_DATE)
        dagbag.bag_dag(dag, parent_dag=dag, root_dag=dag)
        self.runs = []
        for rd in self.RUNS_DATA:
            run = dag.create_dagrun(
                run_id=rd[0],
                execution_date=rd[1],
                state=State.SUCCESS,
                external_trigger=True
            )
            self.runs.append(run)
    def tearDown(self):
        # Delete only this tester's dag runs; other tests' data is untouched.
        self.session.query(DagRun).filter(
            DagRun.dag_id == self.DAG_ID).delete()
        self.session.commit()
        self.session.close()
    def assertBaseDateAndNumRuns(self, base_date, num_runs, data):
        # NOTE(review): these assert the *absence* of the raw values --
        # presumably because the page renders base_date/num_runs in a
        # different format than the plain repr used here; confirm intent.
        self.test.assertNotIn('name="base_date" value="{}"'.format(base_date), data)
        self.test.assertNotIn('<option selected="" value="{}">{}</option>'.format(
            num_runs, num_runs), data)
    def assertRunIsNotInDropdown(self, run, data):
        self.test.assertNotIn(run.execution_date.isoformat(), data)
        self.test.assertNotIn(run.run_id, data)
    def assertRunIsInDropdownNotSelected(self, run, data):
        self.test.assertIn('<option value="{}">{}</option>'.format(
            run.execution_date.isoformat(), run.run_id), data)
    def assertRunIsSelected(self, run, data):
        self.test.assertIn('<option selected value="{}">{}</option>'.format(
            run.execution_date.isoformat(), run.run_id), data)
    def test_with_default_parameters(self):
        """
        Tests graph view with no URL parameter.
        Should show all dag runs in the drop down.
        Should select the latest dag run.
        Should set base date to current date (not asserted)
        """
        response = self.app.get(
            self.endpoint
        )
        self.test.assertEqual(response.status_code, 200)
        data = response.data.decode('utf-8')
        self.test.assertIn('Base date:', data)
        self.test.assertIn('Number of runs:', data)
        self.assertRunIsSelected(self.runs[0], data)
        self.assertRunIsInDropdownNotSelected(self.runs[1], data)
        self.assertRunIsInDropdownNotSelected(self.runs[2], data)
        self.assertRunIsInDropdownNotSelected(self.runs[3], data)
    def test_with_execution_date_parameter_only(self):
        """
        Tests graph view with execution_date URL parameter.
        Scenario: click link from dag runs view.
        Should only show dag runs older than execution_date in the drop down.
        Should select the particular dag run.
        Should set base date to execution date.
        """
        response = self.app.get(
            self.endpoint + '&execution_date={}'.format(
                self.runs[1].execution_date.isoformat())
        )
        self.test.assertEqual(response.status_code, 200)
        data = response.data.decode('utf-8')
        self.assertBaseDateAndNumRuns(
            self.runs[1].execution_date,
            configuration.getint('webserver', 'default_dag_run_display_number'),
            data)
        self.assertRunIsNotInDropdown(self.runs[0], data)
        self.assertRunIsSelected(self.runs[1], data)
        self.assertRunIsInDropdownNotSelected(self.runs[2], data)
        self.assertRunIsInDropdownNotSelected(self.runs[3], data)
    def test_with_base_date_and_num_runs_parmeters_only(self):
        """
        Tests graph view with base_date and num_runs URL parameters.
        Should only show dag runs older than base_date in the drop down,
        limited to num_runs.
        Should select the latest dag run.
        Should set base date and num runs to submitted values.
        """
        response = self.app.get(
            self.endpoint + '&base_date={}&num_runs=2'.format(
                self.runs[1].execution_date.isoformat())
        )
        self.test.assertEqual(response.status_code, 200)
        data = response.data.decode('utf-8')
        self.assertBaseDateAndNumRuns(self.runs[1].execution_date, 2, data)
        self.assertRunIsNotInDropdown(self.runs[0], data)
        self.assertRunIsSelected(self.runs[1], data)
        self.assertRunIsInDropdownNotSelected(self.runs[2], data)
        self.assertRunIsNotInDropdown(self.runs[3], data)
    def test_with_base_date_and_num_runs_and_execution_date_outside(self):
        """
        Tests graph view with base_date and num_runs and execution-date URL parameters.
        Scenario: change the base date and num runs and press "Go",
        the selected execution date is outside the new range.
        Should only show dag runs older than base_date in the drop down.
        Should select the latest dag run within the range.
        Should set base date and num runs to submitted values.
        """
        response = self.app.get(
            self.endpoint + '&base_date={}&num_runs=42&execution_date={}'.format(
                self.runs[1].execution_date.isoformat(),
                self.runs[0].execution_date.isoformat())
        )
        self.test.assertEqual(response.status_code, 200)
        data = response.data.decode('utf-8')
        self.assertBaseDateAndNumRuns(self.runs[1].execution_date, 42, data)
        self.assertRunIsNotInDropdown(self.runs[0], data)
        self.assertRunIsSelected(self.runs[1], data)
        self.assertRunIsInDropdownNotSelected(self.runs[2], data)
        self.assertRunIsInDropdownNotSelected(self.runs[3], data)
    def test_with_base_date_and_num_runs_and_execution_date_within(self):
        """
        Tests graph view with base_date and num_runs and execution-date URL parameters.
        Scenario: change the base date and num runs and press "Go",
        the selected execution date is within the new range.
        Should only show dag runs older than base_date in the drop down.
        Should select the dag run with the execution date.
        Should set base date and num runs to submitted values.
        """
        response = self.app.get(
            self.endpoint + '&base_date={}&num_runs=5&execution_date={}'.format(
                self.runs[2].execution_date.isoformat(),
                self.runs[3].execution_date.isoformat())
        )
        self.test.assertEqual(response.status_code, 200)
        data = response.data.decode('utf-8')
        self.assertBaseDateAndNumRuns(self.runs[2].execution_date, 5, data)
        self.assertRunIsNotInDropdown(self.runs[0], data)
        self.assertRunIsNotInDropdown(self.runs[1], data)
        self.assertRunIsInDropdownNotSelected(self.runs[2], data)
        self.assertRunIsSelected(self.runs[3], data)
class TestGraphView(unittest.TestCase):
    """Exercise the date/num-runs/dag-run form on the graph view by
    delegating to ViewWithDateTimeAndNumRunsAndDagRunsFormTester."""
    GRAPH_ENDPOINT = '/admin/airflow/graph?dag_id={dag_id}'.format(
        dag_id=ViewWithDateTimeAndNumRunsAndDagRunsFormTester.DAG_ID
    )
    @classmethod
    def setUpClass(cls):
        super(TestGraphView, cls).setUpClass()
    def setUp(self):
        super(TestGraphView, self).setUp()
        # The shared tester owns all fixture setup (dag, dag runs).
        self.tester = ViewWithDateTimeAndNumRunsAndDagRunsFormTester(
            self, self.GRAPH_ENDPOINT)
        self.tester.setUp()
    def tearDown(self):
        self.tester.tearDown()
        super(TestGraphView, self).tearDown()
    @classmethod
    def tearDownClass(cls):
        super(TestGraphView, cls).tearDownClass()
    def test_dt_nr_dr_form_default_parameters(self):
        self.tester.test_with_default_parameters()
    def test_dt_nr_dr_form_with_execution_date_parameter_only(self):
        self.tester.test_with_execution_date_parameter_only()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_parmeters_only(self):
        self.tester.test_with_base_date_and_num_runs_parmeters_only()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_outside(self):
        self.tester.test_with_base_date_and_num_runs_and_execution_date_outside()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_within(self):
        self.tester.test_with_base_date_and_num_runs_and_execution_date_within()
class TestGanttView(unittest.TestCase):
    """Exercise the date/num-runs/dag-run form on the gantt view by
    delegating to ViewWithDateTimeAndNumRunsAndDagRunsFormTester."""
    GANTT_ENDPOINT = '/admin/airflow/gantt?dag_id={dag_id}'.format(
        dag_id=ViewWithDateTimeAndNumRunsAndDagRunsFormTester.DAG_ID
    )
    @classmethod
    def setUpClass(cls):
        super(TestGanttView, cls).setUpClass()
    def setUp(self):
        super(TestGanttView, self).setUp()
        # The shared tester owns all fixture setup (dag, dag runs).
        self.tester = ViewWithDateTimeAndNumRunsAndDagRunsFormTester(
            self, self.GANTT_ENDPOINT)
        self.tester.setUp()
    def tearDown(self):
        self.tester.tearDown()
        super(TestGanttView, self).tearDown()
    @classmethod
    def tearDownClass(cls):
        super(TestGanttView, cls).tearDownClass()
    def test_dt_nr_dr_form_default_parameters(self):
        self.tester.test_with_default_parameters()
    def test_dt_nr_dr_form_with_execution_date_parameter_only(self):
        self.tester.test_with_execution_date_parameter_only()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_parmeters_only(self):
        self.tester.test_with_base_date_and_num_runs_parmeters_only()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_outside(self):
        self.tester.test_with_base_date_and_num_runs_and_execution_date_outside()
    def test_dt_nr_dr_form_with_base_date_and_num_runs_and_execution_date_within(self):
        self.tester.test_with_base_date_and_num_runs_and_execution_date_within()
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    unittest.main()
| apache-2.0 | -7,816,191,596,842,511,000 | 36.382353 | 109 | 0.615049 | false |
deokwooj/DDEA | webgui/data_preprocess.py | 1 | 20388 | #!/usr/bin/python
# To force float point division
from __future__ import division
"""
Created on Fri Mar 14 01:34:41 2014
Author : Deokwoo Jung
E-mail : [email protected]
"""
import numpy as np
from numpy.linalg import norm
from scipy.interpolate import interp1d
from shared_constants import *
from data_tools import *
from scipy.stats import stats
import time
import multiprocessing as mp
from log_util import log
import traceback
def pp_verify_sensor_data_format(tup):
    """Pool worker for verify_data_format(PARALLEL=True).

    tup is (key, data_list, time_slots, q): scan one measurement point's
    per-slot sample lists and put the [key, slot_idx, sample_idx]
    coordinates of every malformed sample on the shared queue q.  Only
    int/float samples are valid; an empty list marks a missing value.
    """
    key, data_list, time_slots, q = tup
    log.info(' checking ' + key + '...')
    try:
        for slot_idx, samples in enumerate(data_list):
            for sample_idx, sample in enumerate(samples):
                # Flag empty placeholders and any non-numeric sample alike.
                if sample == [] or not (isinstance(sample, int) or
                                        isinstance(sample, float)):
                    q.put([key, slot_idx, sample_idx])
                    log.info(str(sample) + ' at ' + str(time_slots[slot_idx]) + ' in ' + str(key))
    except Exception as e:
        # A worker must never crash the pool; report and move on.
        log.error(traceback.print_exc())
        log.error(str(e))
def verify_data_format(data_dict, PARALLEL=False):
    """Verify there is no [] or N/A sample in any used series.

    Only float or int samples are allowed.  Returns the list of
    [key, slot_idx, sample_idx] coordinates of offending samples and
    raises NameError if any were found.  With PARALLEL=True the per-key
    scan is fanned out over a multiprocessing pool
    (see pp_verify_sensor_data_format).
    """
    log.info('Checking any inconsisent data format...')
    log.info('-' * 40)
    list_of_wrong_data_format = list()
    time_slots = data_dict['time_slots']
    # Only a fixed subset of weather channels is checked (indices
    # 1, 2, 3, 10, 11) -- presumably the channels actually analysed
    # downstream; confirm against the weather_list layout.
    weather_list_used = [data_dict['weather_list'][i] for i in [1, 2, 3, 10, 11]]
    key_list = weather_list_used+ data_dict['sensor_list']
    if not PARALLEL:
        for key in key_list:
            log.info('checking ' + str(key) + '...')
            for i, samples in enumerate(data_dict[key][1]):
                for j, each_sample in enumerate(samples):
                    # [] marks a missing value; anything non-numeric is bad.
                    if each_sample == []:
                        list_of_wrong_data_format.append([key, i, j])
                        log.info(str(each_sample) + ' at ' + str(time_slots[i]) + ' in ' + str(key))
                    elif not isinstance(each_sample, int) and not isinstance(each_sample, float):
                        list_of_wrong_data_format.append([key, i, j])
                        log.info(str(each_sample) + ' at ' + str(time_slots[i]) + ' in ' + str(key))
        log.info('-' * 40)
    # PARALLEL
    else:
        # Workers report bad coordinates through a managed queue; drain it
        # after the pool has finished.
        manager = mp.Manager()
        q = manager.Queue()
        p = mp.Pool(CPU_CORE_NUM)
        param_list = [(key, data_dict[key][1], time_slots, q) for key in key_list]
        p.map(pp_verify_sensor_data_format, param_list)
        p.close()
        p.join()
        while not q.empty():
            item = q.get()
            log.warn('queue item: ' + str(item))
            list_of_wrong_data_format.append(item)
    if len(list_of_wrong_data_format) > 0:
        log.critical('Inconsistent data format in the list of data_used')
        raise NameError('Inconsistent data format in the list of data_used')
    return list_of_wrong_data_format
def verify_data_mat(X):
    """Sanity-check a feature matrix X (rows = samples/time slots).

    Raises NameError if any entry is NaN or +inf, or if any row has zero
    variance (such a row carries no information).  Checks are reported in
    that fixed order: nan, inf, zero variance.
    """
    # Per-row error counts: [#NaN entries, #+inf entries, 1 if zero variance].
    # np.atleast_1d avoids calling nonzero() on the 0-d result of
    # `np.var(sample) == 0`, which newer numpy versions reject.
    num_err_temp = np.array(
        [[len(np.nonzero(np.isnan(sample))[0]),
          len(np.nonzero(sample == np.inf)[0]),
          len(np.nonzero(np.atleast_1d(np.var(sample) == 0))[0])]
         for sample in X])
    num_err = np.sum(num_err_temp, axis=0)
    for err_idx in np.argwhere(num_err > 0):
        # BUG FIX: the original built these NameError instances without
        # `raise`, so ill-formed matrices slipped through silently.
        if err_idx == 0:
            raise NameError('nan entry found')
        if err_idx == 1:
            raise NameError('inf entry found')
        if err_idx == 2:
            raise NameError('zero var found')
    log.info('all entry values of data matrix are verifed ok')
def normalize_data(data_input):
    """Zero-mean / unit-norm normalisation of a 1-D sample vector.

    +inf entries (missing slots) are excluded from the statistics.  The
    variance is estimated on the central 90% of the sorted finite values;
    when it is positive, the finite entries are centred on their mean and
    the whole vector is divided by the norm of its finite part -- returns
    (normalised_vector, 0).  Otherwise the data is considered constant and
    the distinct trimmed values are returned with status -1.
    """
    values = data_input.copy()
    finite = np.delete(values, np.nonzero(values == np.infty), axis=0)
    # Trim the lowest/highest 5% before estimating the variance so a few
    # outliers cannot make constant data look informative.
    lo = int(np.ceil(len(finite) * 0.05))
    hi = int(np.floor(len(finite) * 0.95))
    trimmed = np.sort(finite)[lo:hi]
    if np.var(trimmed) > 0:
        finite_idx = np.nonzero(values != np.infty)
        values[finite_idx] = values[finite_idx] - np.mean(values[finite_idx])
        return values / norm(values[finite_idx]), 0
    # Degenerate (constant) data: no meaningful normalisation possible.
    return list(set(trimmed)), -1
def interploate_data(x_temp, num_type, max_num_succ_idx_for_itpl):
    """Fill missing samples (+inf markers) in the (n, 1) column x_temp.

    Missing values are interpolated from neighbouring known samples:
    linear interpolation for FLOAT_TYPE, nearest-neighbour for INT_TYPE.
    Boundary slots and gaps longer than max_num_succ_idx_for_itpl
    consecutive missing slots are left as +inf.  Modifies and returns
    x_temp in place.
    """
    num_of_samples = x_temp.shape[0]
    inf_idx = np.nonzero(x_temp == np.inf)[0]
    noinf_idx = np.nonzero(x_temp != np.inf)[0]
    # Don't interpolate the values on the boundary (first/last slot):
    # there is no neighbour on one side to anchor the interpolation.
    inter_idx = np.delete(inf_idx, np.nonzero(inf_idx == 0))
    inter_idx = np.delete(inter_idx, np.nonzero(inter_idx == num_of_samples-1))
    # Don't interpolate across gaps longer than max_num_succ_idx_for_itpl:
    # collect those runs of successive missing slots and exclude them.
    succ_inf_idx = []
    for i in range(0, len(noinf_idx) - 1):
        # number of successive inf between two non-inf indices
        num_succ_inf = noinf_idx[i+1] - noinf_idx[i] - 1
        if num_succ_inf > max_num_succ_idx_for_itpl:
            succ_inf_idx = succ_inf_idx + range(noinf_idx[i]+1, noinf_idx[i+1])
    # Remove successive inf indices
    inter_idx = list(set(inter_idx) - set(succ_inf_idx))
    if num_type == FLOAT_TYPE:
        # Continuous data: linear interpolation.
        val_new = np.interp(inter_idx,noinf_idx, x_temp[noinf_idx,0])
    elif num_type == INT_TYPE:
        # Discrete data: nearest-neighbour keeps values in the valid set.
        val_new = fast_nearest_interp(inter_idx, noinf_idx, x_temp[noinf_idx, 0])
    else:
        raise NameError('Sample type must either INT or FLOAT type')
    x_temp[inter_idx, 0] = val_new
    log.warn('No sample in time slot ' + str(inf_idx))
    log.warn(str(len(inter_idx)) + ' / ' + str(len(inf_idx)) + ' time slots are interplated')
    return x_temp
def get_feature(data_dict_samples, num_type):
    """Summarise each time slot's raw samples into one feature value.

    Empty slots become np.inf (the missing-value marker used throughout
    this module).  INT_TYPE points are summarised by the mode of their
    samples, FLOAT_TYPE points by the mean.  Returns an (n, 1) column
    vector.  Raises NameError on any other num_type once a non-empty
    slot is reached.
    """
    feature_vals = []
    for samples in data_dict_samples:
        if not len(samples):
            # No measurement in this slot: flag it as missing.
            feature_vals.append(np.inf)
        elif num_type == INT_TYPE:
            feature_vals.append(int(stats.mode(samples)[0]))
        elif num_type == FLOAT_TYPE:
            feature_vals.append(np.mean(samples))
        else:
            raise NameError('Sample type must either INT or FLOAT type')
    return np.array(feature_vals)[:, np.newaxis]
# Mean value measure
def build_feature_matrix(data_dict, sensor_list, weather_list, time_slots, interpolation=1, max_num_succ_idx_for_itpl=4):
    """Build the average-value feature matrix X from sensor + weather data.

    Each usable measurement point contributes one column; each time slot
    one row.  Per column: summarise samples (get_feature), remove
    outliers, optionally interpolate short gaps, then normalise float
    sensor columns.  Columns with (near-)zero variance are excluded and
    reported separately.  Time slots that still contain missing samples
    in any column are dropped at the end.

    Returns (X, new_time_slot, input_names, zero_var_list, zero_var_val,
    INT_type_list, INT_type_idx, FLOAT_type_list, FLOAT_type_idx,
    weather_type_idx, sensor_type_idx).
    """
    data_used = sensor_list + weather_list
    log.info('Build data feature matrix now.....')
    if interpolation == 1:
        log.info('Missing samples will be interpolated upto ' + str(max_num_succ_idx_for_itpl) + 'successive time slots')
    else:
        log.info('All time slots with any missing sample will be removed without interpolatoin ')
    num_of_data = len(data_used)
    num_of_samples = len(time_slots)
    # Accumulators: columns of X plus the bookkeeping index/name lists.
    X = list()
    INT_type_list = list()
    FLOAT_type_list = list()
    input_names = list()
    weather_type_idx = list()
    sensor_type_idx = list()
    INT_type_idx = list()
    FLOAT_type_idx = list()
    # Measurement points whose variance is zero carry no information;
    # they are collected here instead of entering X.
    zero_var_list = list()
    zero_var_val = list()
    # Construct X column by column, summarising the per-slot samples.
    for j, key in enumerate(data_used):
        log.info('-' * 40)
        log.info('building for ' + str(key))
        try:
            num_type = check_data_type(data_dict[key][2][1])
            # Avg. value feature
            x_temp = get_feature(data_dict[key][1], num_type)
            non_inf_idx = np.nonzero(x_temp < np.inf)[0]
            # Outlier removal; weather data gets looser thresholds than
            # sensors.  data_used puts sensors first, so j >= len(sensor_list)
            # means this key is a weather channel.
            if len(sensor_list) <= j:
                # weather data
                is_weather_data = True
                outlier_idx = outlier_detect(x_temp[non_inf_idx], 5, 10)
            else:
                is_weather_data = False
                outlier_idx = outlier_detect(x_temp[non_inf_idx], 1, 20)
            if len(outlier_idx) > 0:
                log.info('outlier samples are detected: outlier_idx:' + str(outlier_idx))
                # Outliers are re-marked as missing so interpolation can fix them.
                x_temp[non_inf_idx[outlier_idx]] = np.inf
            # Interpolation: nearest for int type, linear for float type.
            if interpolation == 1:
                x_temp = interploate_data(x_temp, num_type, max_num_succ_idx_for_itpl)
            norm_data_vec, output_status = normalize_data(x_temp[:, 0])
            # Reject columns where more than 20% of the slots stayed missing.
            if len(np.nonzero(norm_data_vec == np.inf)[0]) > num_of_samples/5:
                raise
        except Exception as e:
            log.error(traceback.print_exc())
            log.error(' Error in processing data feature, excluded from analysis ' + str(e))
            output_status = -1
            norm_data_vec = None
        if output_status == -1:
            zero_var_list.append(key)
            zero_var_val.append(norm_data_vec)
            log.info('too small variance for float type, added to zero var list')
        else:
            input_names.append(key)
            log.info(str(j)+'th sensor update')
            # Float sensor columns use the normalised vector; int columns
            # and all weather columns keep the raw summarised values.
            if (num_type == FLOAT_TYPE) and (is_weather_data == False):
                X.append(norm_data_vec)
                FLOAT_type_idx.append(len(X)-1)
                FLOAT_type_list.append(key)
            elif (num_type == INT_TYPE) or (is_weather_data == True):
                X.append(x_temp[:, 0])
                INT_type_idx.append(len(X)-1)
                INT_type_list.append(key)
            else:
                log.error('Sample type must either INT or FLOAT type')
                raise NameError('Sample type must either INT or FLOAT type')
            if key in weather_list:
                weather_type_idx.append(len(X)-1)
            elif key in sensor_list:
                sensor_type_idx.append(len(X)-1)
            else:
                log.error('Sample type must either Weather or Sensor type')
                raise NameError('Sample type must either Weather or Sensor type')
    # Transpose so rows = time slots, columns = measurement points.
    X = np.array(X).T
    if X.shape[0] != num_of_samples:
        log.error('The numeber of rows in feature matrix and the number of the time slots are different ')
        raise NameError('The numeber of rows in feature matrix and the number of the time slots are different ')
    if X.shape[1]+len(zero_var_list) != num_of_data:
        log.error('The sume of the numeber of column in feature matrix and the number of zero var column are different from the number of input measurements ')
        raise NameError('The sume of the numeber of column in feature matrix and the number of zero var column are different from the number of input measurements ')
    deleted_timeslot_idx=[]
    log.info('-' * 20)
    log.info('removing time slots having no sample...')
    # Any row (time slot) still containing +inf in some column is dropped.
    inf_idx_set = []
    for col_vec in X.T:
        inf_idx = np.nonzero(col_vec ==np.infty)[0]
        inf_idx_set = np.r_[inf_idx_set, inf_idx]
    inf_col_idx = list(set(list(inf_idx_set)))
    deleted_timeslot_idx = np.array([int(x) for x in inf_col_idx])
    log.info('time slots ' + str(deleted_timeslot_idx) + ' removed...')
    log.info('-' * 20)
    X = np.delete(X, deleted_timeslot_idx, axis=0)
    new_time_slot = np.delete(time_slots, deleted_timeslot_idx)
    # Checking whether it has any ill entry value
    verify_data_mat(X)
    return X, new_time_slot, input_names, zero_var_list, zero_var_val, INT_type_list, INT_type_idx, FLOAT_type_list, FLOAT_type_idx, weather_type_idx, sensor_type_idx
# Abs Diff value measure
def build_diff(args):
    """Pool worker for build_diff_matrix(PARALLEL=True).

    args is (k, time_slots, conf_lev, set_val, set_name, num_type).
    Computes the per-slot absolute-difference feature (get_diff) and, for
    FLOAT_TYPE, normalises it; for INT_TYPE, keeps it when it takes more
    than one distinct value.  Returns (k, [output_status, norm_diff_mean])
    where output_status == -1 marks a point that carries no information
    (constant data or processing error).
    """
    (k, time_slots, conf_lev, set_val, set_name, num_type) = args
    log.info(set_name)
    try:
        diff_mean = get_diff(set_val, time_slots, num_type, conf_lev)
        if num_type == FLOAT_TYPE:
            norm_diff_mean, output_status = normalize_data(diff_mean)
        elif num_type == INT_TYPE:
            num_discrete_vals = len(set(list(diff_mean)))
            log.info('num_discrete_vals :' + str(num_discrete_vals))
            if num_discrete_vals > 1:
                output_status = 0
                norm_diff_mean = diff_mean
            else:
                # A single distinct value carries no information.
                output_status = -1
                norm_diff_mean = list(set(diff_mean))
        else:
            # NOTE(review): an unknown num_type leaves output_status /
            # norm_diff_mean unbound and the return below raises
            # UnboundLocalError -- same as the original behavior; confirm
            # whether an explicit error is wanted here.
            pass
    except Exception as e:
        log.error(traceback.print_exc())
        log.error('Error in processing data feature, excluded from analysis ' + str(e))
        output_status = -1
        norm_diff_mean = None
    # BUG FIX: the original had two consecutive return statements here;
    # the second was unreachable dead code and has been removed.
    return (k, [output_status, norm_diff_mean])
def get_diff(set_val,time_slots,num_type,conf_lev):
    """Per-slot absolute-difference feature for one measurement point.

    set_val is a pair (unix_timestamps, values).  For each time slot,
    take the absolute differences of consecutive values inside the slot,
    keep only the upper (1 - conf_lev) fraction of them, and summarise:
    mean for FLOAT_TYPE, mode for INT_TYPE.  Slots with fewer than two
    samples (no difference computable) yield np.inf.  Returns a 1-D
    numpy array with one entry per slot.
    """
    time_slots_utc = dtime_to_unix(time_slots)
    # Slot width in seconds, derived from the first two slot boundaries.
    TIMELET_INV_seconds = (time_slots[1]-time_slots[0]).seconds
    diff_mean = list()
    for r, utc_t in enumerate(time_slots_utc):
        utc_t_s = utc_t
        utc_t_e = utc_t + TIMELET_INV_seconds
        # Samples whose timestamp falls inside [slot_start, slot_end).
        idx = np.nonzero((set_val[0] >= utc_t_s) & (set_val[0] < utc_t_e))[0]
        if len(idx) < 2:
            diff_val = np.inf
        else:
            temp_val = abs(np.diff(set_val[1][idx]))
            # Keep only the largest (1 - conf_lev) fraction of differences.
            upper_val = np.sort(temp_val)[int(np.floor(len(temp_val)*conf_lev)):]
            if len(upper_val) == 0:
                diff_val = np.inf
            else:
                if num_type == FLOAT_TYPE:
                    diff_val = np.mean(upper_val)
                elif num_type == INT_TYPE:
                    diff_val = int(stats.mode(upper_val)[0])
                else:
                    log.error('Sample type must either INT or FLOAT type')
                    raise NameError('Sample type must either INT or FLOAT type')
        diff_mean.append(diff_val)
    diff_mean = np.array(diff_mean)
    return diff_mean
# Abs Diff value measure
def build_diff_matrix(measurement_point_set, time_slots, num_type_set, irr_data_name, conf_lev=0.5, PARALLEL=False):
    """Build the absolute-difference feature matrix Xdiff for irregularly
    sampled measurement points.

    For each point, get_diff summarises the within-slot absolute value
    changes; float columns are normalised, int columns kept when they take
    more than one distinct value.  Zero-information points are collected
    separately.  Time slots still containing missing samples in any
    column are dropped at the end.  With PARALLEL=True the per-point work
    is fanned out over a multiprocessing pool (see build_diff).

    Returns (Xdiff, new_time_slot, input_names, zero_var_list,
    zero_var_val, INT_type_list, INT_type_idx, FLOAT_type_list,
    FLOAT_type_idx).
    """
    Xdiff = list()
    input_names = list()
    INT_type_list = list()
    FLOAT_type_list = list()
    INT_type_idx = list()
    FLOAT_type_idx = list()
    zero_var_list = list()
    # whose variance is zero, hence carry no information,
    zero_var_val = list()
    num_of_samples = len(time_slots)
    log.info('=' * 40)
    if not PARALLEL:
        for k, (set_val, set_name) in enumerate(zip(measurement_point_set, irr_data_name)):
            log.info(str(irr_data_name[k]))
            try:
                num_type = num_type_set[k]
                diff_mean = get_diff(set_val, time_slots, num_type, conf_lev)
                if num_type == FLOAT_TYPE:
                    norm_diff_mean, output_status = normalize_data(diff_mean)
                elif num_type == INT_TYPE:
                    num_discrete_vals = len(set(list(diff_mean)))
                    log.info('num_discrete_vals : ' + str(num_discrete_vals))
                    if num_discrete_vals > 1:
                        output_status = 0
                        norm_diff_mean = diff_mean
                    else:
                        # A single distinct value carries no information.
                        output_status = -1
                        norm_diff_mean = list(set(diff_mean))
                else:
                    pass
                # Reject columns where more than 20% of the slots are missing.
                if len(np.nonzero(norm_diff_mean == np.inf)[0])>num_of_samples/5:
                    raise
            except Exception as e:
                log.error(traceback.print_exc())
                log.error('Error in processing data feature, excluded from analysis ' + str(e))
                output_status = -1
                norm_diff_mean = None
            if output_status == -1:
                zero_var_list.append(set_name)
                zero_var_val.append(norm_diff_mean)
                log.warn('too small variance for float type or a single value for int type, added to zero var list')
            else:
                input_names.append(set_name)
                Xdiff.append(norm_diff_mean)
                if num_type == FLOAT_TYPE:
                    FLOAT_type_list.append(set_name)
                    FLOAT_type_idx.append(len(Xdiff)-1)
                elif num_type == INT_TYPE:
                    INT_type_list.append(set_name)
                    INT_type_idx.append(len(Xdiff)-1)
            log.info('-' * 20)
        log.info('-' * 40)
    # PARALLEL ENABLED
    else:
        log.info('Build diff matrix: Parallel enabled...')
        # Construct param list for workers
        param_list = list()
        for k, (set_val, set_name) in enumerate(zip(measurement_point_set, irr_data_name)):
            param_list.append((k, time_slots, conf_lev, set_val, set_name, num_type_set[k]))
        p = mp.Pool(CPU_CORE_NUM)
        ret_dict = dict(p.map(build_diff, param_list))
        p.close()
        p.join()
        # Merge worker results in point order (k) to keep the same column
        # ordering as the sequential path.
        for k in sorted(ret_dict.keys()):
            """
            v = ret_dict[k]
            output_status = v[0]
            norm_diff_mean = v[1]
            """
            output_status, norm_diff_mean = ret_dict[k]
            set_name = irr_data_name[k]
            num_type = num_type_set[k]
            if output_status == -1:
                zero_var_list.append(set_name)
                zero_var_val.append(norm_diff_mean)
                log.warn("too small variance for float type or a single value for int type, added to zero var list")
            else:
                input_names.append(set_name)
                try:
                    Xdiff.append(norm_diff_mean)
                except Exception as e:
                    log.error(traceback.print_exc())
                    log.error(str(e))
                if num_type == FLOAT_TYPE:
                    FLOAT_type_list.append(set_name)
                    FLOAT_type_idx.append(len(Xdiff)-1)
                elif num_type == INT_TYPE:
                    INT_type_list.append(set_name)
                    INT_type_idx.append(len(Xdiff)-1)
            log.info('-' * 20)
    # Transpose so rows = time slots, columns = measurement points.
    Xdiff = np.array(Xdiff).T
    deleted_timeslot_idx = list()
    log.info('-' * 20)
    log.info('removing time slots having no sample...')
    # Any row (time slot) still containing +inf in some column is dropped.
    inf_idx_set = list()
    for col_vec in Xdiff.T:
        inf_idx = np.nonzero(col_vec == np.infty)[0]
        inf_idx_set=np.r_[inf_idx_set, inf_idx]
    inf_col_idx = list(set(list(inf_idx_set)))
    deleted_timeslot_idx = np.array([int(x) for x in inf_col_idx]).astype(int)
    log.info('time slots ' + str(deleted_timeslot_idx) + ' removed...')
    log.info('-' * 20)
    Xdiff = np.delete(Xdiff, deleted_timeslot_idx, axis=0)
    new_time_slot = np.delete(time_slots, deleted_timeslot_idx)
    # Checking whether it has any ill entry value
    verify_data_mat(Xdiff)
    log.info('*-' * 20)
    log.info("* deleted_timeslot_idx : " + str(deleted_timeslot_idx))
    log.info('*-' * 20)
    return Xdiff,\
           new_time_slot,\
           input_names,\
           zero_var_list,\
           zero_var_val, \
           INT_type_list,\
           INT_type_idx,\
           FLOAT_type_list,\
           FLOAT_type_idx | gpl-2.0 | 8,925,207,994,300,018,000 | 36.070909 | 167 | 0.555425 | false
zenieldanaku/DyDCreature_Editor | backend/data.py | 1 | 1579 | from azoe.engine import Resources
# Rule-system lookup tables.  All start as None and are populated at
# import time by load_language() below for the configured language.
RAZAS = None
CLASES = None
IDIOMAS = None
ESCUELAS = None
CONJUROS = None
HABS = None
DOTES = None
ARMAS = None
ARMDS = None
APTS = None
DOMINIOS = None
OBJMAG = None
CHARS = None
ALINI = None
TAM = None
def load_language(lang):
    """(Re)load all rule-system tables from database/<lang>/ JSON files.

    Rebinds the module-level globals declared above, so any consumer that
    reads them through this module sees the newly selected language.
    """
    root = 'database/' + lang + '/'
    global CHARS, RAZAS, TAM, CLASES, ALINI, IDIOMAS, HABS, DOTES, ESCUELAS, DOMINIOS, CONJUROS, ARMAS, ARMDS, OBJMAG
    # Read 'basicos' once instead of three times -- the original opened
    # the same JSON file for each of the three keys below.
    basicos = Resources.abrir_json(root + 'basicos' + ".json")
    CHARS = basicos['caracteristicas']
    TAM = basicos['tamanios']
    ALINI = basicos['alineamientos']
    RAZAS = Resources.abrir_json(root + 'razas' + ".json")
    CLASES = Resources.abrir_json(root + 'clases' + ".json")
    IDIOMAS = Resources.abrir_json(root + 'idiomas' + ".json")
    HABS = Resources.abrir_json(root + 'habilidades' + ".json")
    DOTES = Resources.abrir_json(root + 'dotes' + ".json")
    ESCUELAS = Resources.abrir_json(root + 'escuelas' + ".json")
    DOMINIOS = Resources.abrir_json(root + 'dominios' + ".json")
    CONJUROS = Resources.abrir_json(root + 'conjuros' + ".json")
    ARMAS = Resources.abrir_json(root + 'armas' + ".json")
    ARMDS = Resources.abrir_json(root + 'armaduras' + ".json")
    OBJMAG = Resources.abrir_json(root + 'objetos_magicos' + ".json")
# Pick the configured language and populate all tables at import time.
lengua = Resources.abrir_json('config.json')['lengua']
load_language(lengua)
__all__ = "CHARS,RAZAS,TAM,CLASES,ALINI,IDIOMAS,HABS,DOTES,ESCUELAS,DOMINIOS,CONJUROS,ARMAS,ARMDS,OBJMAG".split(',')
| mit | 2,461,572,856,888,210,400 | 36.512195 | 117 | 0.650412 | false |
SDSG-Invenio/invenio | invenio/modules/formatter/__init__.py | 6 | 21654 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Format records using chosen format.
The main APIs are:
- format_record
- format_records
- create_excel
- get_output_format_content_type
This module wraps the BibFormat engine and its associated
functions. This is also where special formatting functions of multiple
records (that the engine does not handle, as it works on a single
record basis) should be defined, with name C{def create_*}.
.. seealso::
bibformat_utils.py
"""
from __future__ import print_function
import getopt
import sys
import zlib
from invenio.base.globals import cfg
# Functions to format a single record
#
def format_record(recID, of, ln=None, verbose=0, search_pattern=None,
                  xml_record=None, user_info=None, on_the_fly=False,
                  save_missing=True, force_2nd_pass=False, **kwargs):
    """Format a single record in the given output format.

    The record may be identified by ``recID`` or passed directly as an
    XML string via ``xml_record`` (in which case ``recID`` is only kept
    for reference; a dummy value such as 0 or -1 may be used).  The
    BibFormat engine selects and applies the appropriate format template.

    :param recID: ID of the record to format
    :type recID: int
    :param of: output format code (or its short identifier)
    :type of: string
    :param ln: language to format in; defaults to CFG_SITE_LANG
    :type ln: string
    :param verbose: verbosity level, 0 (silent) to 9 (debug: stop on any
        error); 5 shows errors, 7 also warnings and stops on format
        element errors
    :type verbose: int
    :param search_pattern: list of strings representing the user request
        in the web interface
    :type search_pattern: list(string)
    :param xml_record: XML representation of the record to format, if any
    :type xml_record: string or None
    :param user_info: user information as returned by
        ``webuser.collect_user_info(req)``; grants access to
        privilege-dependent page features (meaningful only for
        on-the-fly formatting)
    :param on_the_fly: when False, a preformatted version stored in the
        database may be returned instead of reformatting
    :type on_the_fly: boolean
    :param force_2nd_pass: run the second formatting pass even when the
        first pass does not request it
    :return: the formatted record
    :rtype: string
    """
    language = ln or cfg['CFG_SITE_LANG']
    from . import engine as bibformat_engine
    formatted, needs_2nd_pass = bibformat_engine.format_record_1st_pass(
        recID=recID,
        of=of,
        ln=language,
        verbose=verbose,
        search_pattern=search_pattern,
        xml_record=xml_record,
        user_info=user_info,
        on_the_fly=on_the_fly,
        save_missing=save_missing,
        **kwargs)
    if not (needs_2nd_pass or force_2nd_pass):
        return formatted
    # The first pass flagged unresolved elements (or the caller forced
    # it): resolve them with the second pass over the produced template.
    return bibformat_engine.format_record_2nd_pass(
        recID=recID,
        of=of,
        template=formatted,
        ln=language,
        verbose=verbose,
        search_pattern=search_pattern,
        xml_record=xml_record,
        user_info=user_info,
        **kwargs)
def record_get_xml(recID, format='xm', decompress=zlib.decompress):
    """Return the record ``recID`` as an XML string.

    The XML is built straight from the database, bypassing the standard
    formatting pipeline.  ``format`` selects the flavour: 'xm' (standard
    XML), 'marcxml' (MARC XML), 'oai_dc' (OAI Dublin Core) or 'xd'
    (XML Dublin Core).  ``decompress`` is the callable used to inflate
    the cached blob.  Returns the empty string when the record does not
    exist.
    """
    from . import utils as bibformat_utils
    return bibformat_utils.record_get_xml(recID=recID, format=format,
                                          decompress=decompress)
# Helper functions to do complex formatting of multiple records
#
# You should not modify format_records when adding a complex
# formatting of multiple records, but add a create_* method
# that relies on format_records to do the formatting.
#
def format_records(recIDs, of, ln=None, verbose=0, search_pattern=None,
                   xml_records=None, user_info=None, record_prefix=None,
                   record_separator=None, record_suffix=None, prologue="",
                   epilogue="", req=None, on_the_fly=False,
                   extra_context=None):
    """Format records given by a list of record IDs or a list of xml records.

    Add a prefix before each record, a suffix after each record, plus a
    separator between records, and wrap the complete formatted list in an
    optional prologue and epilogue.

    You can either specify a list of record IDs to format, or a list of xml
    records, but not both (if both are specified recIDs is ignored).

    ``record_prefix``, ``record_suffix`` and ``record_separator`` may each be
    either a plain string (used verbatim for every record) or a function
    taking one integer: the index in recIDs (or xml_records) of the record
    that has just been formatted.  For example ``separator(i)`` must return
    the separator between recID[i] and recID[i+1].

    ``req`` is an optional request object on which the result is also printed
    lively (record after record) if it is given.  Note that you should set
    the content-type of ``req`` yourself and send the HTTP header before
    calling this function, as it will not do it.

    This function takes the same parameters as :meth:`format_record` except
    for:

    :param recIDs: a list of record IDs
    :type recIDs: list(int)
    :param xml_records: a list of xml string representations of the records
        to format
    :type xml_records: list(string)
    :param record_prefix: a string (or index->string function) printed before
        B{each} formatted record (n times)
    :param record_suffix: a string (or index->string function) printed after
        B{each} formatted record (n times)
    :param record_separator: a string (or index->string function) used to
        join formatted records
    :param prologue: a string printed at the beginning of the complete
        formatted records (1x)
    :type prologue: string
    :param epilogue: a string printed at the end of the complete formatted
        output (1x)
    :type epilogue: string
    :param req: an optional request object where to print records
    :param on_the_fly: if False, try to return an already preformatted
        version of the record in the database
    :type on_the_fly: boolean
    :rtype: string
    """
    def emit(text):
        # Stream to the request object (if any) and hand the text back so
        # the caller can accumulate it into the returned string.
        if req is not None:
            req.write(text)
        return text

    def resolve(affix, i):
        # prefix/suffix/separator are either literal strings or callables
        # taking the index of the record just formatted.
        return affix if isinstance(affix, str) else affix(i)

    if req is not None:
        req.write(prologue)
    # Pad the unused list with Nones.  Build real lists here: under
    # Python 3 ``map`` returns an iterator, which would break ``len()``
    # and the ``xml_records[i]`` indexing below.
    if xml_records is not None:
        xml_records = list(xml_records)
        recIDs = [None] * len(xml_records)
    else:
        recIDs = list(recIDs)
        xml_records = [None] * len(recIDs)
    total_rec = len(recIDs)
    # The language is invariant across records; resolve it once, not per
    # iteration.
    ln = ln or cfg['CFG_SITE_LANG']
    formatted_records = ''
    for i in range(total_rec):
        if record_prefix is not None:
            formatted_records += emit(resolve(record_prefix, i))
        formatted_records += emit(format_record(recIDs[i], of, ln, verbose,
                                                search_pattern,
                                                xml_records[i], user_info,
                                                on_the_fly, extra_context))
        if record_suffix is not None:
            formatted_records += emit(resolve(record_suffix, i))
        # No separator after the last record.
        if record_separator is not None and i != total_rec - 1:
            formatted_records += emit(resolve(record_separator, i))
    if req is not None:
        req.write(epilogue)
    return prologue + formatted_records + epilogue
def format_with_format_template(format_template_filename, bfo,
                                verbose=0, format_template_code=None):
    """Evaluate a format template through the BibFormat engine.

    Thin wrapper around :func:`engine.format_with_format_template` that
    discards the second element of the engine's result tuple and returns
    only the evaluated output.
    """
    from . import engine as bibformat_engine
    result = bibformat_engine.format_with_format_template(
        format_template_filename=format_template_filename,
        bfo=bfo,
        verbose=verbose,
        format_template_code=format_template_code)
    return result[0]
def create_excel(recIDs, req=None, ln=None, ot=None, ot_sep="; ",
                 user_info=None):
    """Return an Excel readable format containing the given recIDs.
    If 'req' is given, also prints the output in 'req' while individual
    records are being formatted.
    This method shows how to create a custom formatting of multiple
    records.
    The excel format is a basic HTML table that most spreadsheets
    applications can parse.
    If 'ot' is given, the BibFormat engine is overridden and the
    output is produced on the basis of the fields that 'ot' defines
    (see search_engine.perform_request_search(..) 'ot' param).
    :param req: the request object
    :param recIDs: a list of record IDs
    :param ln: language
    :param ot: a list of fields that should be included in the excel output as
        columns(see perform_request_search 'ot' param)
    :param ot_sep: a separator used to separate values for the same record, in
        the same columns, if any
    :param user_info: the user_info dictionary
    :return: a string in Excel format
    """
    from . import utils as bibformat_utils
    # Prepare the column headers to display in the Excel file
    column_headers_list = ['Title',
                           'Authors',
                           'Addresses',
                           'Affiliation',
                           'Date',
                           'Publisher',
                           'Place',
                           'Abstract',
                           'Keywords',
                           'Notes']
    # Prepare Content
    # Join the headers with close-cell/open-cell markup so that, once
    # wrapped below, every header sits in its own black, styled <td>.
    # NOTE(review): ``column_headers`` is built here but never used by
    # either branch below -- presumably it was meant to be part of the
    # prologue/table output; confirm before removing.
    column_headers = '</b></td><td style="border-color:black; ' \
                     'border-style:solid; border-width:thin; ' \
                     'background-color:black;color:white"><b>' \
                     .join(column_headers_list) + ''
    column_headers = '<table style="border-collapse: collapse;">\n' \
                     '<td style="border-color:black; border-style:solid; ' \
                     'border-width:thin; background-color:black;color:white">'\
                     '<b>' + column_headers + '</b></td>'
    footer = '</table>'
    # Apply content_type and print column headers
    if req is not None:
        req.content_type = get_output_format_content_type('excel')
        req.headers_out["Content-Disposition"] = "inline; filename=results.xls"
        req.send_http_header()
    if ot is not None and len(ot) > 0:
        # Skip BibFormat engine, produce our own output based on
        # specified fields. Each field will be a column of the
        # output. If a field has multiple values, then they are joined
        # into the same cell.
        out = "<table>"
        if req:
            req.write("<table>")
        for recID in recIDs:
            row = '<tr>'
            # First column: link back to the record's detailed page.
            row += '<td><a href="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/' \
                   '%(recID)i">%(recID)i</a></td>' % \
                   {'recID': recID, 'CFG_SITE_RECORD': cfg['CFG_SITE_RECORD'],
                    'CFG_SITE_URL': cfg['CFG_SITE_URL']}
            for field in ot:
                # One column per requested field; multiple values for the
                # same field are joined with ``ot_sep`` in a single cell.
                row += '<td>%s</td>' % \
                       ot_sep.join(bibformat_utils.get_all_fieldvalues(
                           recID, field))
            row += '</tr>'
            out += row
            if req:
                req.write(row)
        out += '</table>'
        if req:
            req.write('</table>')
        return out
    # Format the records
    # Delegate to the 'excel' output format; the <meta> prologue forces
    # UTF-8 so spreadsheet applications decode the HTML table correctly.
    prologue = '<meta http-equiv="Content-Type" content="text/html; ' \
               'charset=utf-8"><table>'
    excel_formatted_records = format_records(recIDs, 'excel',
                                             ln=ln or cfg['CFG_SITE_LANG'],
                                             record_separator='\n',
                                             prologue=prologue,
                                             epilogue=footer,
                                             req=req,
                                             user_info=user_info)
    return excel_formatted_records
def get_output_format_content_type(of, default_content_type="text/html"):
    """Look up the MIME content type registered for an output format.

    Examples are ``text/html`` or ``application/ms-excel``.

    :param of: the code of the output format whose content type is wanted
    :param default_content_type: fallback used when no content-type was set
        up for the format
    :return: the content-type to use for this output format
    """
    from . import api
    registered = api.get_output_format_content_type(of)
    return registered if registered != '' else default_content_type
def print_records(recIDs, of='hb', ln=None, verbose=0,
                  search_pattern='', on_the_fly=False, **ctx):
    """Render the given records through the Jinja template for format ``of``.

    Pagination (``jrec``, ``rg``), language and output tags are taken from
    the current Flask request when present, falling back to ``ctx`` values
    and site defaults.  Any extra keyword arguments in ``ctx`` override the
    template context built here.
    """
    import time
    from math import ceil
    from flask import request
    from invenio.base.i18n import wash_language
    from invenio.ext.template import render_template_to_string
    from invenio.modules.search.models import Format
    from invenio.utils.pagination import Pagination
    from invenio.modules.formatter.engine import \
        TEMPLATE_CONTEXT_FUNCTIONS_CACHE
    of = of.lower()
    # Request parameters win over ctx defaults.
    jrec = request.values.get('jrec', ctx.get('jrec', 1), type=int)
    rg = request.values.get('rg', ctx.get('rg', 10), type=int)
    ln = ln or wash_language(request.values.get('ln', cfg['CFG_SITE_LANG']))
    ot = (request.values.get('ot', ctx.get('ot')) or '').split(',')
    records = ctx.get('records', len(recIDs))
    if jrec > records:
        # Start index past the end: snap back to the first index of the
        # last (possibly partial) page.
        jrec = rg * (records // rg) + 1
    # Page number containing jrec; guard against rg == 0.
    pages = int(ceil(jrec / float(rg))) if rg > 0 else 1
    context = dict(
        of=of, jrec=jrec, rg=rg, ln=ln, ot=ot,
        facets={},
        time=time,
        recids=recIDs,
        pagination=Pagination(pages, rg, records),
        verbose=verbose,
        export_formats=Format.get_export_formats(),
        format_record=format_record,
        **TEMPLATE_CONTEXT_FUNCTIONS_CACHE.template_context_functions
    )
    # Caller-supplied context entries override the defaults built above.
    context.update(ctx)
    # Template lookup falls back from the full format code to its first
    # letter, then to a name derived from the format's content type.
    return render_template_to_string(
        ['format/records/%s.tpl' % of,
         'format/records/%s.tpl' % of[0],
         'format/records/%s.tpl' % get_output_format_content_type(of).
         replace('/', '_')],
        **context)
def usage(exitcode=1, msg=""):
    """Print the bibformat usage text and terminate the process.

    :param exitcode: process exit status (e.g. 1 for error, 0 for okay)
    :param msg: optional error message written to stderr before the help
    """
    if msg:
        sys.stderr.write("Error: %s.\n" % msg)
    help_text = """BibFormat: outputs the result of the formatting of a record.
 Usage: bibformat required [options]
 Examples:
   $ bibformat -i 10 -o HB
   $ bibformat -i 10,11,13 -o HB
   $ bibformat -i 10:13
   $ bibformat -i 10 -o HB -v 9
 Required:
 -i, --id=ID[ID2,ID3:ID5]      ID (or range of IDs) of the record(s) to be
                               formatted.
 Options:
 -o, --output=CODE             short code of the output format used for
                               formatting (default HB).
 -l, --lang=LN                 language used for formatting.
 -y, --onthefly                on-the-fly formatting, avoiding caches created
                               by BibReformat.
 General options:
 -h, --help                    print this help and exit
 -v, --verbose=LEVEL           verbose level (from 0 to 9, default 0)
 """
    print(help_text)
    sys.exit(exitcode)
def main():
    """
    Main entry point for bibformat via command line.

    Parses the command-line options, then prints the formatted record(s)
    as specified, or shows help/errors and exits.
    """
    options = {}  # will hold command-line options
    options["verbose"] = 0
    options["onthefly"] = False
    options["lang"] = cfg['CFG_SITE_LANG']
    options["output"] = "HB"
    options["recID"] = None

    try:
        opts, args = getopt.getopt(sys.argv[1:],
                                   "hVv:yl:i:o:",
                                   ["help",
                                    "version",
                                    "verbose=",
                                    "onthefly",
                                    "lang=",
                                    "id=",
                                    "output="])
    except getopt.GetoptError as err:
        # usage() exits the process, so ``opts`` is always bound below.
        usage(1, err)
    try:
        for opt in opts:
            if opt[0] in ["-h", "--help"]:
                usage(0)
            # NOTE: "-V"/"--version" is accepted by getopt above but has no
            # handler here, so it is silently ignored.
            elif opt[0] in ["-v", "--verbose"]:
                options["verbose"] = int(opt[1])
            elif opt[0] in ["-y", "--onthefly"]:
                options["onthefly"] = True
            elif opt[0] in ["-l", "--lang"]:
                options["lang"] = opt[1]
            elif opt[0] in ["-i", "--id"]:
                recIDs = []
                for recID in opt[1].split(','):
                    if ":" in recID:
                        # "A:B" is expanded with range(), i.e. B exclusive
                        # -- TODO confirm whether the upper bound should be
                        # inclusive (the usage examples suggest a range).
                        start = int(recID.split(':')[0])
                        end = int(recID.split(':')[1])
                        recIDs.extend(range(start, end))
                    else:
                        recIDs.append(int(recID))
                options["recID"] = recIDs
            elif opt[0] in ["-o", "--output"]:
                options["output"] = opt[1]

        if options["recID"] is None:
            usage(1, "-i argument is needed")
    except Exception as e:
        # ``StandardError`` does not exist on Python 3; catch Exception and
        # report it through usage() with a proper error exit code (the old
        # ``usage(e)`` passed the exception object as the exit code).
        usage(1, e)

    print(format_records(recIDs=options["recID"],
                         of=options["output"],
                         ln=options["lang"],
                         verbose=options["verbose"],
                         on_the_fly=options["onthefly"]))
    return
# Allow invoking this module directly as a command-line script.
if __name__ == "__main__":
    main()
| gpl-2.0 | 6,459,642,641,651,026,000 | 36.856643 | 80 | 0.579708 | false |
Sixshaman/networkx | networkx/generators/random_clustered.py | 10 | 4534 | # -*- coding: utf-8 -*-
"""Generate graphs with given degree and triangle sequence.
"""
# Copyright (C) 2004-2016 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import random
import networkx as nx
__author__ = "\n".join(['Aric Hagberg ([email protected])',
'Joel Miller ([email protected])'])
__all__ = ['random_clustered_graph']
def random_clustered_graph(joint_degree_sequence, create_using=None,
                           seed=None):
    r"""Generate a random graph with the given joint independent edge degree and
    triangle degree sequence.

    This uses a configuration model-like approach to generate a random graph
    (with parallel edges and self-loops) by randomly assigning edges to match
    the given joint degree sequence.

    The joint degree sequence is a list of pairs of integers of the form
    `[(d_{1,i}, d_{1,t}), \dotsc, (d_{n,i}, d_{n,t})]`. According to this list,
    vertex `u` is a member of `d_{u,t}` triangles and has `d_{u, i}` other
    edges. The number `d_{u,t}` is the *triangle degree* of `u` and the number
    `d_{u,i}` is the *independent edge degree*.

    Parameters
    ----------
    joint_degree_sequence : list of integer pairs
        Each list entry corresponds to the independent edge degree and
        triangle degree of a node.
    create_using : graph, optional (default MultiGraph)
        Return graph of this type. The instance will be cleared.
    seed : hashable object, optional
        The seed for the random number generator.

    Returns
    -------
    G : MultiGraph
        A graph with the specified degree sequence. Nodes are labeled
        starting at 0 with an index corresponding to the position in
        deg_sequence.

    Raises
    ------
    NetworkXError
        If the independent edge degree sequence sum is not even
        or the triangle degree sequence sum is not divisible by 3.

    Notes
    -----
    As described by Miller [1]_ (see also Newman [2]_ for an equivalent
    description).

    A non-graphical degree sequence (not realizable by some simple
    graph) is allowed since this function returns graphs with self
    loops and parallel edges. An exception is raised if the
    independent degree sequence does not have an even sum or the
    triangle degree sequence sum is not divisible by 3.

    This configuration model-like construction process can lead to
    duplicate edges and loops. You can remove the self-loops and
    parallel edges (see below) which will likely result in a graph
    that doesn't have the exact degree sequence specified. This
    "finite-size effect" decreases as the size of the graph increases.

    References
    ----------
    .. [1] Joel C. Miller. "Percolation and epidemics in random clustered
           networks". In: Physical review. E, Statistical, nonlinear, and soft
           matter physics 80 (2 Part 1 August 2009).
    .. [2] M. E. J. Newman. "Random Graphs with Clustering".
           In: Physical Review Letters 103 (5 July 2009)

    Examples
    --------
    >>> deg = [(1, 0), (1, 0), (1, 0), (2, 0), (1, 0), (2, 1), (0, 1), (0, 1)]
    >>> G = nx.random_clustered_graph(deg)

    To remove parallel edges:

    >>> G = nx.Graph(G)

    To remove self loops:

    >>> G.remove_edges_from(G.selfloop_edges())
    """
    if create_using is None:
        create_using = nx.MultiGraph()
    elif create_using.is_directed():
        raise nx.NetworkXError("Directed Graph not supported")

    if seed is not None:
        random.seed(seed)

    # In Python 3, zip() returns an iterator. Make this into a list.
    joint_degree_sequence = list(joint_degree_sequence)

    N = len(joint_degree_sequence)
    G = nx.empty_graph(N, create_using)

    # Each node appears once in ``ilist`` per independent half-edge and once
    # in ``tlist`` per triangle corner it participates in.
    ilist = []
    tlist = []
    for n in G:
        degrees = joint_degree_sequence[n]
        ilist.extend([n] * degrees[0])
        tlist.extend([n] * degrees[1])

    if len(ilist) % 2 != 0 or len(tlist) % 3 != 0:
        raise nx.NetworkXError('Invalid degree sequence')

    # Randomly pair independent stubs and randomly group triangle stubs in
    # threes, adding the corresponding (multi-)edges.
    random.shuffle(ilist)
    random.shuffle(tlist)
    while ilist:
        G.add_edge(ilist.pop(), ilist.pop())
    while tlist:
        n1 = tlist.pop()
        n2 = tlist.pop()
        n3 = tlist.pop()
        G.add_edges_from([(n1, n2), (n1, n3), (n2, n3)])
    G.name = "random_clustered %d nodes %d edges" % (G.order(), G.size())
    return G
| bsd-3-clause | -6,916,573,096,786,098,000 | 33.348485 | 79 | 0.634098 | false |
alexforencich/python-ivi | ivi/rigol/rigolDSSource.py | 1 | 19204 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import struct
from .. import ivi
from .. import fgen
# Output modes supported by the internal source.
OutputMode = {'function', 'arbitrary'}

# Only continuous generation is supported (no burst/sweep).
OperationMode = {'continuous'}

# Map IVI standard-waveform names to the instrument's SCPI function
# mnemonics.  Several IVI names (triangle, ramp_up, ramp_down) share the
# SCPI 'ramp' function and are distinguished by the symmetry setting.
StandardWaveformMapping = {
    'sine': 'sin',
    'square': 'squ',
    'triangle': 'ramp',
    'ramp_up': 'ramp',
    'ramp_down': 'ramp',
    'dc': 'dc',
    'pulse': 'puls',
    'noise': 'nois',
    'sinc': 'sinc',
    'exprise': 'expr',
    'expfall': 'expf',
    'cardiac': 'ecg',
    'gaussian': 'gaus',
    'lorentz': 'lor',
    'haversine': 'hav'
}
class rigolDSSource(fgen.Base, fgen.StdFunc, fgen.ArbWfm, fgen.ArbFrequency,
                    fgen.ArbChannelWfm):
    """Rigol DSO internal source IVI function generator driver.

    Drives the built-in signal source of Rigol oscilloscopes through SCPI
    ``:SOURCE<n>:...`` commands, exposed via the IVI fgen interfaces mixed in
    above.  Intended to be mixed into a Rigol scope driver that provides the
    ``_ask``/``_write`` transport and the ``_get_cache_valid`` /
    ``_set_cache_valid`` caching helpers.
    """
    def __init__(self, *args, **kwargs):
        # Default the instrument ID before the base-class chain inspects it.
        self.__dict__.setdefault('_instrument_id', '')
        # Created before super().__init__ because _init_outputs (invoked from
        # the base constructor chain) appends to it.
        self._output_standard_waveform_symmetry = list()
        super(rigolDSSource, self).__init__(*args, **kwargs)
        # Internal source
        self._output_count = 2
        self._arbitrary_sample_rate = 0
        self._arbitrary_waveform_number_waveforms_max = 0
        self._arbitrary_waveform_size_max = 16384
        self._arbitrary_waveform_size_min = 2
        self._arbitrary_waveform_quantum = 1
        self._add_property('outputs[].standard_waveform.symmetry',
                        self._get_output_standard_waveform_symmetry,
                        self._set_output_standard_waveform_symmetry,
                        None,
                        """
                        Specifies the symmetry for a ramp or triangle waveform. This attribute
                        affects function generator behavior only when the Waveform attribute is
                        set to Waveform Triangle, Ramp Up, or Ramp Down. The value is expressed
                        as a percentage.
                        """)
        self._identity_description = "Rigol DSO internal source IVI function generator driver"
        self._identity_supported_instrument_models = []
        self._init_outputs()

    def _init_outputs(self):
        """(Re)build the per-output state lists sized to ``_output_count``."""
        try:
            super(rigolDSSource, self)._init_outputs()
        except AttributeError:
            pass
        self._output_name = list()
        self._output_operation_mode = list()
        self._output_enabled = list()
        self._output_impedance = list()
        self._output_mode = list()
        self._output_reference_clock_source = list()
        # NOTE(review): this list is created but never referenced again in
        # this class; the code below populates
        # _output_standard_waveform_symmetry instead -- possibly a leftover.
        self._output_standard_waveform_ramp_symmetry = list()
        for i in range(self._output_count):
            self._output_name.append("source%d" % (i+1))
            self._output_operation_mode.append('continuous')
            self._output_enabled.append(False)
            self._output_impedance.append(50)
            self._output_mode.append('function')
            self._output_reference_clock_source.append('internal')
            # Appends without clearing first; the list is only reset in
            # __init__, so a second _init_outputs call would grow it --
            # TODO confirm intended.
            self._output_standard_waveform_symmetry.append(50.0)
        self.outputs._set_list(self._output_name)

    # AFG option
    def _get_output_operation_mode(self, index):
        """Return the (software-only) operation mode of an output."""
        index = ivi.get_index(self._output_name, index)
        return self._output_operation_mode[index]

    def _set_output_operation_mode(self, index, value):
        """Set the operation mode; only 'continuous' is accepted."""
        index = ivi.get_index(self._output_name, index)
        if value not in OperationMode:
            raise ivi.ValueNotSupportedException()
        self._output_operation_mode[index] = value

    def _get_output_enabled(self, index):
        """Query whether the output is switched on (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:output:state?" % self._output_name[index])
            self._output_enabled[index] = resp == 'ON'
            self._set_cache_valid(index=index)
        return self._output_enabled[index]

    def _set_output_enabled(self, index, value):
        """Switch the output on or off."""
        index = ivi.get_index(self._output_name, index)
        value = bool(value)
        if not self._driver_operation_simulate:
            self._write(":%s:output:state %d" % (self._output_name[index], value))
        self._output_enabled[index] = value
        self._set_cache_valid(index=index)

    def _get_output_impedance(self, index):
        """Query the output impedance: 50 ohm ('FIF') or 1 Mohm ('HIGHZ')."""
        # NOTE(review): indexes by _analog_channel_name while every other
        # method in this class uses _output_name -- looks like a copy/paste
        # slip from the scope driver; confirm against the mixin this class
        # is combined with.
        index = ivi.get_index(self._analog_channel_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            val = self._ask(":%s:output:impedance?" % self._output_name[index])
            if val == 'HIGHZ':
                self._output_impedance[index] = 1000000
            elif val == 'FIF':
                self._output_impedance[index] = 50
            self._set_cache_valid(index=index)
        return self._output_impedance[index]

    def _set_output_impedance(self, index, value):
        """Set the output impedance; only 50 ohm and 1 Mohm are valid."""
        value = float(value)
        # NOTE(review): same _analog_channel_name indexing as the getter
        # above -- confirm.
        index = ivi.get_index(self._analog_channel_name, index)
        if value != 50 and value != 1000000:
            raise Exception('Invalid impedance selection')
        if not self._driver_operation_simulate:
            if value == 1000000:
                self._write(":%s:output:impedance highz" % self._output_name[index])
            elif value == 50:
                self._write(":%s:output:impedance fifty" % self._output_name[index])
        self._output_impedance[index] = value
        self._set_cache_valid(index=index)
        # Amplitude readback depends on the load setting, so drop its cache.
        self._set_cache_valid(False, 'output_standard_waveform_amplitude', index)

    def _get_output_mode(self, index):
        """Return 'arbitrary' if the source plays an external waveform, else 'function'."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:function?" % self._output_name[index]).lower()
            if resp == 'ext':
                self._output_mode[index] = 'arbitrary'
            else:
                self._output_mode[index] = 'function'
            self._set_cache_valid(index=index)
        return self._output_mode[index]

    def _set_output_mode(self, index, value):
        """Select 'function' or 'arbitrary' mode for an output."""
        index = ivi.get_index(self._output_name, index)
        if value not in OutputMode:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            if value == 'arbitrary':
                self._write(":%s:function ext" % self._output_name[index])
            else:
                # Leave arbitrary mode by re-asserting the last known
                # standard waveform, falling back to sine.
                if self._get_cache_valid('output_standard_waveform_waveform', index=index):
                    self._set_output_standard_waveform_waveform(index, self._output_standard_waveform_waveform[index])
                else:
                    self._set_output_standard_waveform_waveform(index, 'sine')
        self._output_mode[index] = value
        self._set_cache_valid(index=index)

    def _get_output_reference_clock_source(self, index):
        """Return the reference clock source (always 'internal')."""
        index = ivi.get_index(self._output_name, index)
        return self._output_reference_clock_source[index]

    def _set_output_reference_clock_source(self, index, value):
        # Only the internal reference exists; any requested value is coerced.
        index = ivi.get_index(self._output_name, index)
        value = 'internal'
        self._output_reference_clock_source[index] = value

    def abort_generation(self):
        """No-op: the internal source runs continuously."""
        pass

    def initiate_generation(self):
        """No-op: the internal source runs continuously."""
        pass

    def _get_output_standard_waveform_amplitude(self, index):
        """Query the waveform amplitude in volts (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:voltage:amplitude?" % self._output_name[index])
            self._output_standard_waveform_amplitude[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_amplitude[index]

    def _set_output_standard_waveform_amplitude(self, index, value):
        """Set the waveform amplitude; valid range 0.01 V to 5 V."""
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if value < 0.01 or value > 5.0:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write(":%s:voltage:amplitude %e" % (self._output_name[index], value))
        self._output_standard_waveform_amplitude[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_dc_offset(self, index):
        """Query the DC offset in volts (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:voltage:offset?" % self._output_name[index])
            self._output_standard_waveform_dc_offset[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_dc_offset[index]

    def _set_output_standard_waveform_dc_offset(self, index, value):
        """Set the DC offset in volts (no range check here)."""
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if not self._driver_operation_simulate:
            self._write(":%s:voltage:offset %e" % (self._output_name[index], value))
        self._output_standard_waveform_dc_offset[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_duty_cycle_high(self, index):
        """Query the pulse duty cycle in percent (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:pulse:dcycle?" % self._output_name[index])
            self._output_standard_waveform_duty_cycle_high[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_duty_cycle_high[index]

    def _set_output_standard_waveform_duty_cycle_high(self, index, value):
        """Set the pulse duty cycle; valid range 10% to 90%."""
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if value < 10.0 or value > 90.0:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write(":%s:pulse:dcycle %e" % (self._output_name[index], value))
        self._output_standard_waveform_duty_cycle_high[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_symmetry(self, index):
        """Query the ramp symmetry in percent (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:function:ramp:symmetry?" % self._output_name[index])
            self._output_standard_waveform_symmetry[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_symmetry[index]

    def _set_output_standard_waveform_symmetry(self, index, value):
        """Set the ramp symmetry; valid range 0% to 100%."""
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if value < 0.0 or value > 100.0:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write(":%s:function:ramp:symmetry %e" % (self._output_name[index], value))
        self._output_standard_waveform_symmetry[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_start_phase(self, index):
        """Query the start phase in degrees (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:phase?" % self._output_name[index])
            self._output_standard_waveform_start_phase[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_start_phase[index]

    def _set_output_standard_waveform_start_phase(self, index, value):
        """Set the start phase; the value is wrapped into [0, 360) degrees."""
        index = ivi.get_index(self._output_name, index)
        value = float(value) % 360
        # NOTE(review): after the modulo above the value is already in
        # [0, 360), so this range check never fires for finite inputs.
        if value < 0 or value > 360.0:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write(":%s:phase %e" % (self._output_name[index], value))
        self._output_standard_waveform_start_phase[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_frequency(self, index):
        """Query the waveform frequency in Hz (cached)."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:frequency?" % self._output_name[index])
            self._output_standard_waveform_frequency[index] = float(resp)
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_frequency[index]

    def _set_output_standard_waveform_frequency(self, index, value):
        """Set the waveform frequency; valid range 0.1 Hz to 25 MHz."""
        index = ivi.get_index(self._output_name, index)
        value = float(value)
        if value < 0.1 or value > 25e6:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write(":%s:frequency %e" % (self._output_name[index], value))
        self._output_standard_waveform_frequency[index] = value
        self._set_cache_valid(index=index)

    def _get_output_standard_waveform_waveform(self, index):
        """Query the active standard waveform, mapped back to the IVI name."""
        index = ivi.get_index(self._output_name, index)
        if not self._driver_operation_simulate and not self._get_cache_valid(index=index):
            resp = self._ask(":%s:function?" % self._output_name[index]).lower()
            if resp == 'arbitrary':
                resp = 'sine'
            # Reverse lookup of StandardWaveformMapping; several IVI names
            # share one SCPI mnemonic, so the first match wins.
            # NOTE(review): with insertion-ordered dicts the first match for
            # 'ramp' is 'triangle', not 'ramp_up', so the symmetry-based
            # disambiguation below may never execute -- confirm.
            resp = [k for k,v in StandardWaveformMapping.items() if v==resp][0]
            if resp == 'ramp_up':
                # SCPI 'ramp' covers triangle/ramp_up/ramp_down;
                # disambiguate via the symmetry setting.
                if self._get_output_standard_waveform_symmetry(index) <= 10.0:
                    resp = 'ramp_down'
                elif self._get_output_standard_waveform_symmetry(index) >= 90.0:
                    resp = 'ramp_up'
                else:
                    resp = 'triangle'
            self._output_standard_waveform_waveform[index] = resp
            self._set_cache_valid(index=index)
        return self._output_standard_waveform_waveform[index]

    def _set_output_standard_waveform_waveform(self, index, value):
        """Select a standard waveform; ramps are encoded via the symmetry."""
        index = ivi.get_index(self._output_name, index)
        if value not in StandardWaveformMapping:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            self._write(":%s:function %s" % (self._output_name[index], StandardWaveformMapping[value]))
            # triangle/ramp_up/ramp_down all map to SCPI 'ramp'; program the
            # symmetry to realize the requested shape.
            if value == 'triangle':
                if self._get_output_standard_waveform_symmetry(index) <= 10.0 or self._get_output_standard_waveform_symmetry(index) >= 90:
                    self._set_output_standard_waveform_symmetry(index, 50.0)
            elif value == 'ramp_up':
                self._set_output_standard_waveform_symmetry(index, 100.0)
            elif value == 'ramp_down':
                self._set_output_standard_waveform_symmetry(index, 0.0)
        self._output_standard_waveform_waveform[index] = value
        self._set_cache_valid(index=index)
        # Selecting a standard waveform implicitly leaves arbitrary mode.
        self._output_mode[index] = 'function'
        self._set_cache_valid(True, 'output_mode', index=index)

    def _get_output_arbitrary_gain(self, index):
        """Arbitrary gain is the same instrument setting as the amplitude."""
        return self._get_output_standard_waveform_amplitude(index)

    def _set_output_arbitrary_gain(self, index, value):
        return self._set_output_standard_waveform_amplitude(index, value)

    def _get_output_arbitrary_offset(self, index):
        """Arbitrary offset is the same instrument setting as the DC offset."""
        return self._get_output_standard_waveform_dc_offset(index)

    def _set_output_arbitrary_offset(self, index, value):
        return self._set_output_standard_waveform_dc_offset(index, value)

    def _get_output_arbitrary_waveform(self, index):
        """Return the stored waveform handle (software bookkeeping only)."""
        index = ivi.get_index(self._output_name, index)
        return self._output_arbitrary_waveform[index]

    def _set_output_arbitrary_waveform(self, index, value):
        """Store the waveform handle; no instrument I/O is performed."""
        index = ivi.get_index(self._output_name, index)
        value = str(value)
        self._output_arbitrary_waveform[index] = value

    def _get_arbitrary_sample_rate(self):
        """Return the stored sample rate (software bookkeeping only)."""
        return self._get_arbitrary_sample_rate_stored() if False else self._arbitrary_sample_rate

    def _set_arbitrary_sample_rate(self, value):
        """Store the sample rate; no instrument I/O is performed."""
        value = float(value)
        self._arbitrary_sample_rate = value

    def _get_arbitrary_waveform_number_waveforms_max(self):
        return self._arbitrary_waveform_number_waveforms_max

    def _get_arbitrary_waveform_size_max(self):
        return self._arbitrary_waveform_size_max

    def _get_arbitrary_waveform_size_min(self):
        return self._arbitrary_waveform_size_min

    def _get_arbitrary_waveform_quantum(self):
        return self._arbitrary_waveform_quantum

    def _arbitrary_waveform_clear(self, handle):
        """No-op: the instrument has no per-handle waveform storage."""
        pass

    def _arbitrary_waveform_configure(self, index, handle, gain, offset):
        """Apply a waveform handle plus gain and offset to an output."""
        self._set_output_arbitrary_waveform(index, handle)
        self._set_output_arbitrary_gain(index, gain)
        self._set_output_arbitrary_offset(index, offset)

    def _arbitrary_waveform_create(self, data):
        # Stub: handles are not tracked; use
        # _arbitrary_waveform_create_channel_waveform to download data.
        return "handle"

    def _get_output_arbitrary_frequency(self, index):
        """Arbitrary frequency is the same setting as the waveform frequency."""
        return self._get_output_standard_waveform_frequency(index)

    def _set_output_arbitrary_frequency(self, index, value):
        return self._set_output_standard_waveform_frequency(index, value)

    def _arbitrary_waveform_create_channel_waveform(self, index, data):
        """Download an arbitrary waveform to output ``index``.

        ``data`` may be a list of floats, a 1D ndarray, a 1xN or Nx1 2D
        ndarray, or any object accepted by ``ivi.get_sig``.  Samples are
        clipped to [-1, 1], rescaled to the 14-bit DAC range and written to
        the instrument's volatile trace memory.
        """
        y = None
        x = None
        if type(data) == list and type(data[0]) == float:
            # list
            # NOTE(review): ``array`` is not defined in this module (numpy is
            # imported as ``np``), so this branch raises NameError; it should
            # presumably be ``np.array(data)``.
            y = array(data)
        elif type(data) == np.ndarray and len(data.shape) == 1:
            # 1D array
            y = data
        elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[0] == 1:
            # 2D array, height 1
            y = data[0]
        elif type(data) == np.ndarray and len(data.shape) == 2 and data.shape[1] == 1:
            # 2D array, width 1
            y = data[:,0]
        else:
            x, y = ivi.get_sig(data)
        if len(y) % self._arbitrary_waveform_quantum != 0:
            raise ivi.ValueNotSupportedException()
        # clip on [-1,1] and rescale to [0,1]
        yc = (y.clip(-1, 1)+1)/2
        # scale to 14 bits
        yb = np.rint(yc * ((1 << 14)-1)).astype(int) & 0x00003fff
        raw_data = yb.astype('<i2').tobytes()
        # space required before IEEE block due to Rigol firmware bug wrt. data alignment in scope memory
        self._write_ieee_block(raw_data, ':trace%d:data:dac volatile, ' % (index+1))
        return self._output_name[index]
| mit | -7,822,072,069,273,970,000 | 43.147126 | 138 | 0.627578 | false |
jldbc/pybaseball | tests/integration/pybaseball/enums/fangraphs/test_pitching_data_enum.py | 1 | 1327 | import lxml.etree
import requests
from pybaseball.enums.fangraphs.pitching_data_enum import FangraphsPitchingStats
from tests.integration.pybaseball.enums.fangraphs.transforms import transform_leaderboard_item
def test_enums_vs_fangraphs_column_list() -> None:
"""
Go and get all the supported columns out of Fangraphs' "Custom Query" column selector. Compare this list
to our enum of supported columns and ensure we've covered them 100%.
"""
sample_pitching_url = "https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=y&type=8&season=2020&month=0&season1=2020&ind=0"
sample_pitching_result = requests.get(sample_pitching_url)
parsed_result = lxml.etree.HTML(sample_pitching_result.content.decode('utf-8'))
custom_leaderboards_items = sorted(
list({x for x in parsed_result.xpath('//ul[@class="rlbList"]/li[@class="rlbItem"]/text()')
if x != 'Line Break'})
)
custom_leaderboards_items = sorted([transform_leaderboard_item(x) for x in custom_leaderboards_items])
current_leaderboard_items = sorted(
[str(x).split('.')[1] for x in FangraphsPitchingStats.ALL()
if x not in [FangraphsPitchingStats.COMMON, FangraphsPitchingStats.LINE_BREAK]]
)
assert custom_leaderboards_items == current_leaderboard_items
| mit | -3,920,005,448,577,044,500 | 41.806452 | 144 | 0.716654 | false |
rvs/gpdb | gpMgmt/bin/gpload.py | 1 | 101392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# gpload - load file(s) into Greenplum Database
# Copyright Greenplum 2008
'''gpload [options] -f configuration file
Options:
-h hostname: host to connect to
-p port: port to connect to
-U username: user to connect as
-d database: database to connect to
-W: force password authentication
-q: quiet mode
-D: do not actually load data
-v: verbose
-V: very verbose
-l logfile: log output to logfile
--no_auto_trans: do not wrap gpload in transaction
--gpfdist_timeout timeout: gpfdist timeout value
--version: print version number and exit
-?: help
'''
import sys
if sys.hexversion<0x2040400:
sys.stderr.write("gpload needs python 2.4.4 or higher\n")
sys.exit(2)
try:
import yaml
except ImportError:
sys.stderr.write("gpload needs pyyaml. You can get it from http://pyyaml.org.\n")
sys.exit(2)
import platform
try:
from pygresql import pg
except Exception, e:
from struct import calcsize
sysWordSize = calcsize("P") * 8
if (platform.system()) in ['Windows', 'Microsoft'] and (sysWordSize == 64):
errorMsg = "gpload appears to be running in 64-bit Python under Windows.\n"
errorMsg = errorMsg + "Currently only 32-bit Python is supported. Please \n"
errorMsg = errorMsg + "reinstall a 32-bit Python interpreter.\n"
else:
errorMsg = "gpload was unable to import The PyGreSQL Python module (pg.py) - %s\n" % str(e)
sys.stderr.write(str(errorMsg))
sys.exit(2)
import hashlib
import datetime,getpass,os,signal,socket,subprocess,threading,time,traceback,re
import uuid
import socket
thePlatform = platform.system()
if thePlatform in ['Windows', 'Microsoft']:
windowsPlatform = True
else:
windowsPlatform = False
if windowsPlatform == False:
import select
# Program name; used by cli_help() to locate the shipped help file.
EXECNAME = 'gpload'
# Running count of rows with data formatting warnings, set by notice_processor().
NUM_WARN_ROWS = 0
# Mapping for validing our configuration file. We're only concerned with
# keys -- stuff left of ':'. It gets complex in two cases: firstly when
# we handle blocks which have keys which are not keywords -- such as under
# COLUMNS:. Secondly, we want to detect when users put keywords in the wrong
# place. To that end, the mapping is structured such that:
#
# key -> { 'parse_children' -> [ True | False ],
# 'parent' -> <parent name> }
#
# Each key is a keyword in the configuration file. parse_children tells us
# whether children are expected to be keywords. parent tells us the parent
# keyword or None
# Grammar table for the YAML control file: each keyword maps to whether its
# children are also keywords ('parse_children') and to its legal parent
# keyword(s) ('parent'; None means top level, a list means several parents
# are allowed).
valid_tokens = {
    "version": {'parse_children': True, 'parent': None},
    "database": {'parse_children': True, 'parent': None},
    "user": {'parse_children': True, 'parent': None},
    "host": {'parse_children': True, 'parent': None},
    "port": {'parse_children': True, 'parent': [None, "source"]},
    "password": {'parse_children': True, 'parent': None},
    "gpload": {'parse_children': True, 'parent': None},
    "input": {'parse_children': True, 'parent': "gpload"},
    "source": {'parse_children': True, 'parent': "input"},
    "local_hostname": {'parse_children': False, 'parent': "source"},
    "port_range": {'parse_children': False, 'parent': "source"},
    "file": {'parse_children': False, 'parent': "source"},
    "ssl": {'parse_children': False, 'parent': "source"},
    "certificates_path": {'parse_children': False, 'parent': "source"},
    "columns": {'parse_children': False, 'parent': "input"},
    "transform": {'parse_children': True, 'parent': "input"},
    "transform_config": {'parse_children': True, 'parent': "input"},
    "max_line_length": {'parse_children': True, 'parent': "input"},
    "format": {'parse_children': True, 'parent': "input"},
    "delimiter": {'parse_children': True, 'parent': "input"},
    "escape": {'parse_children': True, 'parent': "input"},
    "null_as": {'parse_children': True, 'parent': "input"},
    "quote": {'parse_children': True, 'parent': "input"},
    "encoding": {'parse_children': True, 'parent': "input"},
    "force_not_null": {'parse_children': False, 'parent': "input"},
    "error_limit": {'parse_children': True, 'parent': "input"},
    "error_percent": {'parse_children': True, 'parent': "input"},
    "error_table": {'parse_children': True, 'parent': "input"},
    "log_errors": {'parse_children': False, 'parent': "input"},
    "header": {'parse_children': True, 'parent': "input"},
    "fully_qualified_domain_name": {'parse_children': False, 'parent': 'input'},
    "output": {'parse_children': True, 'parent': "gpload"},
    "table": {'parse_children': True, 'parent': "output"},
    "mode": {'parse_children': True, 'parent': "output"},
    "match_columns": {'parse_children': False, 'parent': "output"},
    "update_columns": {'parse_children': False, 'parent': "output"},
    "update_condition": {'parse_children': True, 'parent': "output"},
    "mapping": {'parse_children': False, 'parent': "output"},
    "including_defaults": {'parse_children': False, 'parent': 'output'},
    "preload": {'parse_children': True, 'parent': 'gpload'},
    "truncate": {'parse_children': False, 'parent': 'preload'},
    "reuse_tables": {'parse_children': False, 'parent': 'preload'},
    "sql": {'parse_children': True, 'parent': 'gpload'},
    "before": {'parse_children': False, 'parent': 'sql'},
    "after": {'parse_children': False, 'parent': 'sql'},
    "external": {'parse_children': True, 'parent': 'gpload'},
    "schema": {'parse_children': False, 'parent': 'external'},
}
# Size-suffix thresholds for bytestr(), ordered largest-first so the first
# matching factor wins.
_abbrevs = [
    (1<<50L, ' PB'),
    (1<<40L, ' TB'),
    (1<<30L, ' GB'),
    (1<<20L, ' MB'),
    (1<<10L, ' kB'),
    (1, ' bytes')
    ]
# Latch set by handle_kill() so a second signal during shutdown is ignored.
received_kill = False
# SQL keywords recognized by gpload (used by is_keyword() when deciding how to
# treat identifiers).  Built from a flat word list for readability; every entry
# maps to True, exactly as the previous literal dict did.
keywords = dict.fromkeys(
    """
    abort absolute access action active add admin after aggregate all
    also alter analyse analyze and any array as asc assertion
    assignment asymmetric at authorization backward before begin between bigint binary
    bit boolean both by cache called cascade cascaded case cast
    chain char character characteristics check checkpoint class close cluster coalesce
    collate column comment commit committed concurrently connection constraint constraints conversion
    convert copy cost create createdb createrole createuser cross csv cube
    current current_date current_role current_time current_timestamp current_user cursor cycle database day
    deallocate dec decimal declare default defaults deferrable deferred definer delete
    delimiter delimiters desc disable distinct distributed do domain double drop
    each else enable encoding encrypted end errors escape every except
    exchange exclude excluding exclusive execute exists explain external extract false
    fetch fields fill filter first float following for force foreign
    format forward freeze from full function global grant granted greatest
    group group_id grouping handler hash having header hold host hour
    if ignore ilike immediate immutable implicit in including inclusive increment
    index indexes inherit inherits initially inner inout input insensitive insert
    instead int integer intersect interval into invoker is isnull isolation
    join keep key lancompiler language large last leading least left
    level like limit list listen load local localtime localtimestamp location
    lock log login master match maxvalue merge minute minvalue mirror
    missing mode modify month move names national natural nchar new
    next no nocreatedb nocreaterole nocreateuser noinherit nologin none noovercommit nosuperuser
    not nothing notify notnull nowait null nullif numeric object of
    off offset oids old on only operator option or order
    others out outer over overcommit overlaps overlay owned owner partial
    partition partitions password percent placing position preceding precision prepare prepared
    preserve primary prior privileges procedural procedure queue quote randomly range
    read real reassign recheck references reindex reject relative release rename
    repeatable replace reset resource restart restrict returning returns revoke right
    role rollback rollup row rows rule savepoint schema scroll second
    security segment select sequence serializable session session_user set setof sets
    share show similar simple smallint some split stable start statement
    statistics stdin stdout storage strict subpartition subpartitions substring superuser symmetric
    sysid system table tablespace temp template temporary then threshold ties
    time timestamp to trailing transaction transform treat trigger trim true
    truncate trusted type unbounded uncommitted unencrypted union unique unknown unlisten
    until update user using vacuum valid validation validator values varchar
    varying verbose view volatile web when where window with without
    work write year zone
    """.split(), True)
def is_keyword(tab):
    """Return True when *tab* is one of the known SQL keywords."""
    return tab in keywords
def caseInsensitiveDictLookup(key, dictionary):
    """
    Case-insensitive dictionary lookup.  Return the value of the first key
    that matches *key* ignoring case, or None when no key matches.
    """
    target = key.lower()
    for k in dictionary:
        if k.lower() == target:
            return dictionary[k]
    return None
def sqlIdentifierCompare(x, y):
    """
    Compare x and y as SQL identifiers: a delimited identifier is compared
    after its quotes are stripped/unescaped, a non-delimited one after case
    folding.  Returns True when the two are equivalent, False otherwise
    (including when either is None).
    """
    if x is None or y is None:
        return False

    def normalize(ident):
        # Delimited names keep their case; undelimited names fold to lower.
        if isDelimited(ident):
            return quote_unident(ident)
        return ident.lower()

    return normalize(x) == normalize(y)
def isDelimited(value):
    """
    True when *value* is a delimited SQL identifier, i.e. at least two
    characters long and both starting and ending with a double quote.
    """
    return len(value) > 1 and value[0] == '"' and value[-1] == '"'
def convertListToDelimited(identifiers):
    """
    Return a new list in which every identifier is delimited: names that are
    already delimited pass through unchanged, the rest are lower-cased and
    wrapped in double quotes.
    """
    delimited = []
    for name in identifiers:
        if isDelimited(name):
            delimited.append(name)
        else:
            delimited.append(quote_ident(name.lower()))
    return delimited
def splitUpMultipartIdentifier(id):
    """
    Split a (possibly partially quoted) multipart SQL identifier into its
    elements, e.g. sch.tab -> ['sch', 'tab'], keeping quoted parts intact.
    """
    pieces = splitIntoLiteralsAndNonLiterals(id, quoteValue='"')
    # splitIntoLiteralsAndNonLiterals() pads a leading ' ' when the string
    # starts with a quote; drop that padding element.
    if pieces[0] == ' ':
        pieces = pieces[1:]

    parts = []
    for piece in pieces:
        if piece == '.':
            # bare separator between two quoted parts
            continue
        if piece[0] == '"':
            candidates = [piece]
        else:
            # undelimited text may itself contain dots separating parts
            candidates = piece.split('.')
        for part in candidates:
            if part != '':
                parts.append(part)
    return parts
def splitIntoLiteralsAndNonLiterals(str1, quoteValue="'"):
    """
    Break str1 into an alternating list of non-literal and literal chunks
    delimited by quoteValue: even-indexed elements are non-literals, odd ones
    are literals (quotes included).  A doubled quote inside a literal is kept
    as an escape.  Modifiers on a literal (e.g. the E in E'abc') are not
    recognized.
    """
    chunks = []
    if len(str1) > 1 and str1[0] == quoteValue:
        # Pad so the result always starts with a non-literal chunk.
        str1 = ' ' + str1

    inside = False   # currently scanning inside a quoted literal?
    start = 0        # beginning of the chunk being scanned
    pos = 0
    while pos < len(str1):
        if str1[pos] == quoteValue:
            if not inside:
                # opening quote: emit the preceding non-literal chunk
                inside = True
                chunks.append(str1[start:pos])
                start = pos
            elif pos + 1 < len(str1) and str1[pos+1] == quoteValue:
                # doubled quote inside a literal: skip over the escape
                pos = pos + 1
            else:
                # closing quote (or end of input): emit the literal
                chunks.append(str1[start:pos+1])
                start = pos + 1
                inside = False
        pos = pos + 1

    if start < len(str1):
        chunks.append(str1[start:])
    return chunks
def quote_ident(val):
    """
    Delimit *val* as a SQL identifier: double any embedded double quotes and
    wrap the result in double quotes.
    """
    return '"%s"' % val.replace('"', '""')
def quote_unident(val):
    """
    Reverse of quote_ident(): collapse doubled double quotes and then strip
    one pair of enclosing double quotes when present.  None and the empty
    string are returned unchanged.
    """
    if val is not None and len(val) > 0:
        # unescaping happens before the outer quotes are stripped, matching
        # the encoding order used by quote_ident()
        val = val.replace('""', '"')
        if len(val) > 1 and val[0] == '"' and val[-1] == '"':
            val = val[1:-1]
    return val
def notice_processor(self):
    """
    Scan the database connection's notices for the "Found N data formatting
    errors" summary and record N in the module-level NUM_WARN_ROWS.
    *self* is the gpload instance owning the connection (self.db).
    """
    if windowsPlatform == True:
        # We don't have a pygresql with our notice fix, so skip for windows.
        # This means we will not get any warnings on windows (MPP10989).
        return

    theNotices = self.db.notices()
    r = re.compile("^NOTICE: Found (\d+) data formatting errors.*")
    messageNumber = 0
    m = None
    # stop at the first notice that matches the formatting-error summary
    while messageNumber < len(theNotices) and m == None:
        aNotice = theNotices[messageNumber]
        m = r.match(aNotice)
        messageNumber = messageNumber + 1

    if m:
        global NUM_WARN_ROWS
        NUM_WARN_ROWS = int(m.group(1))
def handle_kill(signum, frame):
    """
    Signal handler: mark the session as terminating and exit with status 2.
    Relies on the module-level gpload instance 'g' (presumably assigned in
    main, which is outside this excerpt).
    """
    # already dying?
    global received_kill
    if received_kill:
        return

    received_kill = True
    g.log(g.INFO, "received signal %d" % signum)
    g.exitValue = 2
    sys.exit(2)
def bytestr(size, precision=1):
    """
    Render *size* (in bytes) as a human readable string using the thresholds
    in _abbrevs, keeping at most *precision* decimal digits and dropping the
    fraction when it rounds to zero (e.g. 1024 -> '1 kB', 1536 -> '1.5 kB').
    """
    if size == 1:
        return '1 byte'
    for factor, suffix in _abbrevs:
        if size >= factor:
            break
    # repr() replaces the deprecated backquote syntax; the produced string is
    # identical.
    float_string_split = repr(size/float(factor)).split('.')
    integer_part = float_string_split[0]
    decimal_part = float_string_split[1]
    if int(decimal_part[0:precision]):
        float_string = '.'.join([integer_part, decimal_part[0:precision]])
    else:
        float_string = integer_part
    return float_string + suffix
class CatThread(threading.Thread):
    """
    Simple threading wrapper to read a file descriptor and put the contents
    in the log file.
    The fd is assumed to be stdout and stderr from gpfdist. We must use select.select
    and locks to ensure both threads are not read at the same time. A dead lock
    situation could happen if they did. communicate() is not used since it blocks.
    We will wait 1 second between read attempts.
    """

    def __init__(self,gpload,fd, sharedLock = None):
        # gpload: owning session (used for logging); fd: pipe from the gpfdist
        # subprocess; sharedLock: serializes reads with the sibling CatThread.
        threading.Thread.__init__(self)
        self.gpload = gpload
        self.fd = fd
        self.theLock = sharedLock

    def run(self):
        # Copy gpfdist output lines into the gpload log until EOF ('' read).
        try:
            if windowsPlatform == True:
                while 1:
                    # Windows select does not support select on non-file fd's, so we can use the lock fix. Deadlock is possible here.
                    # We need to look into the Python windows module to see if there is another way to do this in Windows.
                    line = self.fd.readline()
                    if line=='':
                        break
                    self.gpload.log(self.gpload.LOG, 'gpfdist: ' + line.strip('\n'))
            else:
                while 1:
                    # wait up to 1 second for the fd to become readable
                    retList = select.select( [self.fd]
                                           , []
                                           , []
                                           , 1
                                           )
                    if retList[0] == [self.fd]:
                        self.theLock.acquire()
                        line = self.fd.readline()
                        self.theLock.release()
                    else:
                        continue
                    if line=='':
                        break
                    self.gpload.log(self.gpload.LOG, 'gpfdist: ' + line.strip('\n'))
        except Exception, e:
            # close fd so that not block the worker thread because of stdout/stderr pipe not finish/closed.
            self.fd.close()
            sys.stderr.write("\n\nWarning: gpfdist log halt because Log Thread '%s' got an exception: %s \n" % (self.getName(), str(e)))
            self.gpload.log(self.gpload.WARN, "gpfdist log halt because Log Thread '%s' got an exception: %s" % (self.getName(), str(e)))
            raise
class Progress(threading.Thread):
    """
    Determine our progress from the gpfdist daemon
    Polls each local gpfdist instance once a second and logs a transfer
    summary.  Setting self.number non-zero (under self.condition) asks the
    thread to report once more and stop.
    """
    def __init__(self,gpload,ports):
        threading.Thread.__init__(self)
        self.gpload = gpload
        self.ports = ports
        # non-zero signals run() to stop after its next report — presumably
        # set by the owner under self.condition (not visible in this excerpt)
        self.number = 0
        self.condition = threading.Condition()

    def get(self,port):
        """
        Connect to gpfdist and issue an HTTP query. No need to do this with
        httplib as the transaction is extremely simple
        Returns (read_bytes, total_bytes, total_sessions); -1 for any field
        the daemon did not report.
        """
        addrinfo = socket.getaddrinfo('localhost', port)
        s = socket.socket(addrinfo[0][0],socket.SOCK_STREAM)
        s.connect(('localhost',port))
        s.sendall('GET gpfdist/status HTTP/1.0\r\n\r\n')
        f = s.makefile()
        read_bytes = -1
        total_bytes = -1
        total_sessions = -1
        # response body is plain "key value" lines
        for line in f:
            self.gpload.log(self.gpload.DEBUG, "gpfdist stat: %s" % \
                    line.strip('\n'))
            a = line.split(' ')
            if not a:
                continue
            if a[0]=='read_bytes':
                read_bytes = int(a[1])
            elif a[0]=='total_bytes':
                total_bytes = int(a[1])
            elif a[0]=='total_sessions':
                total_sessions = int(a[1])
        s.close()
        f.close()
        return read_bytes,total_bytes,total_sessions

    def get1(self):
        """
        Parse gpfdist output
        Sums the byte counters across all ports and logs one progress line;
        bails out silently when any instance has no sessions yet.
        """
        read_bytes = 0
        total_bytes = 0
        for port in self.ports:
            a = self.get(port)
            if a[2]<1:
                return
            if a[0]!=-1:
                read_bytes += a[0]
            if a[1]!=-1:
                total_bytes += a[1]
        self.gpload.log(self.gpload.INFO,'transferred %s of %s' % \
                (bytestr(read_bytes),bytestr(total_bytes)))

    def run(self):
        """
        Thread worker
        Loops once a second until self.number is set; a socket error also
        ends the thread.
        """
        while 1:
            try:
                self.condition.acquire()
                n = self.number
                self.condition.release()
                self.get1()
                if n:
                    # a final report was requested; acknowledge and stop
                    self.gpload.log(self.gpload.DEBUG, "gpfdist status thread told to stop")
                    self.condition.acquire()
                    self.condition.notify()
                    self.condition.release()
                    break
            except socket.error, e:
                self.gpload.log(self.gpload.DEBUG, "got socket exception: %s" % e)
                break
            time.sleep(1)
def cli_help():
    """
    Return the contents of the gpload help file shipped under ../docs/cli_help,
    or '' when it cannot be read (callers then fall back to __doc__).
    """
    help_path = os.path.join(sys.path[0], '..', 'docs', 'cli_help', EXECNAME +
                             '_help')
    f = None
    try:
        try:
            f = open(help_path)
            return f.read(-1)
        # best-effort: any read failure just yields the fallback text, but do
        # not swallow SystemExit/KeyboardInterrupt like the old bare 'except:'
        except Exception:
            return ''
    finally:
        if f: f.close()
#============================================================
def usage(error = None):
print cli_help() or __doc__
sys.stdout.flush()
if error:
sys.stderr.write('ERROR: ' + error + '\n')
sys.stderr.write('\n')
sys.stderr.flush()
sys.exit(2)
def quote(a):
    """
    SQLify a string: double embedded single quotes, double backslashes, and
    wrap the result in single quotes.
    """
    escaped = a.replace("'", "''")
    escaped = escaped.replace('\\', '\\\\')
    return "'" + escaped + "'"
def splitPgpassLine(a):
    """
    Split one .pgpass line into fields on unescaped ':' characters.  A
    backslash escapes the character that follows it (so '\\:' yields a
    literal ':'); a trailing backslash is kept literally.
    """
    fields = []
    current = ''
    pending_escape = False
    for ch in a:
        if not pending_escape and ch == '\\':
            pending_escape = True
        elif not pending_escape and ch == ':':
            fields.append(current)
            current = ''
        else:
            current += ch
            pending_escape = False
    if pending_escape:
        # the line ended right after a backslash; keep it
        current += '\\'
    fields.append(current)
    return fields
def test_key(gp, key, crumb):
    """
    Check that *key* is a known configuration keyword and that it appears
    under an allowed parent (the last element of *crumb*, or None at top
    level).  Problems are reported through gp.log(gp.ERROR, ...), which
    exits; on success the keyword's valid_tokens entry is returned.
    """
    val = valid_tokens.get(key)
    if val == None:
        gp.log(gp.ERROR, 'unrecognized key: "%s"' % key)

    expected_parents = val['parent']
    # normalize: some keywords are legal under several parents
    if type(expected_parents) != list:
        expected_parents = [expected_parents]

    actual_parent = None
    if len(crumb):
        actual_parent = crumb[-1]

    if actual_parent not in expected_parents:
        gp.log(gp.ERROR, 'unexpected key: "%s"' % key)

    return val
def yaml_walk(gp, node, crumb):
    """
    Recursively validate a composed YAML node tree against valid_tokens.
    *crumb* is the stack of enclosing keyword names, used by test_key() to
    verify that each keyword appears under a legal parent.
    """
    if type(node) == list:
        for a in node:
            # MappingNode.value yields (key, value) node tuples
            if type(a) == tuple:
                key = a[0].value.lower()
                val = test_key(gp, key, crumb)

                # descend only when this keyword's children are keywords too
                if (len(a) > 1 and val['parse_children'] and
                    (isinstance(a[1], yaml.nodes.MappingNode) or
                     isinstance(a[1], yaml.nodes.SequenceNode))):
                    crumb.append(key)
                    yaml_walk(gp, a[1], crumb)
                    crumb.pop()
            elif isinstance(a, yaml.nodes.ScalarNode):
                test_key(gp, a.value, crumb)
            else:
                yaml_walk(gp, a, crumb)

    elif isinstance(node, yaml.nodes.MappingNode):
        yaml_walk(gp, node.value, crumb)

    elif isinstance(node, yaml.nodes.ScalarNode):
        # leaf value; nothing to validate
        pass

    elif isinstance(node, yaml.nodes.SequenceNode):
        yaml_walk(gp, node.value, crumb)

    elif isinstance(node, yaml.nodes.CollectionNode):
        pass
def changeToUnicode(a):
    """
    Recursively copy *a*, converting every str (dict keys included) to
    unicode.  Lists and dicts are rebuilt; all other types pass through
    unchanged.
    """
    if type(a) == list:
        return [changeToUnicode(element) for element in a]

    if type(a) == dict:
        converted = dict()
        for key, value in a.iteritems():
            if type(key) == str:
                key = unicode(key)
            converted[key] = changeToUnicode(value)
        return converted

    if type(a) == str:
        return unicode(a)
    return a
def dictKeyToLower(a):
    """
    Recursively copy *a*, lower-casing (and unicode-converting) every str
    dict key and converting str leaf values to unicode.  Non-key values keep
    their case; other types pass through unchanged.
    """
    if type(a) == list:
        return [dictKeyToLower(element) for element in a]

    if type(a) == dict:
        lowered = dict()
        for key, value in a.iteritems():
            if type(key) == str:
                key = unicode(key.lower())
            lowered[key] = dictKeyToLower(value)
        return lowered

    if type(a) == str:
        return unicode(a)
    return a
#
# MPP-13348
#
'''Jenkins hash - http://burtleburtle.net/bob/hash/doobs.html'''
def jenkinsmix(a, b, c):
    # One round of Bob Jenkins' 32-bit mix: each lane is subtracted, xor-shift
    # folded and masked back to 32 bits to emulate C unsigned overflow.  The
    # statement order is significant; do not reorder.
    a &= 0xffffffff; b &= 0xffffffff; c &= 0xffffffff
    a -= b; a -= c; a ^= (c>>13); a &= 0xffffffff
    b -= c; b -= a; b ^= (a<<8); b &= 0xffffffff
    c -= a; c -= b; c ^= (b>>13); c &= 0xffffffff
    a -= b; a -= c; a ^= (c>>12); a &= 0xffffffff
    b -= c; b -= a; b ^= (a<<16); b &= 0xffffffff
    c -= a; c -= b; c ^= (b>>5); c &= 0xffffffff
    a -= b; a -= c; a ^= (c>>3); a &= 0xffffffff
    b -= c; b -= a; b ^= (a<<10); b &= 0xffffffff
    c -= a; c -= b; c ^= (b>>15); c &= 0xffffffff
    return a, b, c
def jenkins(data, initval = 0):
    """
    Bob Jenkins' hash of the byte string *data*, seeded with *initval*;
    returns a 32-bit integer.  An empty string hashes to 0.
    """
    length = lenpos = len(data)
    if length == 0:
        return 0
    # 0x9e3779b9 is the golden-ratio constant from the reference implementation
    a = b = 0x9e3779b9
    c = initval
    p = 0
    # consume 12 bytes per round, 4 little-endian bytes per lane
    while lenpos >= 12:
        a += (ord(data[p+0]) + (ord(data[p+1])<<8) + (ord(data[p+2])<<16) + (ord(data[p+3])<<24))
        b += (ord(data[p+4]) + (ord(data[p+5])<<8) + (ord(data[p+6])<<16) + (ord(data[p+7])<<24))
        c += (ord(data[p+8]) + (ord(data[p+9])<<8) + (ord(data[p+10])<<16) + (ord(data[p+11])<<24))
        a, b, c = jenkinsmix(a, b, c)
        p += 12
        lenpos -= 12
    # fold the total length and the 0-11 trailing bytes into the state
    c += length
    if lenpos >= 11: c += ord(data[p+10])<<24
    if lenpos >= 10: c += ord(data[p+9])<<16
    if lenpos >= 9: c += ord(data[p+8])<<8
    if lenpos >= 8: b += ord(data[p+7])<<24
    if lenpos >= 7: b += ord(data[p+6])<<16
    if lenpos >= 6: b += ord(data[p+5])<<8
    if lenpos >= 5: b += ord(data[p+4])
    if lenpos >= 4: a += ord(data[p+3])<<24
    if lenpos >= 3: a += ord(data[p+2])<<16
    if lenpos >= 2: a += ord(data[p+1])<<8
    if lenpos >= 1: a += ord(data[p+0])
    a, b, c = jenkinsmix(a, b, c)
    return c
# MPP-20927 Citibank: gpload external table name problem
# Not sure if it is used by other components, just leave it here.
def shortname(name):
    """
    Build a string of at most 10 characters from a table name: the first two
    ascii characters (after removing spaces) followed by the 8-hex-digit
    Jenkins hash of the de-spaced name.  When fewer than two ascii characters
    remain, the prefix is '00'.

    For example:

    >>> shortname('mytable')
    'my3cbb7ba8'

    >>> shortname('some_pretty_long_test_table_name')
    'so9068664a'

    >>> shortname('t')
    '006742be70'

    @param name: the input tablename
    @returns: a string 10 characters or less built from the table name
    """
    # Remove spaces first, then hash.  Hashing happens before the non-ascii
    # strip so jenkins() never sees an empty string for a non-empty input.
    despaced = name.replace(' ', '')
    digest = jenkins(despaced)

    ascii_only = "".join(ch for ch in despaced if ord(ch) < 128)
    if len(ascii_only) > 1:
        return '%2s%08x' % (ascii_only[0:2], digest)
    return '00%08x' % (digest)
class options:
    """Plain attribute bag for parsed command-line option values."""
    pass
class gpload:
"""
Main class wrapper
"""
def __init__(self,argv):
self.threads = [] # remember threads so that we can join() against them
self.exitValue = 0
self.options = options()
self.options.h = None
self.options.gpfdist_timeout = None
self.options.p = None
self.options.U = None
self.options.W = False
self.options.D = False
self.options.no_auto_trans = False
self.options.password = None
self.options.d = None
self.DEBUG = 5
self.LOG = 4
self.INFO = 3
self.WARN = 2
self.ERROR = 1
self.options.qv = self.INFO
self.options.l = None
self.lastcmdtime = ''
self.cmdtime = ''
self.formatOpts = ""
seenv = False
seenq = False
# Create Temp and External table names. However external table name could
# get overwritten with another name later on (see create_external_table_name).
# MPP-20927 Citibank: gpload external table name problem. We use uuid to avoid
# external table name confliction.
self.unique_suffix = str(uuid.uuid1()).replace('-', '_')
self.staging_table_name = 'temp_staging_gpload_' + self.unique_suffix
self.extTableName = 'ext_gpload_' + self.unique_suffix
# SQL to run in order to undo our temporary work
self.cleanupSql = []
self.distkey = None
configFilename = None
while argv:
try:
try:
if argv[0]=='-h':
self.options.h = argv[1]
argv = argv[2:]
if argv[0]=='--gpfdist_timeout':
self.options.gpfdist_timeout = argv[1]
argv = argv[2:]
elif argv[0]=='-p':
self.options.p = int(argv[1])
argv = argv[2:]
elif argv[0]=='-l':
self.options.l = argv[1]
argv = argv[2:]
elif argv[0]=='-q':
self.options.qv -= 1
argv = argv[1:]
seenq = True
elif argv[0]=='--version':
sys.stderr.write("gpload version $Revision$\n")
sys.exit(0)
elif argv[0]=='-v':
self.options.qv = self.LOG
argv = argv[1:]
seenv = True
elif argv[0]=='-V':
self.options.qv = self.DEBUG
argv = argv[1:]
seenv = True
elif argv[0]=='-W':
self.options.W = True
argv = argv[1:]
elif argv[0]=='-D':
self.options.D = True
argv = argv[1:]
elif argv[0]=='-U':
self.options.U = argv[1]
argv = argv[2:]
elif argv[0]=='-d':
self.options.d = argv[1]
argv = argv[2:]
elif argv[0]=='-f':
configFilename = argv[1]
argv = argv[2:]
elif argv[0]=='--no_auto_trans':
self.options.no_auto_trans = True
argv = argv[1:]
elif argv[0]=='-?':
usage()
else:
break
except IndexError:
sys.stderr.write("Option %s needs a parameter.\n"%argv[0])
sys.exit(2)
except ValueError:
sys.stderr.write("Parameter for option %s must be an integer.\n"%argv[0])
sys.exit(2)
if configFilename==None:
usage('configuration file required')
elif argv:
a = ""
if len(argv) > 1:
a = "s"
usage('unrecognized argument%s: %s' % (a, ' '.join(argv)))
# default to gpAdminLogs for a log file, may be overwritten
if self.options.l is None:
self.options.l = os.path.join(os.environ.get('HOME', '.'),'gpAdminLogs')
if not os.path.isdir(self.options.l):
os.mkdir(self.options.l)
self.options.l = os.path.join(self.options.l, 'gpload_' + \
datetime.date.today().strftime('%Y%m%d') + '.log')
try:
self.logfile = open(self.options.l,'a')
except Exception, e:
self.log(self.ERROR, "could not open logfile %s: %s" % \
(self.options.l, e))
if seenv and seenq:
self.log(self.ERROR, "-q conflicts with -v and -V")
if self.options.D:
self.log(self.INFO, 'gpload has the -D option, so it does not actually load any data')
try:
f = open(configFilename,'r')
except IOError,e:
self.log(self.ERROR, "could not open configuration file: %s" % e)
# pull in the config file, which should be in valid YAML
try:
# do an initial parse, validating the config file
doc = f.read()
self.config = yaml.load(doc)
self.configOriginal = changeToUnicode(self.config)
self.config = dictKeyToLower(self.config)
ver = self.getconfig('version', unicode, extraStuff = ' tag')
if ver != '1.0.0.1':
self.control_file_error("gpload configuration schema version must be 1.0.0.1")
# second parse, to check that the keywords are sensible
y = yaml.compose(doc)
# first should be MappingNode
if not isinstance(y, yaml.MappingNode):
self.control_file_error("configuration file must begin with a mapping")
yaml_walk(self, y.value, [])
except yaml.scanner.ScannerError,e:
self.log(self.ERROR, "configuration file error: %s, line %s" % \
(e.problem, e.problem_mark.line))
except yaml.reader.ReaderError, e:
es = ""
if isinstance(e.character, str):
es = "'%s' codec can't decode byte #x%02x: %s position %d" % \
(e.encoding, ord(e.character), e.reason,
e.position)
else:
es = "unacceptable character #x%04x at byte %d: %s" \
% (ord(e.character), e.position, e.reason)
self.log(self.ERROR, es)
except yaml.error.MarkedYAMLError, e:
self.log(self.ERROR, "configuration file error: %s, line %s" % \
(e.problem, e.problem_mark.line))
f.close()
self.subprocesses = []
self.log(self.INFO,'gpload session started ' + \
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
def control_file_warning(self, msg):
self.log(self.WARN, "A gpload control file processing warning occurred. %s" % msg)
def control_file_error(self, msg):
self.log(self.ERROR, "A gpload control file processing error occurred. %s" % msg)
def elevel2str(self, level):
if level == self.DEBUG:
return "DEBUG"
elif level == self.LOG:
return "LOG"
elif level == self.INFO:
return "INFO"
elif level == self.ERROR:
return "ERROR"
elif level == self.WARN:
return "WARN"
else:
self.log(self.ERROR, "unknown log type %i" % level)
def log(self, level, a):
"""
Level is either DEBUG, LOG, INFO, ERROR. a is the message
"""
try:
t = time.localtime()
str = '|'.join(
[datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
self.elevel2str(level), a]) + '\n'
str = str.encode('utf-8')
except Exception, e:
# log even if contains non-utf8 data and pass this exception
self.logfile.write("\nWarning: Log() threw an exception: %s \n" % (e))
if level <= self.options.qv:
sys.stdout.write(str)
if level <= self.options.qv or level <= self.LOG:
try:
self.logfile.write(str)
self.logfile.flush()
except AttributeError, e:
pass
if level == self.ERROR:
self.exitValue = 2;
sys.exit(self.exitValue)
    def getconfig(self, a, typ=None, default='error', extraStuff='', returnOriginal=False):
        """
        Look for a config entry, via a colon delimited string. a:b:c points to

            a:
                b:
                    c

        Make sure that end point is of type 'typ' when not set to None.
        If returnOriginal is False, the return value will be in lower case,
        else the return value will be in its original form (i.e. the case that
        the user specified in their yaml file).

        When the entry is missing: return 'default', except when default is
        the sentinel string 'error', in which case a fatal control file error
        is reported.  extraStuff is appended to error messages.
        """
        self.log(self.DEBUG, "getting config for " + a)
        if returnOriginal == True:
            config = self.configOriginal
        else:
            config = self.config
        for s in a.split(':'):
            self.log(self.DEBUG, "trying " + s)
            index = 1
            # a path element may select the Nth occurrence, e.g. 'source(2)'
            if s[-1:]==')':
                j = s.index('(')
                index = int(s[j+1:-1])
                s = s[:j]
            # normalize to a list so repeated keys (YAML sequences of
            # mappings) can be scanned uniformly
            if type(config)!=list:
                config = [config]
            for c in config:
                if type(c)==dict:
                    temp = caseInsensitiveDictLookup(s, c)
                    if temp != None:
                        index -= 1
                        if not index:
                            self.log(self.DEBUG, "found " + s)
                            config = temp
                            break
            else:
                # for/else: loop fell through without break -> entry absent
                if default=='error':
                    self.control_file_error("The configuration must contain %s%s"%(a,extraStuff))
                    sys.exit(2)
                return default
        if typ != None and type(config) != typ:
            if typ == list:
                self.control_file_error("The %s entry must be a YAML sequence %s"% (a ,extraStuff))
            elif typ == dict:
                self.control_file_error("The %s entry must be a YAML mapping %s"% (a, extraStuff))
            elif typ == unicode or typ == str:
                self.control_file_error("%s must be a string %s" % (a, extraStuff))
            elif typ == int:
                self.control_file_error("The %s entry must be a YAML integer %s" % (a, extraStuff))
            else:
                assert 0
                self.control_file_error("Encountered unknown configuration type %s"% type(config))
            sys.exit(2)
        return config
    def read_config(self):
        """
        Configure ourselves.

        Resolves table name, host, port, user and database from the parsed
        control file, applying the precedence:
        command line > control file > environment variable > built-in default.
        """
        # ensure output is of type list
        self.getconfig('gpload:output', list)
        # The user supplied table name can be completely or partially delimited,
        # and it can be a one or two part name. Get the originally supplied name
        # and parse it into its delimited one or two part name.
        self.schemaTable = self.getconfig('gpload:output:table', unicode, returnOriginal=True)
        schemaTableList  = splitUpMultipartIdentifier(self.schemaTable)
        schemaTableList  = convertListToDelimited(schemaTableList)
        if len(schemaTableList) == 2:
            self.schema = schemaTableList[0]
            self.table  = schemaTableList[1]
        else:
            self.schema = None
            self.table  = schemaTableList[0]
        # Precedence for configuration: command line > config file > env
        # variable

        # host to connect to
        if not self.options.h:
            self.options.h = self.getconfig('host', unicode, None)
            if self.options.h:
                self.options.h = str(self.options.h)
        if not self.options.h:
            self.options.h = os.environ.get('PGHOST')
        if not self.options.h or len(self.options.h) == 0:
            self.log(self.INFO, "no host supplied, defaulting to localhost")
            self.options.h = "localhost"
        # Port to connect to
        if not self.options.p:
            self.options.p = self.getconfig('port',int,None)
        if not self.options.p:
            try:
                self.options.p = int(os.environ.get('PGPORT'))
            except (ValueError, TypeError):
                # PGPORT unset or not numeric; fall through to the default
                pass
        if not self.options.p:
            self.options.p = 5432
        # User to connect as
        if not self.options.U:
            self.options.U = self.getconfig('user', unicode, None)
        if not self.options.U:
            self.options.U = os.environ.get('PGUSER')
        if not self.options.U:
            # fall back to OS-level account names, platform dependent
            self.options.U = os.environ.get('USER') or \
                    os.environ.get('LOGNAME') or \
                    os.environ.get('USERNAME')
        if not self.options.U or len(self.options.U) == 0:
            self.log(self.ERROR,
                       "You need to specify your username with the -U " +
                       "option or in your configuration or in your " +
                       "environment as PGUSER")
        # database to connect to
        if not self.options.d:
            self.options.d = self.getconfig('database', unicode, None)
        if not self.options.d:
            self.options.d = os.environ.get('PGDATABASE')
        if not self.options.d:
            # like libpq, just inherit USER
            self.options.d = self.options.U
        if self.getconfig('gpload:input:error_table', unicode, None):
            self.control_file_error("ERROR_TABLE is not supported. Please use LOG_ERRORS instead.")
def gpfdist_port_options(self, name, availablePorts, popenList):
"""
Adds gpfdist -p / -P port options to popenList based on port and port_range in YAML file.
Raises errors if options are invalid or ports are unavailable.
@param name: input source name from YAML file.
@param availablePorts: current set of available ports
@param popenList: gpfdist options (updated)
"""
port = self.getconfig(name + ':port', int, None)
port_range = self.getconfig(name+':port_range', list, None)
if port:
startPort = endPort = port
endPort += 1
elif port_range:
try:
startPort = int(port_range[0])
endPort = int(port_range[1])
except (IndexError,ValueError):
self.control_file_error(name + ":port_range must be a YAML sequence of two integers")
else:
startPort = self.getconfig(name+':port',int,8000)
endPort = self.getconfig(name+':port',int,9000)
if (startPort > 65535 or endPort > 65535):
# Do not allow invalid ports
self.control_file_error("Invalid port. Port values must be less than or equal to 65535.")
elif not (set(xrange(startPort,endPort+1)) & availablePorts):
self.log(self.ERROR, "no more ports available for gpfdist")
popenList.append('-p')
popenList.append(str(startPort))
popenList.append('-P')
popenList.append(str(endPort))
def gpfdist_filenames(self, name, popenList):
"""
Adds gpfdist -f filenames to popenList.
Raises errors if YAML file option is invalid.
@param name: input source name from YAML file.
@param popenList: gpfdist options (updated)
@return: list of files names
"""
file = self.getconfig(name+':file',list)
for i in file:
if type(i)!= unicode and type(i) != str:
self.control_file_error(name + ":file must be a YAML sequence of strings")
popenList.append('-f')
popenList.append('"'+' '.join(file)+'"')
return file
def gpfdist_timeout_options(self, popenList):
"""
Adds gpfdist -t timeout option to popenList.
@param popenList: gpfdist options (updated)
"""
if self.options.gpfdist_timeout != None:
gpfdistTimeout = self.options.gpfdist_timeout
else:
gpfdistTimeout = 30
popenList.append('-t')
popenList.append(str(gpfdistTimeout))
def gpfdist_verbose_options(self, popenList):
"""
Adds gpfdist -v / -V options to popenList depending on logging level
@param popenList: gpfdist options (updated)
"""
if self.options.qv == self.LOG:
popenList.append('-v')
elif self.options.qv > self.LOG:
popenList.append('-V')
def gpfdist_max_line_length(self, popenList):
"""
Adds gpfdist -m option to popenList when max_line_length option specified in YAML file.
@param popenList: gpfdist options (updated)
"""
max_line_length = self.getconfig('gpload:input:max_line_length',int,None)
if max_line_length is not None:
popenList.append('-m')
popenList.append(str(max_line_length))
def gpfdist_transform(self, popenList):
"""
Compute and return url fragment if transform option specified in YAML file.
Checks for readable transform config file if transform_config option is specified.
Adds gpfdist -c option to popenList if transform_config is specified.
Validates that transform_config is present when transform option is specified.
@param popenList: gpfdist options (updated)
@returns: uri fragment for transform or "" if not appropriate.
"""
transform = self.getconfig('gpload:input:transform', unicode, None)
transform_config = self.getconfig('gpload:input:transform_config', unicode, None)
if transform_config:
try:
f = open(transform_config,'r')
except IOError,e:
self.log(self.ERROR, "could not open transform_config file: %s" % e)
f.close()
popenList.append('-c')
popenList.append(transform_config)
else:
if transform:
self.control_file_error("transform_config is required when transform is specified")
fragment = ""
if transform is not None:
fragment = "#transform=" + transform
return fragment
def gpfdist_ssl(self, popenList):
"""
Adds gpfdist --ssl option to popenList when ssl option specified as true in YAML file.
@param popenList: gpfdist options (updated)
"""
ssl = self.getconfig('gpload:input:source:ssl',bool, False)
certificates_path = self.getconfig('gpload:input:source:certificates_path', unicode, None)
if ssl and certificates_path:
dir_exists = os.path.isdir(certificates_path)
if dir_exists == False:
self.log(self.ERROR, "could not access CERTIFICATES_PATH directory: %s" % certificates_path)
popenList.append('--ssl')
popenList.append(certificates_path)
else:
if ssl:
self.control_file_error("CERTIFICATES_PATH is required when SSL is specified as true")
elif certificates_path: # ssl=false (or not specified) and certificates_path is specified
self.control_file_error("CERTIFICATES_PATH is specified while SSL is not specified as true")
    def start_gpfdists(self):
        """
        Start gpfdist daemon(s)

        One gpfdist process is launched per 'gpload:input:source' entry in
        the control file.  The resulting URIs are collected in
        self.locations and the chosen ports in self.ports; a CatThread is
        attached to each daemon's stderr and stdout so its output ends up in
        our log.  Fatal error when no source is defined or a daemon fails to
        start.
        """
        self.locations = []
        self.ports = []
        sourceIndex = 0
        availablePorts = set(xrange(1,65535))
        found_source = False
        self.getconfig('gpload:input', list)
        while 1:
            sourceIndex += 1
            name = 'gpload:input:source(%d)'%sourceIndex
            a = self.getconfig(name,None,None)
            if not a:
                break
            found_source = True
            local_hostname = self.getconfig(name+':local_hostname', list, False)
            # do default host, the current one
            if not local_hostname:
                # if fully_qualified_domain_name is defined and set to true we want to
                # resolve the fqdn rather than just grabbing the hostname.
                fqdn = self.getconfig('gpload:input:fully_qualified_domain_name', bool, False)
                if fqdn:
                    local_hostname = [socket.getfqdn()]
                else:
                    local_hostname = [socket.gethostname()]
            # build gpfdist parameters
            popenList = ['gpfdist']
            self.gpfdist_ssl(popenList)
            self.gpfdist_port_options(name, availablePorts, popenList)
            file = self.gpfdist_filenames(name, popenList)
            self.gpfdist_timeout_options(popenList)
            self.gpfdist_verbose_options(popenList)
            self.gpfdist_max_line_length(popenList)
            fragment = self.gpfdist_transform(popenList)
            try:
                self.log(self.LOG, 'trying to run %s' % ' '.join(popenList))
                cfds = True
                if platform.system() in ['Windows', 'Microsoft']: # not supported on win32
                    cfds = False
                    cmd = ' '.join(popenList)
                    needshell = False
                else:
                    # on Unix, source the Greenplum environment script first,
                    # then exec gpfdist through a shell
                    srcfile = None
                    if os.environ.get('GPHOME_LOADERS'):
                        srcfile = os.path.join(os.environ.get('GPHOME_LOADERS'),
                                           'greenplum_loaders_path.sh')
                    elif os.environ.get('GPHOME'):
                        srcfile = os.path.join(os.environ.get('GPHOME'),
                                           'greenplum_path.sh')
                    if (not (srcfile and os.path.exists(srcfile))):
                        self.log(self.ERROR, 'cannot find greenplum environment ' +
                                    'file: environment misconfigured')
                    cmd = 'source %s ; exec ' % srcfile
                    cmd += ' '.join(popenList)
                    needshell = True
                a = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     close_fds=cfds, shell=needshell)
                self.subprocesses.append(a)
            except Exception, e:
                self.log(self.ERROR, "could not run %s: %s" % \
                                (' '.join(popenList), str(e)))
            """
            Reading from stderr and stdout on a Popen object can result in a dead lock if done at the same time.
            Create a lock to share when reading stderr and stdout from gpfdist.
            """
            readLock = threading.Lock()
            # get all the output from the daemon(s)
            t = CatThread(self,a.stderr, readLock)
            t.start()
            self.threads.append(t)
            # scan gpfdist's stdout until it announces which port it serves on
            while 1:
                readLock.acquire()
                line = a.stdout.readline()
                readLock.release()
                if line=='':
                    # EOF before the announcement: gpfdist died on startup
                    self.log(self.ERROR,'failed to start gpfdist: ' +
                             'gpfdist command line: ' + ' '.join(popenList))
                line = line.strip('\n')
                self.log(self.LOG,'gpfdist says: ' + line)
                if (line.startswith('Serving HTTP on port ') or line.startswith('Serving HTTPS on port ')):
                    port = int(line[21:line.index(',')])
                    break
            self.log(self.INFO, 'started %s' % ' '.join(popenList))
            self.log(self.LOG,'gpfdist is running on port %d'%port)
            if port in availablePorts:
                availablePorts.remove(port)
            self.ports.append(port)
            t = CatThread(self,a.stdout,readLock)
            t.start()
            self.threads.append(t)
            ssl = self.getconfig('gpload:input:source:ssl', bool, False)
            if ssl:
                protocol = 'gpfdists'
            else:
                protocol = 'gpfdist'
            for l in local_hostname:
                if type(l) != str and type(l) != unicode:
                    self.control_file_error(name + ":local_hostname must be a YAML sequence of strings")
                l = str(l)
                sep = ''
                if file[0] != '/':
                    sep = '/'
                # MPP-13617: bracket IPv6 literals inside the URI
                if ':' in l:
                    l = '[' + l + ']'
                self.locations.append('%s://%s:%d%s%s%s' % (protocol, l, port, sep, '%20'.join(file), fragment))
        if not found_source:
            self.control_file_error("configuration file must contain source definition")
def readPgpass(self,pgpassname):
"""
Get password form .pgpass file
"""
try:
f = open(pgpassname,'r')
except IOError:
return
for row in f:
try:
row = row.rstrip("\n")
line = splitPgpassLine(row)
if line[0]!='*' and line[0].lower()!=self.options.h.lower():
continue
if line[1]!='*' and int(line[1])!=self.options.p:
continue
if line[2]!='*' and line[2]!=self.options.d:
continue
if line[3]!='*' and line[3]!=self.options.U:
continue
self.options.password = line[4]
break
except (ValueError,IndexError):
pass
f.close()
    def setup_connection(self, recurse = 0):
        """
        Connect to the backend

        Resolves the password in precedence order (-W prompt, control file,
        PGPASSWORD, .pgpass) and opens self.db.  On a 'no password supplied'
        failure it prompts and retries, giving up after 10 attempts; any
        other connection failure is fatal.
        """
        if self.db != None:
            self.db.close()
            self.db = None
        if self.options.W:
            if self.options.password==None:
                self.options.password = getpass.getpass()
        else:
            if self.options.password==None:
                self.options.password = self.getconfig('password', unicode,
                                                       None)
            if self.options.password==None:
                self.options.password = os.environ.get('PGPASSWORD')
            if self.options.password==None:
                self.readPgpass(os.environ.get('PGPASSFILE',
                                os.environ.get('HOME','.')+'/.pgpass'))
        try:
            self.log(self.DEBUG, "connection string:" +
                     " user=" + str(self.options.U) +
                     " host=" + str(self.options.h) +
                     " port=" + str(self.options.p) +
                     " database=" + str(self.options.d))
            self.db = pg.DB( dbname=self.options.d
                           , host=self.options.h
                           , port=self.options.p
                           , user=self.options.U
                           , passwd=self.options.password
                           )
            self.log(self.DEBUG, "Successfully connected to database")
        except Exception, e:
            errorMessage = str(e)
            if errorMessage.find("no password supplied") != -1:
                # server wants a password we don't have: prompt and retry
                self.options.password = getpass.getpass()
                recurse += 1
                if recurse > 10:
                    self.log(self.ERROR, "too many login attempt failures")
                self.setup_connection(recurse)
            else:
                self.log(self.ERROR, "could not connect to database: %s. Is " \
                    "the Greenplum Database running on port %i?" % (errorMessage,
                    self.options.p))
    def read_columns(self):
        """
        Build self.from_columns, the list of input (source) columns as
        [name, type, mapto, has_sequence] entries.

        When gpload:input:columns is given the user's list is used
        (self.from_cols_from_user = True) and missing types are inherited
        from the matching target column; otherwise the target table columns
        are used verbatim.  Fatal error when a column ends up without a type.
        """
        columns = self.getconfig('gpload:input:columns',list,None, returnOriginal=True)
        if columns != None:
            self.from_cols_from_user = True # user specified from columns
            self.from_columns = []
            for d in columns:
                if type(d)!=dict:
                    self.control_file_error("gpload:input:columns must be a sequence of YAML mappings")
                # each mapping holds a single column-name -> type pair
                tempkey = d.keys()[0]
                value = d[tempkey]
                """ remove leading or trailing spaces """
                d = { tempkey.strip() : value }
                key = d.keys()[0]
                if d[key] == None:
                    self.log(self.DEBUG,
                             'getting source column data type from target')
                    for name, typ, mapto, hasseq in self.into_columns:
                        if sqlIdentifierCompare(name, key):
                            d[key] = typ
                            break
                # perform the same kind of magic type change that postgres does
                if d[key] == 'bigserial':
                    d[key] = 'bigint'
                elif d[key] == 'serial':
                    d[key] = 'int4'
                # Mark this column as having no mapping, which is important
                # for do_insert()
                self.from_columns.append([key.lower(),d[key].lower(),None, False])
        else:
            self.from_columns = self.into_columns
            self.from_cols_from_user = False
        # make sure that all columns have a type
        for name, typ, map, hasseq in self.from_columns:
            if typ == None:
                self.log(self.ERROR, 'column "%s" has no type ' % name +
                       'and does not appear in target table "%s"' % self.schemaTable)
        self.log(self.DEBUG, 'from columns are:')
        for c in self.from_columns:
            name = c[0]
            typ = c[1]
            self.log(self.DEBUG, '%s: %s'%(name,typ))
    def read_table_metadata(self):
        """
        Fill self.into_columns / self.into_columns_dict with the target
        table's columns as [name, type, mapto, has_sequence] entries,
        resolving the schema via search_path when it was not given.
        Fatal error when the table is missing or unreadable.
        """
        # KAS Note to self. If schema is specified, then probably should use PostgreSQL rules for defining it.

        # find the schema name for this table (according to search_path)
        # if it was not explicitly specified in the configuration file.
        if self.schema == None:
            queryString = """SELECT n.nspname
                             FROM pg_catalog.pg_class c
                             LEFT JOIN pg_catalog.pg_namespace n
                             ON n.oid = c.relnamespace
                             WHERE c.relname = '%s'
                             AND pg_catalog.pg_table_is_visible(c.oid);""" % quote_unident(self.table)
            resultList = self.db.query(queryString.encode('utf-8')).getresult()
            if len(resultList) > 0:
                self.schema = (resultList[0])[0]
                self.log(self.INFO, "setting schema '%s' for table '%s'" % (self.schema, quote_unident(self.table)))
            else:
                self.log(self.ERROR, "table %s not found in any database schema" % self.table)

        # one row per live column, in attnum order; has_sequence detects a
        # nextval(...) default on an int4/int8 column (i.e. serial/bigserial)
        queryString = """select nt.nspname as table_schema,
         c.relname as table_name,
         a.attname as column_name,
         a.attnum as ordinal_position,
         format_type(a.atttypid, a.atttypmod) as data_type,
         c.relkind = 'r' AS is_updatable,
         a.atttypid in (23, 20) and a.atthasdef and
             (select position ( 'nextval(' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0 and
                  position ( '::regclass)' in pg_catalog.pg_get_expr(adbin,adrelid) ) > 0
              FROM pg_catalog.pg_attrdef d
              WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum AND a.atthasdef) as has_sequence
          from pg_catalog.pg_class c join pg_catalog.pg_namespace nt on (c.relnamespace = nt.oid)
             join pg_attribute a on (a.attrelid = c.oid)
          where c.relname = '%s' and nt.nspname = '%s'
          and a.attnum > 0 and a.attisdropped = 'f'
          order by a.attnum """ % (quote_unident(self.table), quote_unident(self.schema))
        count = 0
        self.into_columns = []
        self.into_columns_dict = dict()
        resultList = self.db.query(queryString.encode('utf-8')).dictresult()
        while count < len(resultList):
            row = resultList[count]
            count += 1
            ct = unicode(row['data_type'])
            # apply the same implicit type rewrites that postgres does
            if ct == 'bigserial':
                ct = 'bigint'
            elif ct == 'serial':
                ct = 'int4'
            name = unicode(row['column_name'], 'utf-8')
            name = quote_ident(name)
            if unicode(row['has_sequence']) != unicode('f'):
                has_seq = True
            else:
                has_seq = False
            i = [name,ct,None, has_seq]
            self.into_columns.append(i)
            self.into_columns_dict[name] = i
            self.log(self.DEBUG, "found input column: " + str(i))
        if count == 0:
            # see if it's a permissions issue or it actually doesn't exist
            tableName = quote_unident(self.table)
            tableSchema = quote_unident(self.schema)
            sql = """select 1 from pg_class c, pg_namespace n
                     where c.relname = '%s' and
                     n.nspname = '%s' and
                     n.oid = c.relnamespace""" % (tableName, tableSchema)
            resultList = self.db.query(sql.encode('utf-8')).getresult()
            if len(resultList) > 0:
                self.log(self.ERROR, "permission denied for table %s.%s" % \
                            (tableSchema, tableName))
            else:
                self.log(self.ERROR, 'table %s.%s does not exist in database %s'% (tableSchema, tableName, self.options.d))
def read_mapping(self):
mapping = self.getconfig('gpload:output:mapping',dict,None, returnOriginal=True)
if mapping:
for key,value in mapping.iteritems():
if type(key) != unicode or type(value) != unicode:
self.control_file_error("gpload:output:mapping must be a YAML type mapping from strings to strings")
found = False
for a in self.into_columns:
if sqlIdentifierCompare(a[0], key) == True:
a[2] = value
found = True
break
if found == False:
self.log(self.ERROR,'%s in mapping is not in table %s'% \
(key, self.schemaTable))
else:
# Now, map anything yet to be mapped to itself, picking up on those
# columns which are not found in the table.
for x in self.from_columns:
# Check to see if it already has a mapping value
i = filter(lambda a:a[2] == x[0], self.into_columns)
if not i:
# Check to see if the target column names match the input column names.
for a in self.into_columns:
if sqlIdentifierCompare(a[0], x[0]) == True:
i = a
found = True
break
if i:
if i[2] == None: i[2] = i[0]
else:
self.log(self.ERROR, 'no mapping for input column ' +
'"%s" to output table' % x[0])
for name,typ,mapto,seq in self.into_columns:
self.log(self.DEBUG,'%s: %s = %s'%(name,typ,mapto))
# In order to find out whether we have an existing external table in the
# catalog which could be reused for this operation we need to make sure
# that it has the same column names and types, the same data format, and
# location specification, and single row error handling specs.
#
# This function will return the SQL to run in order to find out whether
# such a table exists.
#
    def get_reuse_exttable_query(self, formatType, formatOpts, limitStr, from_cols, schemaName, log_errors):
        """
        Return SQL that locates an existing 'ext_gpload_reusable_*' external
        table matching this load: same locations, format type and options,
        error handling, reject limit and ordered column name/type list.
        The query yields at most one relation; an empty result means a new
        external table must be created.
        """
        sqlFormat = """select attrelid::regclass
                 from (
                        select
                            attrelid,
                            row_number() over (partition by attrelid order by attnum) as attord,
                            attnum,
                            attname,
                            atttypid::regtype
                        from
                            pg_attribute
                            join
                            pg_class
                            on (pg_class.oid = attrelid)
                            %s
                        where
                            relstorage = 'x' and
                            relname like 'ext_gpload_reusable_%%' and
                            attnum > 0 and
                            not attisdropped and %s
                    ) pgattr
                    join
                    pg_exttable pgext
                    on(pgattr.attrelid = pgext.reloid)
                    """
        joinStr = ""
        conditionStr = ""

        # if schemaName is None, find the reusable ext table which is visible to
        # current search path. Else find the reusable ext table under the specific
        # schema, and this needs to join pg_namespace.
        if schemaName is None:
            joinStr = ""
            conditionStr = "pg_table_is_visible(pg_class.oid)"
        else:
            joinStr = """join
                         pg_namespace pgns
                         on(pg_class.relnamespace = pgns.oid)
                      """
            conditionStr = "pgns.nspname = '%s'" % schemaName

        sql = sqlFormat % (joinStr, conditionStr)

        # error-table handling must match: fmterrtbl = reloid marks LOG ERRORS
        if log_errors:
            sql += " WHERE pgext.fmterrtbl = pgext.reloid "
        else:
            sql += " WHERE pgext.fmterrtbl IS NULL "

        # every gpfdist location URI must match, in order
        for i, l in enumerate(self.locations):
            sql += " and pgext.urilocation[%s] = %s\n" % (i + 1, quote(l))

        sql+= """and pgext.fmttype = %s
                 and pgext.writable = false
                 and pgext.fmtopts like %s """ % (quote(formatType[0]),quote("%" + quote_unident(formatOpts.rstrip()) +"%"))

        if limitStr:
            sql += "and pgext.rejectlimit = %s " % limitStr
        else:
            sql += "and pgext.rejectlimit IS NULL "

        sql+= "group by attrelid "

        # same column count, and each ordinal position matches name and type
        sql+= """having
                    count(*) = %s and
                    bool_and(case """ % len(from_cols)

        for i, c in enumerate(from_cols):
            name = c[0]
            typ = c[1]
            sql+= "when attord = %s then atttypid = %s::regtype and attname = %s\n" % (i+1, quote(typ), quote(quote_unident(name)))

        sql+= """else true
                 end)
                 limit 1;"""

        self.log(self.DEBUG, "query used to identify reusable external relations: %s" % sql)
        return sql
#
# Create a string from the following conditions to reuse staging table:
# 1. same target table
# 2. same number of columns
# 3. same names and types, in the same order
    # 4. same distribution key (according to columns' names and their order)
#
def get_staging_conditions_string(self, target_table_name, staging_cols, distribution_cols):
columns_num = len(staging_cols)
staging_cols_str = '-'.join(map(lambda col:'%s-%s' % (quote(quote_unident(col[0])), quote(col[1])), staging_cols))
distribution_cols_str = '-'.join([quote(quote_unident(col)) for col in distribution_cols])
return '%s:%s:%s:%s' % (target_table_name, columns_num, staging_cols_str, distribution_cols_str)
#
# This function will return the SQL to run in order to find out whether
# we have an existing staging table in the catalog which could be reused for this
    # operation, according to the method and the encoding conditions.
#
def get_reuse_staging_table_query(self, encoding_conditions):
sql = """SELECT oid::regclass
FROM pg_class
WHERE relname = 'staging_gpload_reusable_%s';""" % (encoding_conditions)
self.log(self.DEBUG, "query used to identify reusable temporary relations: %s" % sql)
return sql
#
# get oid for table from pg_class, None if not exist
#
def get_table_oid(self, tableName):
if tableName:
sql = "select %s::regclass::oid" % quote(quote_unident(tableName))
try:
resultList = self.db.query(sql.encode('utf-8')).getresult()
return resultList[0][0]
except Exception, e:
pass
return None
def get_ext_schematable(self, schemaName, tableName):
if schemaName == None:
return tableName
else:
schemaTable = "%s.%s" % (schemaName, tableName)
return schemaTable
def get_external_table_formatOpts(self, option, specify=''):
formatType = self.getconfig('gpload:input:format', unicode, 'text').lower()
if formatType == 'text':
valid_token = ['delimiter','escape']
elif formatType == 'csv':
valid_token = ['delimiter', 'quote', 'escape']
else:
valid_token = []
if not option in valid_token:
self.control_file_error("The option you specified doesn't support now")
return
if option == 'delimiter':
defval = ',' if formatType == 'csv' else '\t'
val = self.getconfig('gpload:input:delimiter', unicode, defval)
elif option == 'escape':
defval = self.getconfig('gpload:input:quote', unicode, '"')
val = self.getconfig('gpload:input:escape', unicode, defval)
elif option == 'quote':
val = self.getconfig('gpload:input:quote', unicode, '"')
else:
self.control_file_error("unexpected error -- backtrace " +
"written to log file")
sys.exit(2)
specify_str = str(specify) if specify else option
if len(val) != 1:
if val.startswith("E'") and val.endswith("'") and len(val[2:-1].decode('unicode-escape')) == 1:
subval = val[2:-1]
if subval == "\\'":
val = val
self.formatOpts += "%s %s " % (specify_str, val)
else:
val = subval.decode('unicode-escape')
self.formatOpts += "%s '%s' " % (specify_str, val)
elif len(val.decode('unicode-escape')) == 1:
val = val.decode('unicode-escape')
self.formatOpts += "%s '%s' " % (specify_str, val)
else:
self.control_file_warning(option +''' must be single ASCII charactor, you can also use unprintable characters(for example: '\\x1c' / E'\\x1c' or '\\u001c' / E'\\u001c' ''')
self.control_file_error("Invalid option, gpload quit immediately")
sys.exit(2);
else:
self.formatOpts += "%s '%s' " % (specify_str, val)
#
# Create a new external table or find a reusable external table to use for this operation
#
    def create_external_table(self):
        """
        Create (or, with reuse_tables, find and reuse) the external table
        that reads from the gpfdist location(s) for this load.  Builds the
        column list, format options, encoding and error-handling clauses
        from the control file, and registers a cleanup DROP unless reuse is
        requested.
        """
        # extract all control file information and transform it accordingly
        # in order to construct a CREATE EXTERNAL TABLE statement, if it will
        # be needed later on
        formatType = self.getconfig('gpload:input:format', unicode, 'text').lower()
        locationStr = ','.join(map(quote,self.locations))

        self.get_external_table_formatOpts('delimiter')

        nullas = self.getconfig('gpload:input:null_as', unicode, False)
        self.log(self.DEBUG, "null " + unicode(nullas))
        if nullas != False: # could be empty string
            self.formatOpts += "null %s " % quote(nullas)
        elif formatType=='csv':
            self.formatOpts += "null '' "
        else:
            self.formatOpts += "null %s " % quote("\N")

        esc = self.getconfig('gpload:input:escape', None, None)
        if esc:
            if type(esc) != unicode and type(esc) != str:
                self.control_file_error("gpload:input:escape must be a string")
            if esc.lower() == 'off':
                # escaping may only be disabled in TEXT mode
                if formatType == 'csv':
                    self.control_file_error("ESCAPE cannot be set to OFF in CSV mode")
                self.formatOpts += "escape 'off' "
            else:
                self.get_external_table_formatOpts('escape')
        else:
            if formatType=='csv':
                # CSV defaults escape to the quote character
                self.get_external_table_formatOpts('quote','escape')
            else:
                self.formatOpts += "escape '\\'"

        if formatType=='csv':
            self.get_external_table_formatOpts('quote')

        if self.getconfig('gpload:input:header',bool,False):
            self.formatOpts += "header "

        force_not_null_columns = self.getconfig('gpload:input:force_not_null',list,[])
        if force_not_null_columns:
            for i in force_not_null_columns:
                if type(i) != unicode and type(i) != str:
                    self.control_file_error("gpload:input:force_not_null must be a YAML sequence of strings")
            self.formatOpts += "force not null %s " % ','.join(force_not_null_columns)

        encodingStr = self.getconfig('gpload:input:encoding', unicode, None)

        limitStr = self.getconfig('gpload:input:error_limit',int, None)
        if self.log_errors and not limitStr:
            self.control_file_error("gpload:input:log_errors requires " +
                    "gpload:input:error_limit to be specified")

        self.extSchemaName = self.getconfig('gpload:external:schema', unicode, None)
        if self.extSchemaName == '%':
            # '%' means: place the external table in the target table's schema
            self.extSchemaName = self.schema

        # get the list of columns to use in the external table
        if not self.from_cols_from_user:
            # don't put values serial columns
            from_cols = filter(lambda a: a[3] != True,
                               self.from_columns)
        else:
            from_cols = self.from_columns

        # If the 'reuse tables' option was specified we now try to find an
        # already existing external table in the catalog which will match
        # the one that we need to use. It must have identical attributes,
        # external location, format, and encoding specifications.
        if self.reuse_tables == True:
            # process the single quotes in order to successfully find an existing external table to reuse.
            self.formatOpts = self.formatOpts.replace("E'\\''","'\''")
            sql = self.get_reuse_exttable_query(formatType, self.formatOpts,
                    limitStr, from_cols, self.extSchemaName, self.log_errors)
            resultList = self.db.query(sql.encode('utf-8')).getresult()
            if len(resultList) > 0:
                # found an external table to reuse. no need to create one. we're done here.
                self.extTableName = (resultList[0])[0]
                self.extSchemaTable = self.extTableName
                self.log(self.INFO, "reusing external table %s" % self.extSchemaTable)
                return

            # didn't find an existing external table suitable for reuse. Format a reusable
            # name and issue a CREATE EXTERNAL TABLE on it. Hopefully we can use it next time
            # around
            self.extTableName = "ext_gpload_reusable_%s" % self.unique_suffix
            self.log(self.INFO, "did not find an external table to reuse. creating %s" % self.extTableName)

        # process the single quotes in order to successfully create an external table.
        self.formatOpts = self.formatOpts.replace("'\''","E'\\''")

        # construct a CREATE EXTERNAL TABLE statement and execute it
        self.extSchemaTable = self.get_ext_schematable(self.extSchemaName, self.extTableName)
        sql = "create external table %s" % self.extSchemaTable
        sql += "(%s)" % ','.join(map(lambda a:'%s %s' % (a[0], a[1]), from_cols))

        sql += "location(%s) "%locationStr
        sql += "format%s "% quote(formatType)
        if len(self.formatOpts) > 0:
            sql += "(%s) "% self.formatOpts
        if encodingStr:
            sql += "encoding%s "%quote(encodingStr)
        if self.log_errors:
            sql += "log errors "

        if limitStr:
            if limitStr < 2:
                self.control_file_error("error_limit must be 2 or higher")
            sql += "segment reject limit %s "%limitStr

        try:
            self.db.query(sql.encode('utf-8'))
        except Exception, e:
            self.log(self.ERROR, 'could not run SQL "%s": %s' % (sql, unicode(e)))

        # set up to drop the external table at the end of operation, unless user
        # specified the 'reuse_tables' option, in which case we don't drop
        if self.reuse_tables == False:
            self.cleanupSql.append('drop external table if exists %s'%self.extSchemaTable)
#
# Create a new staging table or find a reusable staging table to use for this operation
# (only valid for update/merge operations).
#
def create_staging_table(self):
    """Create (or reuse, when gpload:preload:reuse_tables is set) the staging
    table that UPDATE/MERGE operations load into before touching the target.

    Side effects: sets self.staging_table_name, may issue CREATE TABLE, and
    schedules a DROP in self.cleanupSql when reuse is disabled.
    """
    # Do some initial work to extract the update_columns and metadata
    # that may be needed in order to create or reuse a temp table
    if not self.from_cols_from_user:
        # don't put values serial columns
        from_cols = filter(lambda a: a[3] != True, self.from_columns)
    else:
        from_cols = self.from_columns

    # make sure we set the correct distribution policy
    distcols = self.getconfig('gpload:output:match_columns', list)

    # MPP-13399, CR-2227
    # NOTE(review): including_defaults is computed here but never used below —
    # presumably left over from a CREATE TABLE ... LIKE variant; confirm before removing.
    including_defaults = ""
    if self.getconfig('gpload:output:including_defaults',bool,True):
        including_defaults = " including defaults"

    # Warn about stale reusable tables created by older gpload versions.
    sql = "SELECT * FROM pg_class WHERE relname LIKE 'temp_gpload_reusable_%%';"
    resultList = self.db.query(sql.encode('utf-8')).getresult()
    if len(resultList) > 0:
        self.log(self.WARN, """Old style, reusable tables named "temp_gpload_reusable_*" from a previous versions were found.
Greenplum recommends running "DROP TABLE temp_gpload_reusable_..." on each table. This only needs to be done once.""")

    # If the 'reuse tables' option was specified we now try to find an
    # already existing staging table in the catalog which will match
    # the one that we need to use. It must meet the reuse conditions
    is_temp_table = 'TEMP '
    target_columns = []
    for column in self.into_columns:
        if column[2]:
            target_columns.append([quote_unident(column[0]), column[1]])

    if self.reuse_tables == True:
        is_temp_table = ''
        target_table_name = quote_unident(self.table)

        # create a string from all reuse conditions for staging tables and ancode it
        conditions_str = self.get_staging_conditions_string(target_table_name, target_columns, distcols)
        encoding_conditions = hashlib.md5(conditions_str).hexdigest()

        sql = self.get_reuse_staging_table_query(encoding_conditions)
        resultList = self.db.query(sql.encode('utf-8')).getresult()

        if len(resultList) > 0:
            # found a temp table to reuse. no need to create one. we're done here.
            self.staging_table_name = (resultList[0])[0]
            self.log(self.INFO, "reusing staging table %s" % self.staging_table_name)

            # truncate it so we don't use old data
            self.do_truncate(self.staging_table_name)
            return

        # didn't find an existing staging table suitable for reuse. Format a reusable
        # name and issue a CREATE TABLE on it (without TEMP!). Hopefully we can use it
        # next time around
        # we no longer need the timestamp, since we will never want to create few
        # tables with same encoding_conditions
        self.staging_table_name = "staging_gpload_reusable_%s" % (encoding_conditions)
        self.log(self.INFO, "did not find a staging table to reuse. creating %s" % self.staging_table_name)

    # MPP-14667 - self.reuse_tables should change one, and only one, aspect of how we build the following table,
    # and that is, whether it's a temp table or not. In other words, is_temp_table = '' iff self.reuse_tables == True.
    sql = 'CREATE %sTABLE %s ' % (is_temp_table, self.staging_table_name)
    cols = map(lambda a:'%s %s' % (a[0], a[1]), target_columns)
    sql += "(%s)" % ','.join(cols)
    sql += " DISTRIBUTED BY (%s)" % ', '.join(distcols)
    self.log(self.LOG, sql)

    if not self.options.D:
        self.db.query(sql.encode('utf-8'))
        if not self.reuse_tables:
            self.cleanupSql.append('DROP TABLE IF EXISTS %s' % self.staging_table_name)
def count_errors(self):
    """Return the number of rows rejected by the current load, read from
    gp_read_error_log() for this load's external table.

    In reuse_tables mode only the most recent cmdtime batch is counted,
    so errors from earlier loads against the same reused table are ignored.
    Also updates the module-level NUM_WARN_ROWS counter as a side effect.
    """
    notice_processor(self)
    if self.log_errors and not self.options.D:
        # make sure we only get errors for our own instance
        if not self.reuse_tables:
            queryStr = "select count(*) from gp_read_error_log('%s')" % pg.escape_string(self.extTableName)
            results = self.db.query(queryStr.encode('utf-8')).getresult()
            return (results[0])[0]
        else: # reuse_tables
            # Only the newest cmdtime group belongs to this run.
            queryStr = "select cmdtime, count(*) from gp_read_error_log('%s') group by cmdtime order by cmdtime desc limit 1" % pg.escape_string(self.extTableName)
            results = self.db.query(queryStr.encode('utf-8')).getresult()
            global NUM_WARN_ROWS
            if len(results) == 0:
                NUM_WARN_ROWS = 0
                return 0
            # A cmdtime differing from the one recorded before the load means
            # these errors were produced by this run.
            if (results[0])[0] != self.cmdtime:
                self.lastcmdtime = (results[0])[0]
                NUM_WARN_ROWS = (results[0])[1]
                return (results[0])[1];
    return 0
def report_errors(self):
    """Log a warning with the bad-row count and, when the error log survives
    (reuse_tables mode), print the query that retrieves the details.

    Sets self.exitValue to 1 when any rows were rejected, else 0.
    """
    errors = self.count_errors()
    if errors==1:
        self.log(self.WARN, '1 bad row')
    elif errors:
        self.log(self.WARN, '%d bad rows'%errors)
    # error message is also deleted if external table is dropped.
    # if reuse_table is set, error message is not deleted.
    if errors and self.log_errors and self.reuse_tables:
        self.log(self.WARN, "Please use following query to access the detailed error")
        self.log(self.WARN, "select * from gp_read_error_log('{0}') where cmdtime = '{1}'".format(pg.escape_string(self.extTableName), self.lastcmdtime))
    self.exitValue = 1 if errors else 0
def do_insert(self, dest):
    """
    Handle the INSERT case: INSERT INTO *dest* SELECT ... FROM the external table.

    dest -- qualified name of the table to insert into (the load target for
            plain INSERT mode, or the staging table for UPDATE/MERGE).
    Side effects: records the pre-load error-log cmdtime (reuse_tables mode),
    sets self.rowsInserted, and calls report_errors().
    """
    if self.reuse_tables:
        # Remember the newest error-log cmdtime before loading so count_errors()
        # can distinguish this run's rejects from earlier runs'.
        queryStr = "select cmdtime from gp_read_error_log('%s') group by cmdtime order by cmdtime desc limit 1" % pg.escape_string(self.extTableName)
        results = self.db.query(queryStr.encode('utf-8')).getresult()
        if len(results) > 0:
            self.cmdtime = (results[0])[0]
    self.log(self.DEBUG, "into columns " + str(self.into_columns))
    cols = filter(lambda a:a[2]!=None, self.into_columns)

    # only insert non-serial columns, unless the user told us to
    # insert the serials explicitly
    if not self.from_cols_from_user:
        cols = filter(lambda a:a[3] == False, cols)

    sql = 'INSERT INTO %s' % dest
    sql += ' (%s)' % ','.join(map(lambda a:a[0], cols))
    sql += ' SELECT %s' % ','.join(map(lambda a:a[2], cols))
    sql += ' FROM %s' % self.extSchemaTable

    # cktan: progress thread is not reliable. revisit later.
    #progress = Progress(self,self.ports)
    #progress.start()
    #self.threads.append(progress)
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsInserted = self.db.query(sql.encode('utf-8'))
        except Exception, e:
            # We need to be a bit careful about the error since it may contain non-unicode characters
            strE = unicode(str(e), errors = 'ignore')
            strF = unicode(str(sql), errors = 'ignore')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
    #progress.condition.acquire()
    #progress.number = 1
    #progress.condition.wait()
    #progress.condition.release()
    self.report_errors()
def do_method_insert(self):
    """INSERT mode: create the external table and load straight into the target."""
    self.create_external_table()
    self.do_insert(self.get_qualified_tablename())
def map_stuff(self,config,format,index):
    """Map each column named in the YAML list at *config* through *format*.

    config -- YAML path of a list of column names (e.g. 'gpload:output:match_columns').
    format -- callable (target_column_name, mapped_value) -> str fragment.
    index  -- which element of the into_columns entry to feed as the mapped value.
    Returns the list of formatted fragments; logs ERROR for unknown/unmapped columns.
    """
    lis = []
    theList = self.getconfig(config,list)
    theList = convertListToDelimited(theList)
    for i in theList:
        if type(i) != unicode and type(i) != str:
            self.control_file_error("%s must be a YAML sequence of strings"%config)
        j = self.into_columns_dict.get(i)
        if not j:
            self.log(self.ERROR,'column %s in %s does not exist'%(i,config))
        if not j[index]:
            self.log(self.ERROR,'there is no mapping from the column %s in %s'%(i,config))
        lis.append(format(j[0],j[index]))
    return lis
def fix_update_cond(self, match):
    """re.sub() replacement callback: qualify a matched column reference
    with the 'into_table.' alias used by the UPDATE statement.
    """
    matched_text = match.group(0)
    self.log(self.DEBUG, matched_text)
    return 'into_table.' + matched_text
def do_update(self,fromname,index):
    """
    UPDATE case: update the target table from the staging table *fromname*,
    joining on match_columns and setting update_columns.

    index -- element of the into_columns tuples used when formatting the
             SET/ON fragments (see map_stuff).
    Side effect: sets self.rowsUpdated.
    """
    sql = 'update %s into_table ' % self.get_qualified_tablename()
    sql += 'set %s '%','.join(self.map_stuff('gpload:output:update_columns',(lambda x,y:'%s=from_table.%s' % (x, y)),index))
    sql += 'from %s from_table' % fromname
    match = self.map_stuff('gpload:output:match_columns'
                           , lambda x,y:'into_table.%s=from_table.%s' % (x, y)
                           , index)
    update_condition = self.getconfig('gpload:output:update_condition',
                                      unicode, None)
    if update_condition:
        #
        # Place the table alias infront of column references.
        #
        # The following logic is not bullet proof. It may not work
        # correctly if the user uses an identifier in both its
        # delimited and un-delimited format (e.g. where c1 < 7 and "c1" > 2)
        # Better lexing and parsing needs to be done here to fix all cases.
        #
        # Pad with spaces so the lookbehind/lookahead below can match at the ends.
        update_condition = ' ' + update_condition + ' '
        for name, type, mapto, seq in self.into_columns:
            regexp = '(?<=[^\w])%s(?=[^\w])' % name
            self.log(self.DEBUG, 'update_condition re: ' + regexp)
            temp_update_condition = update_condition
            updateConditionList = splitIntoLiteralsAndNonLiterals(update_condition)
            skip = False
            # NOTE(review): newUpdateConditionList is never used below — dead variable.
            newUpdateConditionList = []
            update_condition = ''
            # Substitute only where `skip` allows, so quoted literals from the
            # split are left untouched.
            # NOTE(review): skip is set True after the first fragment and never
            # reset in the visible code — confirm this matches the intended
            # literal/non-literal alternation.
            for uc in updateConditionList:
                if skip == False:
                    uc = re.sub(regexp, self.fix_update_cond, uc)
                    skip = True
                update_condition = update_condition + uc
            if update_condition == temp_update_condition:
                # see if column can be undelimited, and try again.
                if len(name) > 2 and name[1:-1] == name[1:-1].lower():
                    regexp = '(?<=[^\w])%s(?=[^\w])' % name[1:-1]
                    self.log(self.DEBUG, 'update_condition undelimited re: ' + regexp)
                    update_condition = re.sub( regexp
                                               , self.fix_update_cond
                                               , update_condition
                                               )
        self.log(self.DEBUG, "updated update_condition to %s" %
                 update_condition)
        match.append(update_condition)
    sql += ' where %s' % ' and '.join(match)
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsUpdated = self.db.query(sql.encode('utf-8'))
        except Exception, e:
            # We need to be a bit careful about the error since it may contain non-unicode characters
            strE = unicode(str(e), errors = 'ignore')
            strF = unicode(str(sql), errors = 'ignore')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
def get_qualified_tablename(self):
    """Return the load target as '<schema>.<table>'."""
    return "{0}.{1}".format(self.schema, self.table)
def get_table_dist_key(self):
    """Return the list of distribution-key column names for the target table,
    queried from gp_distribution_policy. Empty list means randomly distributed.
    """
    # NOTE: this query should be re-written better. the problem is that it is
    # not possible to perform a cast on a table name with spaces...
    sql = "select attname from pg_attribute a, gp_distribution_policy p , pg_class c, pg_namespace n "+\
        "where a.attrelid = c.oid and " + \
        "a.attrelid = p.localoid and " + \
        "a.attnum = any (p.attrnums) and " + \
        "c.relnamespace = n.oid and " + \
        "n.nspname = '%s' and c.relname = '%s'; " % (quote_unident(self.schema), quote_unident(self.table))

    resultList = self.db.query(sql.encode('utf-8')).getresult()
    # Idiom fix: the original built the list with a manual while-loop over
    # indices; a comprehension over the result rows is equivalent and clearer.
    return [row[0] for row in resultList]
def table_supports_update(self):
    """Columns being updated cannot appear in the distribution key.

    Loads the distribution key into self.distkey and raises a control-file
    error (via control_file_error) when update_columns intersects it.
    """
    distKeyList = self.get_table_dist_key()
    distkey = set()
    for dk in distKeyList:
        distkey.add(quote_ident(dk))
    self.distkey = distkey

    if len(distkey) != 0:
        # not randomly distributed - check that UPDATE_COLUMNS isn't part of the distribution key
        updateColumnList = self.getconfig('gpload:output:update_columns',
                                          list,
                                          returnOriginal=True)
        update_columns = convertListToDelimited(updateColumnList)
        update_columns = set(update_columns)
        a = distkey.intersection(update_columns)
        if len(a):
            self.control_file_error('update_columns cannot reference column(s) in distribution key (%s)' % ', '.join(list(distkey)))
def do_method_update(self):
    """Load the data in and update an existing table based upon it.

    UPDATE mode: load into a staging table first, then UPDATE the target
    from it (joining on match_columns).
    """
    self.table_supports_update()
    self.create_staging_table()
    self.create_external_table()
    self.do_insert(self.staging_table_name)
    # These rows are inserted temporarily for processing, so set inserted rows back to zero.
    self.rowsInserted = 0
    self.do_update(self.staging_table_name, 0)
def do_method_merge(self):
    """insert data not already in the table, update remaining items

    MERGE mode: load into staging, UPDATE matching rows in the target, then
    INSERT staging rows that have no match (deduplicated per match key via
    row_number() so only one candidate row per key is inserted).
    """
    self.table_supports_update()
    self.create_staging_table()
    self.create_external_table()
    self.do_insert(self.staging_table_name)
    self.rowsInserted = 0 # MPP-13024. No rows inserted yet (only to temp table).
    self.do_update(self.staging_table_name, 0)

    # insert new rows to the target table
    match = self.map_stuff('gpload:output:match_columns',lambda x,y:'into_table.%s=from_table.%s'%(x,y),0)
    matchColumns = self.getconfig('gpload:output:match_columns',list)
    cols = filter(lambda a:a[2] != None, self.into_columns)
    sql = 'INSERT INTO %s ' % self.get_qualified_tablename()
    sql += '(%s) ' % ','.join(map(lambda a:a[0], cols))
    sql += '(SELECT %s ' % ','.join(map(lambda a:'from_table.%s' % a[0], cols))
    # Number duplicate staging rows per match key so we insert exactly one.
    sql += 'FROM (SELECT *, row_number() OVER (PARTITION BY %s) AS gpload_row_number ' % ','.join(matchColumns)
    sql += 'FROM %s) AS from_table ' % self.staging_table_name
    # Anti-join: keep only staging rows with no counterpart in the target.
    sql += 'LEFT OUTER JOIN %s into_table ' % self.get_qualified_tablename()
    sql += 'ON %s '%' AND '.join(match)
    where = self.map_stuff('gpload:output:match_columns',lambda x,y:'into_table.%s IS NULL'%x,0)
    sql += 'WHERE %s ' % ' AND '.join(where)
    sql += 'AND gpload_row_number=1)'
    self.log(self.LOG, sql)
    if not self.options.D:
        try:
            self.rowsInserted = self.db.query(sql.encode('utf-8'))
        except Exception, e:
            # We need to be a bit careful about the error since it may contain non-unicode characters
            strE = unicode(str(e), errors = 'ignore')
            strF = unicode(str(sql), errors = 'ignore')
            self.log(self.ERROR, strE + ' encountered while running ' + strF)
def do_truncate(self, tblname):
    """Issue TRUNCATE on *tblname*; failures are logged via self.log(ERROR)."""
    self.log(self.LOG, "Truncate table %s" %(tblname))
    if not self.options.D:
        try:
            truncateSQLtext = "truncate %s" % tblname
            self.db.query(truncateSQLtext.encode('utf-8'))
        except Exception, e:
            self.log(self.ERROR, 'could not execute truncate target %s: %s' % (tblname, str(e)))
def do_method(self):
    """Dispatch the load according to gpload:output:mode (insert/update/merge),
    handling optional preload truncate/reuse options, sql:before/sql:after
    hooks, and wrapping non-insert modes in an explicit transaction.
    """
    # Is the table to be truncated before the load?
    preload = self.getconfig('gpload:preload', list, default=None)
    method = self.getconfig('gpload:output:mode', unicode, 'insert').lower()
    self.log_errors = self.getconfig('gpload:input:log_errors', bool, False)
    truncate = False
    self.reuse_tables = False

    # update/merge need staging + update to be atomic, hence the explicit BEGIN.
    if not self.options.no_auto_trans and not method=='insert':
        self.db.query("BEGIN")

    if preload:
        truncate = self.getconfig('gpload:preload:truncate',bool,False)
        self.reuse_tables = self.getconfig('gpload:preload:reuse_tables',bool,False)
    if truncate == True:
        if method=='insert':
            self.do_truncate(self.schemaTable)
        else:
            self.log(self.ERROR, 'preload truncate operation should be used with insert ' +
                     'operation only. used with %s' % method)

    # sql pre or post processing?
    sql = self.getconfig('gpload:sql', list, default=None)
    before = None
    after = None
    if sql:
        before = self.getconfig('gpload:sql:before', unicode, default=None)
        after = self.getconfig('gpload:sql:after', unicode, default=None)
    if before:
        self.log(self.LOG, "Pre-SQL from user: %s" % before)
        if not self.options.D:
            try:
                self.db.query(before.encode('utf-8'))
            except Exception, e:
                self.log(self.ERROR, 'could not execute SQL in sql:before "%s": %s' %
                         (before, str(e)))

    if method=='insert':
        self.do_method_insert()
    elif method=='update':
        self.do_method_update()
    elif method=='merge':
        self.do_method_merge()
    else:
        self.control_file_error('unsupported method %s' % method)

    # truncate the staging table to avoid dumping it's content - see MPP-15474
    if method=='merge' or method=='update':
        self.do_truncate(self.staging_table_name)

    if after:
        self.log(self.LOG, "Post-SQL from user: %s" % after)
        if not self.options.D:
            try:
                self.db.query(after.encode('utf-8'))
            except Exception, e:
                self.log(self.ERROR, 'could not execute SQL in sql:after "%s": %s' %
                         (after, str(e)))

    if not self.options.no_auto_trans and not method=='insert':
        self.db.query("COMMIT")
def stop_gpfdists(self):
    """Terminate all spawned gpfdist subprocesses and join helper threads."""
    if self.subprocesses:
        self.log(self.LOG, 'killing gpfdist')
        for a in self.subprocesses:
            try:
                if platform.system() in ['Windows', 'Microsoft']:
                    # win32 API is better but hard for us
                    # to install, so we use the crude method
                    subprocess.Popen("taskkill /F /T /PID %i" % a.pid,
                                     shell=True, stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                else:
                    os.kill(a.pid, signal.SIGTERM)
            except OSError:
                # process already exited; nothing to kill
                pass
    for t in self.threads:
        t.join()
def run2(self):
    """Main load sequence: read config/metadata, start gpfdist, run the load,
    and log the elapsed time. Called by run(), which handles errors/cleanup.
    """
    self.log(self.DEBUG, 'config ' + str(self.config))
    start = time.time()
    self.read_config()
    self.setup_connection()
    self.read_table_metadata()
    self.read_columns()
    self.read_mapping()
    self.start_gpfdists()
    self.do_method()
    self.log(self.INFO, 'running time: %.2f seconds'%(time.time()-start))
def run(self):
    """Top-level driver: install signal handlers, run the load (run2), then
    always stop gpfdist, run cleanup SQL, close the DB connection and report
    the final row counts / exit status. Sets self.exitValue (0/1/2).
    """
    self.db = None
    self.rowsInserted = 0
    self.rowsUpdated = 0
    signal.signal(signal.SIGINT, handle_kill)
    signal.signal(signal.SIGTERM, handle_kill)
    # win32 doesn't do SIGQUIT
    if not platform.system() in ['Windows', 'Microsoft']:
        signal.signal(signal.SIGQUIT, handle_kill)
        signal.signal(signal.SIGHUP, signal.SIG_IGN)

    try:
        try:
            self.run2()
        except Exception:
            # Any failure: record a backtrace in the log file and mark failure.
            traceback.print_exc(file=self.logfile)
            self.logfile.flush()
            self.exitValue = 2
            if (self.options.qv > self.INFO):
                traceback.print_exc()
            else:
                self.log(self.ERROR, "unexpected error -- backtrace " +
                         "written to log file")
    finally:
        # Always tear down gpfdist and temporary objects, even on failure.
        self.stop_gpfdists()

        if self.cleanupSql:
            self.log(self.LOG, 'removing temporary data')
            self.setup_connection()
            for a in self.cleanupSql:
                try:
                    self.log(self.DEBUG, a)
                    self.db.query(a.encode('utf-8'))
                except (Exception, SystemExit):
                    traceback.print_exc(file=self.logfile)
                    self.logfile.flush()
                    traceback.print_exc()
        if self.db != None:
            self.db.close()

    self.log(self.INFO, 'rows Inserted          = ' + str(self.rowsInserted))
    self.log(self.INFO, 'rows Updated           = ' + str(self.rowsUpdated))
    self.log(self.INFO, 'data formatting errors = ' + str(NUM_WARN_ROWS))
    if self.exitValue==0:
        self.log(self.INFO, 'gpload succeeded')
    elif self.exitValue==1:
        self.log(self.INFO, 'gpload succeeded with warnings')
    else:
        self.log(self.INFO, 'gpload failed')

    ## MPP-19015 - Extra python thread shutdown time is needed on HP-UX
    if platform.uname()[0] == 'HP-UX':
        time.sleep(1)
# Script entry point: build a gpload instance from the command-line arguments
# and propagate its exit status to the shell.
if __name__ == '__main__':
    g = gpload(sys.argv[1:])
    g.run()
    sys.exit(g.exitValue)
| apache-2.0 | 1,737,471,462,161,924,600 | 35.211429 | 188 | 0.541325 | false |
Openandgit/2014cpb_final_project- | std/b40323254.py | 1 | 8479 | #@+leo-ver=5-thin
#@+node:lee.20141223114246.40: * @file example2.py
#@@language python
#@@tabwidth -4
import cherrypy
import random
from std.asciisymbol import asciiImage
#@+others
#@+node:lee.20141223114246.41: ** class Application
class Application(object):
    """CherryPy web application serving a personal course-project homepage
    (index) and a remarks page, rendered with a shared Skeleton-CSS template.
    """
    #@+others
    #@+node:lee.20141223114246.42: *3* def init
    def __init__(self):
        # your name
        self.name = '蔡柏峰'
        # your student ID
        self.number = '40323254'
        # your class
        self.classes = 'nfu'
        # your github repository url
        self.github_repo_url = 'https://github.com/Openandgit/2014cpb_final_project-'
        # your openshift app
        self.openshift_url = 'http://cpb-nfutaiwan.rhcloud.com/'
        # your self-evaluation: (project name, score) pairs
        self.evaluation = [('Project 7', 80), ('Project 8', 90), ('Project 9', 100)]
        # your photo url
        self.photo_url = 'http://placekitten.com/g/350/300'
        # course remarks / reflections shown on the remark page
        self.my_remark = """
Computer Programming is good course
"""
    #@+node:lee.20141223114246.43: *3* def use_template
    def use_template(self, content):
        """Wrap *content* with the shared HTML head/footer and the nav bar."""
        above = """
<!DOCTYPE html>
<html lang="en">
<head>
<!-- Basic Page Needs
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<meta charset="utf-8">
<title>title</title>
<meta name="description" content="">
<meta name="author" content="">
<!-- Mobile Specific Metas
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- FONT
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<style>
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 300;
src: local('Raleway Light'), local('Raleway-Light'), url(/static/font/Raleway300.woff) format('woff');
}
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 400;
src: local('Raleway'), url(/static/font/Raleway400.woff) format('woff');
}
@font-face {
font-family: 'Raleway';
font-style: normal;
font-weight: 600;
src: local('Raleway SemiBold'), local('Raleway-SemiBold'), url(/static/font/Raleway600.woff) format('woff');
}
</style>
<!-- CSS
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<link rel="stylesheet" href="/static/css/normalize.css">
<link rel="stylesheet" href="/static/css/skeleton.css">
<link rel="stylesheet" href="/static/css/custom.css">
<!-- Favicon
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<link rel="icon" type="image/png" href="/static/images/favicon.png" />
</head>
<body>
<!-- Primary Page Layout
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
<!-- .container is main centered wrapper -->
<div class="container">
"""
        below = """
</div>
<footer class="center">
2014 Computer Programming
</footer>
<!-- Note: columns can be nested, but it's not recommended since Skeleton's grid has %-based gutters, meaning a nested grid results in variable with gutters (which can end up being *really* small on certain browser/device sizes) -->
<!-- End Document
–––––––––––––––––––––––––––––––––––––––––––––––––– -->
</body>
</html>
"""
        return above + self.generate_nav(self.link()) + content + below
    #@+node:lee.20141223114246.44: *3* def generate_nav
    def generate_nav(self, anchors):
        """Render the menu; *anchors* is a list of (href, label) pairs."""
        above_side = """
<div class="row">
<div class="nav twelve columns">
<input type="checkbox" id="toggle" />
<div>
<label for="toggle" class="toggle" data-open="Main Menu" data-close="Close Menu" onclick></label>
<ul class="menu">
"""
        content = ''
        for link, name in anchors:
            content += '<li><a href="' + link + '">' + name + '</a></li>'
        below_side = """
</ul>
</div>
</div>
</div>
"""
        return above_side + content + below_side
    #@+node:lee.20141223114246.45: *3* def generate_form_page
    def generate_form_page(self, form='', output=''):
        """Two-column page: a form on the left, an output panel on the right."""
        content = """
<div class="content">
<div class="row">
<div class="one-half column">
%s
</div>
<div class="one-half column">
<div class="output u-full-width">
<p>Output:</p>
<p>
%s
</p>
</div>
</div>
</div>
</div>
"""%(form, output)
        return self.use_template(content)
    #@+node:lee.20141223114246.55: *3* def generate_headline_page
    def generate_headline_page(self, headline, output):
        """Single-column page with a centered *headline* over *output* text."""
        content = """
<div class="content">
<div class="row">
<div class="headline center">%s</div>
<div class="twelve columns">
<p>%s</p>
</div>
</div>
</div>
""" % (headline, output)
        return self.use_template(content)
    #@+node:lee.20141223114246.46: *3* def generate_personal_page
    def generate_personal_page(self, data=None):
        """Render the personal profile page from *data* (photo, name, ID,
        class, evaluation table). Returns '' when no data is given.
        """
        if data is None:
            return ''
        # check data have all we need, if the key not exist, use empty string
        must_have_key = ('photo_url', 'name', 'ID', 'class', 'evaluation')
        for key in must_have_key:
            data[key] = data.get(key, '')
        # Turn the (name, score) pairs into <tr> rows for the table body.
        if 'evaluation' in data:
            table_content = ''
            for projectName, score in data['evaluation']:
                table_content += """<tr><td>%s</td><td>%s</td>"""%(projectName, score)
            data['evaluation'] = table_content
        content = """
<div class="content">
<div class="row">
<div class="one-half column">
<div class="headline">
About Me
</div>
<div class="photo">
<img src="{photo_url:s}" alt="photo">
</div>
<div class="meta">
<ul>
<li>Name: {name:s}</li>
<li>ID NO. : {ID:s}</li>
<li>Class: {class:s}</li>
</ul>
</div>
</div>
<div class="one-half column">
<div class="headline">
Self Evaluation
</div>
<div>
<table class="u-full-width">
<thead>
<tr>
<th>Project Name</th>
<th>Score</th>
</tr>
</thead>
<tbody>
{evaluation:s}
</tbody>
</table>
</div>
</div>
</div>
</div>
""".format(**data)
        return self.use_template(content)
    #@+node:lee.20141223114246.47: *3* def link
    def link(self):
        # (href, label) pairs consumed by generate_nav()
        aviable_link = [("index", "HOME"), ("remark", "心得"), (self.openshift_url, "openshift app"),(self.github_repo_url, "github repo"),]
        return aviable_link
    #@+node:lee.20141223114246.54: *3* def remark
    @cherrypy.expose
    def remark(self):
        # this is the remarks page
        # generate_headline_page(your title, your content)
        return self.generate_headline_page("REMARK", self.my_remark)
    #@+node:lee.20141223114246.48: *3* def index
    @cherrypy.expose
    def index(self):
        # this is the home page
        data = {
            'name':self.name,
            'ID':self.number,
            'class':self.classes,
            'evaluation': self.evaluation,
            'photo_url':self.photo_url,
        }
        return self.generate_personal_page(data)
    #@-others
#@-others
#@-leo
| gpl-3.0 | 5,243,364,340,695,093,000 | 30.584362 | 236 | 0.489511 | false |
devclone/enigma2-9f38fd6 | lib/python/Components/GUISkin.py | 51 | 3199 | from GUIComponent import GUIComponent
from skin import applyAllAttributes
from Tools.CList import CList
from Sources.StaticText import StaticText
class GUISkin:
    """Mixin for enigma2 screens: creates the GUI widgets described by the
    parsed skin attributes and manages the screen title and LCD summaries.
    """
    __module__ = __name__

    def __init__(self):
        self["Title"] = StaticText()
        self.onLayoutFinish = [ ]
        self.summaries = CList()
        self.instance = None
        self.desktop = None

    def createGUIScreen(self, parent, desktop, updateonly = False):
        # Instantiate (unless updateonly) and skin every renderer, component,
        # and additional widget of this screen.
        for val in self.renderer:
            if isinstance(val, GUIComponent):
                if not updateonly:
                    val.GUIcreate(parent)
                if not val.applySkin(desktop, self):
                    print "warning, skin is missing renderer", val, "in", self

        for key in self:
            val = self[key]
            if isinstance(val, GUIComponent):
                if not updateonly:
                    val.GUIcreate(parent)
                depr = val.deprecationInfo
                if val.applySkin(desktop, self):
                    if depr:
                        print "WARNING: OBSOLETE COMPONENT '%s' USED IN SKIN. USE '%s' INSTEAD!" % (key, depr[0])
                        print "OBSOLETE COMPONENT WILL BE REMOVED %s, PLEASE UPDATE!" % (depr[1])
                elif not depr:
                    print "warning, skin is missing element", key, "in", self
        for w in self.additionalWidgets:
            if not updateonly:
                w.instance = w.widget(parent)
                # w.instance.thisown = 0
            applyAllAttributes(w.instance, desktop, w.skinAttributes, self.scale)
        # onLayoutFinish entries may be either code strings or callables.
        for f in self.onLayoutFinish:
            if type(f) is not type(self.close): # is this the best way to do this?
                exec f in globals(), locals()
            else:
                f()

    def deleteGUIScreen(self):
        # Destroy the GUI instances of all components of this screen.
        for (name, val) in self.items():
            if isinstance(val, GUIComponent):
                val.GUIdelete()

    def close(self):
        self.deleteGUIScreen()

    def createSummary(self):
        # Subclasses may return a summary-screen class; None means no summary.
        return None

    def addSummary(self, summary):
        self.summaries.append(summary)

    def removeSummary(self, summary):
        self.summaries.remove(summary)

    def setTitle(self, title):
        # Propagate the title to the window, the "Title" label and summaries.
        try:
            if self.instance:
                self.instance.setTitle(title)
            self["Title"].text = title
            self.summaries.setTitle(title)
        except:
            # NOTE(review): bare except silently swallows all errors here —
            # presumably guarding against a half-constructed screen; confirm.
            pass

    def getTitle(self):
        return self["Title"].text

    title = property(getTitle, setTitle)

    def setDesktop(self, desktop):
        self.desktop = desktop

    def applySkin(self):
        # Apply the parsed skin attributes to this screen's window.
        z = 0
        baseres = (720, 576) # FIXME: a skin might have set another resolution, which should be the base res
        idx = 0
        skin_title_idx = -1
        title = self.title
        for (key, value) in self.skinAttributes:
            if key == "zPosition":
                z = int(value)
            elif key == "title":
                skin_title_idx = idx
                if title:
                    # A title set from code overrides the skin's title.
                    self.skinAttributes[skin_title_idx] = ("title", title)
                else:
                    self["Title"].text = value
                    self.summaries.setTitle(value)
            elif key == "baseResolution":
                baseres = tuple([int(x) for x in value.split(',')])
            idx += 1
        self.scale = ((baseres[0], baseres[0]), (baseres[1], baseres[1]))

        if not self.instance:
            from enigma import eWindow
            self.instance = eWindow(self.desktop, z)

        if skin_title_idx == -1 and title:
            self.skinAttributes.append(("title", title))

        # we need to make sure that certain attributes come last
        self.skinAttributes.sort(key=lambda a: {"position": 1}.get(a[0], 0))
        applyAllAttributes(self.instance, self.desktop, self.skinAttributes, self.scale)
        self.createGUIScreen(self.instance, self.desktop)
| gpl-2.0 | -2,634,629,307,604,525,000 | 27.061404 | 102 | 0.680838 | false |
DadanielZ/incubator-eagle | eagle-external/hadoop_jmx_collector/system_metric_collector.py | 1 | 13723 | # !/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metric_collector import MetricCollector, Runner
import logging, socket, string, os, re, time
class SystemMetricCollector(MetricCollector):
METRIC_PREFIX = "system"
METRIC_NAME_EXCLUDE = re.compile(r"[\(|\)]")
def run(self):
    """Entry point invoked by the collector framework: resolve the CPU-usage
    state-file path from config (env.cpu_stat_file, with a /tmp default) and
    run every collector, continuing past individual failures.
    """
    if self.config["env"].has_key("cpu_stat_file"):
        self.cpu_stat_file = self.config["env"]["cpu_stat_file"]
        logging.info("Overrode env.cpu_stat_file: %s", self.cpu_stat_file)
    else:
        self.cpu_stat_file = "/tmp/eagle_cpu_usage_state"
        logging.info("Using default env.cpu_stat_file: %s", self.cpu_stat_file)

    self.try_exec_func(
        self.collect_cpu_metric,
        self.collect_uptime_metric,
        self.collect_memory_metric,
        self.collect_loadavg_metric,
        self.collect_cpu_temp_metric,
        self.collect_nic_metric,
        self.collect_smartdisk_metric,
        self.collect_diskstat_metric
    )
def try_exec_func(self, *funcs):
    """Invoke each callable in *funcs*, isolating failures: an exception in
    one collector is logged and counted, and the remaining collectors still
    run. Finishes by logging a per-function success/error summary.
    """
    outcomes = dict()
    ok_count = 0
    err_count = 0
    for fn in funcs:
        fn_name = fn.__name__
        try:
            logging.info("Executing: %s", fn_name)
            fn()
            outcomes[fn_name] = "success"
            ok_count += 1
        except Exception as exc:
            logging.warn("Failed to execute: %s", fn_name)
            logging.exception(exc)
            outcomes[fn_name] = "error: %s: %s" % (type(exc), exc)
            err_count += 1
    summary = "".join("%-30s: %-30s\n" % (name, outcomes[name]) for name in outcomes)
    logging.info("Execution result (total: %s, succeed: %s, failed: %s): \n\n%s",
                 len(funcs), ok_count, err_count, summary)
# ====================================
# CPU Usage
# ====================================
def collect_cpu_metric(self):
    """
    CPU Usage Percentage Metrics:
    system.cpu.usage: (user + nice + system + wait + irq + softirq + steal + guest) / (user + nice + system + idle + wait + irq + softirq + steal + guest)

    Example:
    {'timestamp': 1483594861458, 'metric': 'system.cpu.usage', 'site': u'sandbox', 'value': 0.048, 'host': 'localhost', 'device': 'cpuN'}

    system.cpu.totalusage: Sum(Each CPU Usage) / Sum (CPU Total)

    Example:
    {'timestamp': 1483594861484, 'metric': 'system.cpu.totalusage', 'site': u'sandbox', 'value': 0.17, 'host': 'sandbox.hortonworks.com', 'device': 'cpu'}

    The per-run totals are persisted in self.cpu_stat_file so that
    totalusage reflects the delta since the previous collection.
    """
    cpu_metric = self.new_metric()
    cpu_info = os.popen('cat /proc/stat').readlines()
    # Column names of /proc/stat cpuN lines (items[0] is the device name).
    dimensions = ["cpu", "user", "nice", "system", "idle", "wait", "irq", "softirq", "steal", "guest"]

    total_cpu = 0
    total_cpu_usage = 0

    # Load the totals written by the previous run, if any.
    cpu_stat_pre = None
    data_dir = self.cpu_stat_file
    if os.path.exists(data_dir):
        fd = open(data_dir, "r")
        cpu_stat_pre = fd.read()
        fd.close()

    for item in cpu_info:
        # Only per-core lines ("cpu0", "cpu1", ...); skip the aggregate "cpu" line.
        if re.match(r'^cpu\d+', item) is None:
            continue
        items = re.split("\s+", item.strip())
        demens = min(len(dimensions), len(items))
        metric_event = dict()
        for i in range(1, demens):
            metric_event[dimensions[i]] = int(items[i])
            # Emit one raw counter metric per dimension (cpu_metric dict is
            # reused and mutated between collect() calls).
            cpu_metric['timestamp'] = int(round(time.time() * 1000))
            cpu_metric['metric'] = self.METRIC_PREFIX + "." + 'cpu.' + dimensions[i]
            cpu_metric['device'] = items[0]
            cpu_metric['value'] = items[i]
            self.collect(cpu_metric)

        per_cpu_usage = metric_event["user"] + metric_event["nice"] + metric_event["system"] + metric_event[
            "wait"] + metric_event["irq"] + metric_event["softirq"] + metric_event["steal"] + metric_event["guest"]
        per_cpu_total = metric_event["user"] + metric_event["nice"] + metric_event["system"] + metric_event[
            "idle"] + metric_event["wait"] + metric_event["irq"] + metric_event["softirq"] + metric_event["steal"] + metric_event["guest"]

        total_cpu += per_cpu_total
        total_cpu_usage += per_cpu_usage

        # system.cpu.usage
        cpu_metric['timestamp'] = int(round(time.time() * 1000))
        cpu_metric['metric'] = self.METRIC_PREFIX + "." + 'cpu.' + "usage"
        cpu_metric['device'] = items[0]
        cpu_metric['value'] = per_cpu_usage * 1.0 /per_cpu_total
        self.collect(cpu_metric)

    # Persist this run's totals for the next delta computation.
    cup_stat_current = str(total_cpu_usage) + " " + str(total_cpu)
    logging.info("Current cpu stat: %s", cup_stat_current)
    fd = open(data_dir, "w")
    fd.write(cup_stat_current)
    fd.close()

    pre_total_cpu_usage = 0
    pre_total_cpu = 0
    if cpu_stat_pre != None:
        result = re.split("\s+", cpu_stat_pre.rstrip())
        pre_total_cpu_usage = int(result[0])
        pre_total_cpu = int(result[1])

    # NOTE(review): if total_cpu equals pre_total_cpu this divides by zero —
    # presumably collections are far enough apart that counters always advance.
    cpu_metric['timestamp'] = int(round(time.time() * 1000))
    cpu_metric['metric'] = self.METRIC_PREFIX + "." + 'cpu.' + "totalusage"
    cpu_metric['device'] = "cpu"
    cpu_metric['value'] = (total_cpu_usage - pre_total_cpu_usage) * 1.0 / (total_cpu - pre_total_cpu)
    self.collect(cpu_metric)
# ====================================
# OS Up Time
# ====================================
def collect_uptime_metric(self):
    """Emit system.uptime.uptime.day and system.uptime.idletime.day, i.e. the
    two /proc/uptime fields (seconds) converted to days, rounded to 2 places.
    """
    metric = self.new_metric()
    demension = ["uptime.day", "idletime.day"]
    output = os.popen('cat /proc/uptime').readlines()
    for item in output:
        items = re.split("\s+", item.rstrip())
        for i in range(len(demension)):
            metric["timestamp"] = int(round(time.time() * 1000))
            metric["metric"] = self.METRIC_PREFIX + "." + 'uptime' + '.' + demension[i]
            # 86400 seconds per day
            metric["value"] = str(round(float(items[i]) / 86400, 2))
            self.collect(metric)
# ====================================
# Memory
# ====================================
def collect_memory_metric(self):
    """Emit every field of /proc/meminfo as a metric, plus a derived
    memory.usage percentage computed as (total - free - buffers - cached) / total.

    Fix: removed the unused local `itemNum` and a stale commented-out print.
    """
    event = self.new_metric()
    event["host"] = self.fqdn
    output = os.popen('cat /proc/meminfo').readlines()
    mem_info = dict()
    for item in output:
        items = re.split(":?\s+", item.rstrip())
        mem_info[items[0]] = int(items[1])
        metric = 'memory' + '.' + items[0]
        # Some rows carry a unit suffix (e.g. "kB"); fold it into the name.
        if (len(items) > 2):
            metric = metric + '.' + items[2]
        event["timestamp"] = int(round(time.time() * 1000))
        event["metric"] = self.METRIC_NAME_EXCLUDE.sub("", self.METRIC_PREFIX + "." + metric.lower())
        event["value"] = items[1]
        event["device"] = 'memory'
        self.collect(event)
    # Used memory excludes buffers and page cache, expressed in percent.
    usage = (mem_info['MemTotal'] - mem_info['MemFree'] - mem_info['Buffers'] - mem_info['Cached']) * 100.0 / \
            mem_info['MemTotal']
    usage = round(usage, 2)
    self.emit_metric(event, self.METRIC_PREFIX, "memory.usage", usage, "memory")
# ====================================
# Load AVG
# ====================================
def collect_loadavg_metric(self):
    """
    Collect Load Avg Metrics
    """
    names = ['cpu.loadavg.1min', 'cpu.loadavg.5min', 'cpu.loadavg.15min']
    for line in os.popen('cat /proc/loadavg').readlines():
        fields = re.split("\s+", line.rstrip())
        # zip truncates to the shorter sequence, matching min(len, len).
        for name, value in zip(names, fields):
            event = self.new_metric()
            event["timestamp"] = int(round(time.time() * 1000))
            event["metric"] = self.METRIC_PREFIX + "." + name
            event["value"] = value
            event["device"] = 'cpu'
            self.collect(event)
# ====================================
# IPMI CPU Temp
# ====================================
def collect_cpu_temp_metric(self):
    """Read per-CPU temperatures via `ipmitool sdr` and emit one metric per CPU.

    Fix: the device field previously used `item[1]` (the second *character*
    of the raw output line) instead of `items[1]` (the captured CPU label,
    e.g. "CPU1").
    """
    output = os.popen('sudo ipmitool sdr | grep Temp | grep CPU').readlines()
    for item in output:
        # Capture groups: items[1] = CPU label, items[2] = temperature value.
        items = re.split("^(CPU\d+)\sTemp\.\s+\|\s+(\d+|\d+\.\d+)\s", item.rstrip())
        event = self.new_metric()
        event["timestamp"] = int(round(time.time() * 1000))
        event["metric"] = DATA_TYPE + "." + 'cpu.temp'
        event["value"] = items[2]
        event["device"] = items[1]
        self.collect(event)
# ====================================
# NIC Metrics
# ====================================
def collect_nic_metric(self):
    """Emit per-ethernet-interface rx/tx counters parsed from /proc/net/dev."""
    fields = ['receivedbytes', 'receivedpackets', 'receivederrs', 'receiveddrop', 'transmitbytes',
              'transmitpackets',
              'transmiterrs', 'transmitdrop']
    for line in os.popen("cat /proc/net/dev").readlines():
        # Only physical ethernet interfaces (ethN) are reported.
        if re.match(r'^\s+eth\d+:', line) is None:
            continue
        columns = re.split("[:\s]+", line.strip())
        # Keep rx bytes/packets/errs/drop and tx bytes/packets/errs/drop.
        values = columns[1:5] + columns[9:13]
        for idx, field in enumerate(fields):
            event = self.new_metric()
            event["timestamp"] = int(round(time.time() * 1000))
            event['metric'] = self.METRIC_PREFIX + "." + 'nic.' + field
            event["value"] = values[idx]
            event["device"] = columns[0]
            self.collect(event)
# ====================================
# Smart Disk Metrics
# ====================================
def collect_smartdisk_metric(self):
    """Emit SMART attribute values for every SCSI disk reported by lsscsi."""
    for disk_line in os.popen("lsscsi").readlines():
        parts = re.split('\/', disk_line.strip())
        device = parts[-1]
        for attr_line in os.popen('sudo smartctl -A /dev/' + device).readlines():
            attr_line = attr_line.strip()
            # SMART attribute rows start with a numeric id; skip headers.
            if re.match(r'^[\d]+\s', attr_line) is None:
                continue
            columns = re.split("\s+", attr_line)
            event = self.new_metric()
            event['metric'] = DATA_TYPE + "." + ('smartdisk.' + columns[1]).lower()
            event["timestamp"] = int(round(time.time() * 1000))
            event["value"] = columns[-1]
            event["device"] = 'smartdisk'
            self.collect(event)
# ====================================
# Disk Stat Metrics
# ====================================
def collect_diskstat_metric(self):
    """Collect per-disk IO rates (iostat -x) and capacity/usage (df -k).

    Fixes the acknowledged FIXME IndexError: df rows whose device path does
    not match /dev/<disk><partition#> (e.g. device-mapper paths), or whose
    disk has no iostat sample, are now skipped instead of crashing. Also
    removes the leftover debug logging of len(disks).
    """
    demension = ['readrate', 'writerate', 'avgwaittime', 'utilization', 'disktotal', 'diskused', 'usage']
    iostat_output = os.popen("iostat -xk 1 2 | grep ^sd").readlines()
    # iostat prints two samples; keep only the second (interval) sample.
    iostat_output = iostat_output[len(iostat_output) / 2:]
    iostat_dict = {}
    for item in iostat_output:
        items = re.split('\s+', item.strip())
        filtered_items = [items[5], items[6], items[9], items[11]]
        iostat_dict[items[0]] = filtered_items
    disk_output = os.popen("df -k | grep ^/dev").readlines()
    for item in disk_output:
        items = re.split('\s+', item.strip())
        disks = re.split('^\/dev\/(\w+)\d+$', items[0])
        # re.split returns [original] when the pattern does not match, so
        # disks[1] would raise IndexError; skip unmatched device paths.
        if len(disks) < 2:
            logging.warning("Unrecognized device path, skipping: %s", items[0])
            continue
        disk = disks[1]
        if disk not in iostat_dict:
            logging.warning("No iostat sample for disk, skipping: %s", disk)
            continue
        iostat_dict[disk].append(items[1])
        iostat_dict[disk].append(items[2])
        iostat_dict[disk].append(items[4].rstrip('%'))
    for key, metrics in iostat_dict.iteritems():
        for i in range(len(metrics)):
            metric = 'disk.' + demension[i]
            kafka_dict = self.new_metric()
            kafka_dict['metric'] = DATA_TYPE + "." + metric.lower()
            kafka_dict["timestamp"] = int(round(time.time() * 1000))
            kafka_dict["value"] = metrics[i]
            kafka_dict["device"] = key
            self.collect(kafka_dict)
# ====================================
# Helper Methods
# ====================================
def emit_metric(self, event, prefix, metric, value, device):
    """Fill the shared event dict in place and forward it to the collector sink."""
    event.update({
        "timestamp": int(round(time.time() * 1000)),
        "metric": prefix + "." + metric.lower(),
        "value": str(value),
        "device": device,
    })
    self.collect(event)
def new_metric(self):
    """Return a fresh metric dict pre-populated with this host's FQDN."""
    return {"host": self.fqdn}
if __name__ == '__main__':
    # Hand control to the collector framework's runner loop.
    Runner.run(SystemMetricCollector())
| apache-2.0 | -1,946,900,582,706,358,000 | 39.842262 | 166 | 0.507688 | false |
Pluto-tv/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/platform_backend.py | 6 | 8322 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import weakref
from telemetry.internal.forwarders import do_nothing_forwarder
from telemetry.internal.platform import network_controller_backend
from telemetry.internal.platform import tracing_controller_backend
# pylint: disable=W0613
class PlatformBackend(object):
  """Abstract platform services (host or attached device) used by telemetry.

  Concrete subclasses override the capability probes (the Can*/Is*Supported
  methods, which default to False here) and the corresponding actions
  (which default to raising NotImplementedError).
  """

  def __init__(self, device=None):
    """ Initalize an instance of PlatformBackend from a device optionally.
    Call sites need to use SupportsDevice before intialization to check
    whether this platform backend supports the device.
    If device is None, this constructor returns the host platform backend
    which telemetry is running on.
    Args:
      device: an instance of telemetry.core.platform.device.Device.
    """
    if device and not self.SupportsDevice(device):
      raise ValueError('Unsupported device: %s' % device.name)
    self._platform = None
    # WeakSet: a closed browser's backend must not be kept alive by this set.
    self._running_browser_backends = weakref.WeakSet()
    self._network_controller_backend = None
    self._tracing_controller_backend = None
    self._forwarder_factory = None

  def InitPlatformBackend(self):
    # Deferred construction: the controller backends need a fully built self.
    self._network_controller_backend = (
        network_controller_backend.NetworkControllerBackend(self))
    self._tracing_controller_backend = (
        tracing_controller_backend.TracingControllerBackend(self))

  @classmethod
  def IsPlatformBackendForHost(cls):
    """ Returns whether this platform backend is the platform backend to be used
    for the host device which telemetry is running on. """
    return False

  @classmethod
  def SupportsDevice(cls, device):
    """ Returns whether this platform backend supports intialization from the
    device. """
    return False

  @classmethod
  def CreatePlatformForDevice(cls, device, finder_options):
    raise NotImplementedError

  def SetPlatform(self, platform):
    # May only be assigned once.
    assert self._platform == None
    self._platform = platform

  @property
  def platform(self):
    return self._platform

  @property
  def is_host_platform(self):
    return self._platform.is_host_platform

  @property
  def running_browser_backends(self):
    # Copy to a list so callers can iterate safely while browsers close.
    return list(self._running_browser_backends)

  @property
  def network_controller_backend(self):
    return self._network_controller_backend

  @property
  def tracing_controller_backend(self):
    return self._tracing_controller_backend

  @property
  def forwarder_factory(self):
    # Lazily created; the default forwarder maps ports one-to-one (no-op).
    if not self._forwarder_factory:
      self._forwarder_factory = do_nothing_forwarder.DoNothingForwarderFactory()
    return self._forwarder_factory

  def GetRemotePort(self, port):
    # Host backend: remote port equals the local port.
    return port

  def DidCreateBrowser(self, browser, browser_backend):
    browser_options = browser_backend.browser_options
    self.SetFullPerformanceModeEnabled(browser_options.full_performance_mode)
    # TODO(slamm): Remove this call when replay browser_backend dependencies
    # get moved to platform. https://crbug.com/423962
    self._network_controller_backend.UpdateReplay(browser_backend)

  def DidStartBrowser(self, browser, browser_backend):
    assert browser not in self._running_browser_backends
    self._running_browser_backends.add(browser_backend)

  def WillCloseBrowser(self, browser, browser_backend):
    # TODO(slamm): Move this call when replay's life cycle is no longer
    # tied to the browser. https://crbug.com/424777
    self._network_controller_backend.StopReplay()
    is_last_browser = len(self._running_browser_backends) <= 1
    if is_last_browser:
      # Leave performance mode once no browser remains.
      self.SetFullPerformanceModeEnabled(False)
    self._running_browser_backends.discard(browser_backend)

  @property
  def wpr_http_device_port(self):
    return self._network_controller_backend.wpr_http_device_port

  @property
  def wpr_https_device_port(self):
    return self._network_controller_backend.wpr_https_device_port

  def IsDisplayTracingSupported(self):
    return False

  def StartDisplayTracing(self):
    """Start gathering a trace with frame timestamps close to physical
    display."""
    raise NotImplementedError()

  def StopDisplayTracing(self):
    """Stop gathering a trace with frame timestamps close to physical display.
    Returns a raw tracing events that contains the timestamps of physical
    display.
    """
    raise NotImplementedError()

  def SetFullPerformanceModeEnabled(self, enabled):
    # No-op by default; subclasses may pin CPU governors, fans, etc.
    pass

  def CanMonitorThermalThrottling(self):
    return False

  def IsThermallyThrottled(self):
    raise NotImplementedError()

  def HasBeenThermallyThrottled(self):
    raise NotImplementedError()

  def GetSystemCommitCharge(self):
    raise NotImplementedError()

  def GetSystemTotalPhysicalMemory(self):
    raise NotImplementedError()

  def GetCpuStats(self, pid):
    return {}

  def GetCpuTimestamp(self):
    return {}

  def PurgeUnpinnedMemory(self):
    pass

  def GetMemoryStats(self, pid):
    return {}

  def GetChildPids(self, pid):
    raise NotImplementedError()

  def GetCommandLine(self, pid):
    raise NotImplementedError()

  def GetDeviceTypeName(self):
    raise NotImplementedError()

  def GetArchName(self):
    raise NotImplementedError()

  def GetOSName(self):
    raise NotImplementedError()

  def GetOSVersionName(self):
    raise NotImplementedError()

  def CanFlushIndividualFilesFromSystemCache(self):
    raise NotImplementedError()

  def FlushEntireSystemCache(self):
    raise NotImplementedError()

  def FlushSystemCacheForDirectory(self, directory):
    raise NotImplementedError()

  def FlushDnsCache(self):
    pass

  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    raise NotImplementedError()

  def IsApplicationRunning(self, application):
    raise NotImplementedError()

  def CanLaunchApplication(self, application):
    return False

  def InstallApplication(self, application):
    raise NotImplementedError()

  def CanCaptureVideo(self):
    return False

  def StartVideoCapture(self, min_bitrate_mbps):
    raise NotImplementedError()

  @property
  def is_video_capture_running(self):
    return False

  def StopVideoCapture(self):
    raise NotImplementedError()

  def CanMonitorPower(self):
    return False

  def CanMeasurePerApplicationPower(self):
    return False

  def StartMonitoringPower(self, browser):
    raise NotImplementedError()

  def StopMonitoringPower(self):
    raise NotImplementedError()

  def CanMonitorNetworkData(self):
    return False

  def GetNetworkData(self, browser):
    raise NotImplementedError()

  def ReadMsr(self, msr_number, start=0, length=64):
    """Read a CPU model-specific register (MSR).
    Which MSRs are available depends on the CPU model.
    On systems with multiple CPUs, this function may run on any CPU.
    Args:
      msr_number: The number of the register to read.
      start: The least significant bit to read, zero-indexed.
          (Said another way, the number of bits to right-shift the MSR value.)
      length: The number of bits to read. MSRs are 64 bits, even on 32-bit CPUs.
    """
    raise NotImplementedError()

  @property
  def wpr_ca_cert_path(self):
    return None

  def IsCooperativeShutdownSupported(self):
    """Indicates whether CooperativelyShutdown, below, is supported.
    It is not necessary to implement it on all platforms."""
    return False

  def CooperativelyShutdown(self, proc, app_name):
    """Cooperatively shut down the given process from subprocess.Popen.
    Currently this is only implemented on Windows. See
    crbug.com/424024 for background on why it was added.
    Args:
      proc: a process object returned from subprocess.Popen.
      app_name: on Windows, is the prefix of the application's window
          class name that should be searched for. This helps ensure
          that only the application's windows are closed.
    Returns True if it is believed the attempt succeeded.
    """
    raise NotImplementedError()

  def PathExists(self, path, timeout=None, retries=None):
    """Tests whether the given path exists on the target platform.
    Args:
      path: path in request.
      timeout: timeout.
      retries: num of retries.
    Return:
      Whether the path exists on the target platform.
    """
    raise NotImplementedError()
| bsd-3-clause | 941,576,524,506,672,100 | 27.895833 | 80 | 0.731915 | false |
farseerri/git_code | tests/system/suite_QMLS/tst_QMLS05/test.py | 4 | 3000 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
# http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
# Pull in the shared QML-editor squish helpers (startQtCreatorWithNewAppAtQMLEditor etc.).
source("../shared/qmls.py")
def main():
    """Squish test: apply 'Split Initializer' refactoring and verify the result."""
    editorArea = startQtCreatorWithNewAppAtQMLEditor(tempDir(), "SampleApp", "Text {")
    if not editorArea:
        return
    # <Home> is not "go to line start" on OS X; <Ctrl+Left> is used there.
    homeKey = "<Ctrl+Left>" if platform.system() == "Darwin" else "<Home>"
    for _ in range(2):
        type(editorArea, homeKey)
    for key in ("<Return>", "<Up>", "<Tab>"):
        type(editorArea, key)
    type(editorArea, "Item { x: 10; y: 20; width: 10 }")
    for _ in range(30):
        type(editorArea, "<Left>")
    invokeMenuItem("File", "Save All")
    # activate menu and apply 'Refactoring - Split initializer'
    numLinesExpected = len(str(editorArea.plainText).splitlines()) + 4
    try:
        invokeContextMenuItem(editorArea, "Refactoring", "Split Initializer")
    except:
        # If menu item is disabled it needs to reopen the menu for updating
        invokeContextMenuItem(editorArea, "Refactoring", "Split Initializer")
    # wait until refactoring ended
    waitFor("len(str(editorArea.plainText).splitlines()) == numLinesExpected", 5000)
    # verify if refactoring was properly applied - each part on separate line
    checkMessage = "Verifying split initializer functionality at element line."
    for expectedLine in ["Item {", "x: 10;", "y: 20;", "width: 10", "}"]:
        verifyCurrentLine(editorArea, expectedLine, checkMessage)
        type(editorArea, "<Down>")
    # save and exit
    invokeMenuItem("File", "Save All")
    invokeMenuItem("File", "Exit")
| lgpl-2.1 | -1,670,832,586,332,385,500 | 44.454545 | 86 | 0.665667 | false |
sloth4413/duktape | util/dump_bytecode.py | 10 | 3509 | #!/usr/bin/python
#
# Utility to dump bytecode into a human readable form.
#
import os
import sys
import struct
import optparse
def decode_string(buf, off):
    """Read a 4-byte big-endian length prefix followed by that many raw bytes.

    Returns (new_offset, data).
    """
    (strlen,) = struct.unpack('>L', buf[off:off + 4])
    start = off + 4
    end = start + strlen
    return end, buf[start:end]
def sanitize_string(val):
    """Wrap val in single quotes, hex-escaping non-printable ASCII and quotes.

    No UTF-8 decoding is attempted; this is purely byte-wise escaping.
    """
    def esc(ch):
        code = ord(ch)
        if code < 0x20 or code > 0x7e or ch in '\'"':
            return '\\x%02x' % code
        return ch
    return "'" + ''.join([esc(ch) for ch in val]) + "'"
def decode_sanitize_string(buf, off):
    # Convenience wrapper: decode a length-prefixed string, then make it printable.
    new_off, raw = decode_string(buf, off)
    return new_off, sanitize_string(raw)
def dump_function(buf, off, ind):
    """Pretty-print one serialized duk_hcompiledfunction starting at buf[off].

    Recurses into inner functions.  `ind` is the indentation prefix used for
    nested output.  Returns the offset just past this function's data.
    """
    # Header: counts of instructions, constants, and inner functions.
    count_inst, count_const, count_funcs = struct.unpack('>LLL', buf[off:off+12])
    off += 12
    print '%sInstructions: %d' % (ind, count_inst)
    print '%sConstants: %d' % (ind, count_const)
    print '%sInner functions: %d' % (ind, count_funcs)
    nregs, nargs, start_line, end_line = struct.unpack('>HHLL', buf[off:off+12])
    off += 12
    print '%sNregs: %d' % (ind, nregs)
    print '%sNargs: %d' % (ind, nargs)
    print '%sStart line number: %d' % (ind, start_line)
    print '%sEnd line number: %d' % (ind, end_line)
    compfunc_flags, = struct.unpack('>L', buf[off:off+4])
    off += 4
    print '%sduk_hcompiledfunction flags: 0x%08x' % (ind, compfunc_flags)
    # Instructions: one 32-bit big-endian word each.
    for i in xrange(count_inst):
        ins, = struct.unpack('>L', buf[off:off+4])
        off += 4
        print '%s %06d: %08lx' % (ind, i, ins)
    print '%sConstants:' % ind
    # Constants: 1-byte type tag, then a string (0x00) or an IEEE double (0x01).
    for i in xrange(count_const):
        const_type, = struct.unpack('B', buf[off:off+1])
        off += 1
        if const_type == 0x00:
            off, strdata = decode_sanitize_string(buf, off)
            print '%s %06d: %s' % (ind, i, strdata)
        elif const_type == 0x01:
            num, = struct.unpack('>d', buf[off:off+8])
            off += 8
            print '%s %06d: %f' % (ind, i, num)
        else:
            raise Exception('invalid constant type: %d' % const_type)
    # Inner functions are serialized inline; recurse with deeper indentation.
    for i in xrange(count_funcs):
        print '%sInner function %d:' % (ind, i)
        off = dump_function(buf, off, ind + ' ')
    val, = struct.unpack('>L', buf[off:off+4])
    off += 4
    print '%s.length: %d' % (ind, val)
    off, val = decode_sanitize_string(buf, off)
    print '%s.name: %s' % (ind, val)
    off, val = decode_sanitize_string(buf, off)
    print '%s.fileName: %s' % (ind, val)
    off, val = decode_string(buf, off) # actually a buffer
    print '%s._Pc2line: %s' % (ind, val.encode('hex'))
    # _Varmap: (name, 32-bit value) pairs, terminated by an empty name.
    while True:
        off, name = decode_string(buf, off)
        if name == '':
            break
        name = sanitize_string(name)
        val, = struct.unpack('>L', buf[off:off+4])
        off += 4
        print '%s_Varmap[%s] = %d' % (ind, name, val)
    # _Formals: list of names, terminated by an empty name.
    idx = 0
    while True:
        off, name = decode_string(buf, off)
        if name == '':
            break
        name = sanitize_string(name)
        print '%s_Formals[%d] = %s' % (ind, idx, name)
        idx += 1
    return off
def dump_bytecode(buf, off, ind):
sig, ver = struct.unpack('BB', buf[off:off+2])
off += 2
if sig != 0xff:
raise Exception('invalid signature byte: %d' % sig)
if ver != 0x00:
raise Exception('unsupported bytecode version: %d' % ver)
print '%sBytecode version: 0x%02x' % (ind, ver)
off = dump_function(buf, off, ind + ' ')
return off
def main():
    """Parse options, read the bytecode file (hex-decoding if asked), and dump it."""
    parser = optparse.OptionParser()
    parser.add_option('--hex-decode', dest='hex_decode', default=False, action='store_true', help='Input file is ASCII hex encoded, decode before dump')
    (opts, args) = parser.parse_args()
    infile = open(args[0], 'rb')
    try:
        data = infile.read()
    finally:
        infile.close()
    if opts.hex_decode:
        data = data.strip().decode('hex')
    dump_bytecode(data, 0, '')
if __name__ == '__main__':
    # Script entry point: dump the bytecode file named on the command line.
    main()
| mit | 2,674,871,620,452,037,600 | 25.992308 | 149 | 0.618695 | false |
BhallaLab/moose-full | moose-examples/snippets/MULTI/multi1.py | 2 | 13980 | # multi1.py ---
# Upi Bhalla, NCBS Bangalore 2014.
#
# Commentary:
#
# This loads in a low-detail model incorporating
# reac-diff and elec signaling in neurons. The reac-diff model
# has just Ca and CaM in it, and there are no-cross-compartment
# reactions though Ca diffuses everywhere. The elec model controls the
# Ca levels in the chem compartments.
# This version uses solvers for both chem and electrical parts.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import numpy
import matplotlib.pyplot as plt
import moose
import proto18
EREST_ACT = -70e-3
def loadElec():
    """Build the channel/cell prototype library and load the CA1 cell model."""
    library = moose.Neutral( '/library' )
    moose.setCwe( '/library' )
    # Build every prototype in the library, preserving the original order.
    prototypeMakers = (
        proto18.make_Ca, proto18.make_Ca_conc, proto18.make_K_AHP,
        proto18.make_K_C, proto18.make_Na, proto18.make_K_DR,
        proto18.make_K_A, proto18.make_glu, proto18.make_NMDA,
        proto18.make_Ca_NMDA, proto18.make_NMDA_Ca_conc, proto18.make_axon,
    )
    for makePrototype in prototypeMakers:
        makePrototype()
    moose.setCwe( '/library' )
    model = moose.Neutral( '/model' )
    cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" )
    return cellId
def loadChem( diffLength ):
    """Create the chem compartments (dend/spine/psd meshes) and load the kinetics.

    NOTE(review): diffLength is currently unused in this function; the caller
    (makeNeuroMeshModel) assigns neuroCompt.diffLength itself — confirm intent.
    """
    chem = moose.Neutral( '/model/chem' )
    neuroCompt = moose.NeuroMesh( '/model/chem/kinetics' )
    neuroCompt.separateSpines = 1
    neuroCompt.geometryPolicy = 'cylinder'
    spineCompt = moose.SpineMesh( '/model/chem/compartment_1' )
    # Each spine mesh entry is paired with its parent dendrite voxel.
    moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
    psdCompt = moose.PsdMesh( '/model/chem/compartment_2' )
    #print 'Meshvolume[neuro, spine, psd] = ', neuroCompt.mesh[0].volume, spineCompt.mesh[0].volume, psdCompt.mesh[0].volume
    moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
    modelId = moose.loadModel( 'minimal.g', '/model/chem', 'ee' )
    #modelId = moose.loadModel( 'psd_merged31d.g', '/model/chem', 'ee' )
    # Rename the meshes to the short names used throughout the rest of the script.
    neuroCompt.name = 'dend'
    spineCompt.name = 'spine'
    psdCompt.name = 'psd'
def makeNeuroMeshModel():
    """Assemble the full multiscale model: elec cell + chem meshes, solvers,
    diffusion junctions, and elec->chem Ca adaptors.

    The order below matters: meshes must exist before solvers, solver paths
    must be set before junctions, and junctions before the adaptors are wired.
    The hard asserts pin the expected voxel counts for this particular model.
    """
    diffLength = 10e-6 # Aim for 2 soma compartments.
    elec = loadElec()
    loadChem( diffLength )
    neuroCompt = moose.element( '/model/chem/dend' )
    neuroCompt.diffLength = diffLength
    neuroCompt.cellPortion( elec, '/model/elec/#' )
    # Slow down diffusion for all pools; Ca gets its own (faster) constant.
    for x in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
        if (x.diffConst > 0):
            x.diffConst = 1e-11
    for x in moose.wildcardFind( '/model/chem/##/Ca' ):
        x.diffConst = 1e-10
    # Put in dend solvers
    ns = neuroCompt.numSegments
    ndc = neuroCompt.numDiffCompts
    print 'ns = ', ns, ', ndc = ', ndc
    assert( neuroCompt.numDiffCompts == neuroCompt.mesh.num )
    assert( ns == 36 ) #
    assert( ndc == 278 ) #
    nmksolve = moose.Ksolve( '/model/chem/dend/ksolve' )
    nmdsolve = moose.Dsolve( '/model/chem/dend/dsolve' )
    nmstoich = moose.Stoich( '/model/chem/dend/stoich' )
    nmstoich.compartment = neuroCompt
    nmstoich.ksolve = nmksolve
    nmstoich.dsolve = nmdsolve
    # Setting path triggers the stoichiometry build for this compartment.
    nmstoich.path = "/model/chem/dend/##"
    print 'done setting path, numPools = ', nmdsolve.numPools
    assert( nmdsolve.numPools == 1 )
    assert( nmdsolve.numAllVoxels == ndc )
    assert( nmstoich.numAllPools == 1 )
    # oddly, numLocalFields does not work.
    ca = moose.element( '/model/chem/dend/DEND/Ca' )
    assert( ca.numData == ndc )
    # Put in spine solvers. Note that these get info from the neuroCompt
    spineCompt = moose.element( '/model/chem/spine' )
    sdc = spineCompt.mesh.num
    print 'sdc = ', sdc
    assert( sdc == 13 )
    smksolve = moose.Ksolve( '/model/chem/spine/ksolve' )
    smdsolve = moose.Dsolve( '/model/chem/spine/dsolve' )
    smstoich = moose.Stoich( '/model/chem/spine/stoich' )
    smstoich.compartment = spineCompt
    smstoich.ksolve = smksolve
    smstoich.dsolve = smdsolve
    smstoich.path = "/model/chem/spine/##"
    print 'spine num Pools = ', smstoich.numAllPools
    assert( smstoich.numAllPools == 3 )
    assert( smdsolve.numPools == 3 )
    assert( smdsolve.numAllVoxels == sdc )
    # Put in PSD solvers. Note that these get info from the neuroCompt
    psdCompt = moose.element( '/model/chem/psd' )
    pdc = psdCompt.mesh.num
    assert( pdc == 13 )
    pmksolve = moose.Ksolve( '/model/chem/psd/ksolve' )
    pmdsolve = moose.Dsolve( '/model/chem/psd/dsolve' )
    pmstoich = moose.Stoich( '/model/chem/psd/stoich' )
    pmstoich.compartment = psdCompt
    pmstoich.ksolve = pmksolve
    pmstoich.dsolve = pmdsolve
    pmstoich.path = "/model/chem/psd/##"
    assert( pmstoich.numAllPools == 3 )
    assert( pmdsolve.numPools == 3 )
    assert( pmdsolve.numAllVoxels == pdc )
    foo = moose.element( '/model/chem/psd/Ca' )
    print 'PSD: numfoo = ', foo.numData
    print 'PSD: numAllVoxels = ', pmksolve.numAllVoxels
    # Put in junctions between the diffusion solvers
    nmdsolve.buildNeuroMeshJunctions( smdsolve, pmdsolve )
    """
    CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
    print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
    CaNspine = moose.vec( '/model/chem/spine/SPINE/CaN_BULK/CaN' )
    print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
    """
    ##################################################################
    # set up adaptors
    # Spine adaptors: one per spine head, elec NMDA_Ca_conc -> chem spine Ca.
    aCa = moose.Adaptor( '/model/chem/spine/adaptCa', sdc )
    adaptCa = moose.vec( '/model/chem/spine/adaptCa' )
    chemCa = moose.vec( '/model/chem/spine/Ca' )
    #print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
    assert( len( adaptCa ) == sdc )
    assert( len( chemCa ) == sdc )
    for i in range( sdc ):
        elecCa = moose.element( '/model/elec/spine_head_14_' + str(i+1) + '/NMDA_Ca_conc' )
        #print elecCa
        moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )
    moose.connect( adaptCa, 'output', chemCa, 'setConc', 'OneToOne' )
    adaptCa.inputOffset = 0.0 #
    adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
    adaptCa.scale = 1e-4 # 520 to 0.0052 mM
    #print adaptCa.outputOffset
    moose.le( '/model/chem/dend/DEND' )
    # Dend adaptors: one per electrical compartment, fanned out to its voxels.
    compts = neuroCompt.elecComptList
    begin = neuroCompt.startVoxelInCompt
    end = neuroCompt.endVoxelInCompt
    aCa = moose.Adaptor( '/model/chem/dend/DEND/adaptCa', len( compts))
    adaptCa = moose.vec( '/model/chem/dend/DEND/adaptCa' )
    chemCa = moose.vec( '/model/chem/dend/DEND/Ca' )
    #print 'aCa = ', aCa, ' foo = ', foo, "len( ChemCa ) = ", len( chemCa ), ", numData = ", chemCa.numData, "len( adaptCa ) = ", len( adaptCa )
    assert( len( chemCa ) == ndc )
    for i in zip( compts, adaptCa, begin, end ):
        # i = (elec compartment, adaptor, first voxel index, last voxel index).
        name = i[0].path + '/Ca_conc'
        if ( moose.exists( name ) ):
            elecCa = moose.element( name )
            #print i[2], i[3], '   ', elecCa
            #print i[1]
            moose.connect( elecCa, 'concOut', i[1], 'input', 'Single' )
            for j in range( i[2], i[3] ):
                moose.connect( i[1], 'output', chemCa[j], 'setConc', 'Single' )
    adaptCa.inputOffset = 0.0 #
    adaptCa.outputOffset = 0.00008 # 80 nM offset in chem.
    adaptCa.scale = 20e-6 # 10 arb units to 2 uM.
def addPlot( objpath, field, plot ):
#assert moose.exists( objpath )
if moose.exists( objpath ):
tab = moose.Table( '/graphs/' + plot )
obj = moose.element( objpath )
if obj.className == 'Neutral':
print "addPlot failed: object is a Neutral: ", objpath
return moose.element( '/' )
else:
#print "object was found: ", objpath, obj.className
moose.connect( tab, 'requestOut', obj, field )
return tab
else:
print "addPlot failed: object not found: ", objpath
return moose.element( '/' )
def makeCaPlots():
    """Set up tables recording Ca concentration in the electrical model."""
    graphs = moose.Neutral( '/graphs' )
    ca = moose.Neutral( '/graphs/ca' )
    for path, tabName in (
            ( '/model/elec/soma/Ca_conc', 'ca/somaCa' ),
            ( '/model/elec/lat_11_2/Ca_conc', 'ca/lat11Ca' ),
            ( '/model/elec/spine_head_14_4/NMDA_Ca_conc', 'ca/spine4Ca' ),
            ( '/model/elec/spine_head_14_12/NMDA_Ca_conc', 'ca/spine12Ca' ) ):
        addPlot( path, 'getCa', tabName )
def makeElecPlots():
    """Set up tables recording Vm at the soma and one spine head."""
    graphs = moose.Neutral( '/graphs' )
    elec = moose.Neutral( '/graphs/elec' )
    for path, tabName in (
            ( '/model/elec/soma', 'elec/somaVm' ),
            ( '/model/elec/spine_head_14_4', 'elec/spineVm' ) ):
        addPlot( path, 'getVm', tabName )
def makeChemPlots():
    """Set up tables recording chemical concentrations in psd, spine and dend."""
    graphs = moose.Neutral( '/graphs' )
    chem = moose.Neutral( '/graphs/chem' )
    for path, tabName in (
            ( '/model/chem/psd/Ca_CaM', 'chem/psdCaCam' ),
            ( '/model/chem/psd/Ca', 'chem/psdCa' ),
            ( '/model/chem/spine/Ca_CaM', 'chem/spineCaCam' ),
            ( '/model/chem/spine/Ca[3]', 'chem/spine4Ca' ),
            ( '/model/chem/spine/Ca[11]', 'chem/spine12Ca' ),
            ( '/model/chem/dend/DEND/Ca', 'chem/dendCa' ),
            ( '/model/chem/dend/DEND/Ca[20]', 'chem/dendCa20' ) ):
        addPlot( path, 'getConc', tabName )
def makeGraphics():
    """Build the 4-panel interactive figure and the per-voxel curves.

    Returns a tuple whose index layout the update/finalize functions rely on:
    (0 chem axis, 1 elec axis, 2 ca axis, 3 length axis, 4 figure,
     5 spine Ca line, 6 dend Ca line, 7 elec Ca line,
     8 spine CaM line, 9 psd CaM line).
    """
    plt.ion()
    fig = plt.figure( figsize=(10,16) )
    chem = fig.add_subplot( 411 )
    chem.set_ylim( 0, 0.006 )
    plt.ylabel( 'Conc (mM)' )
    plt.xlabel( 'time (seconds)' )
    plt.legend()
    elec = fig.add_subplot( 412 )
    plt.ylabel( 'Vm (V)' )
    plt.xlabel( 'time (seconds)' )
    plt.legend()
    ca = fig.add_subplot( 413 )
    plt.ylabel( '[Ca] (mM)' )
    plt.xlabel( 'time (seconds)' )
    plt.legend()
    lenplot = fig.add_subplot( 414 )
    plt.ylabel( 'Ca (mM )' )
    plt.xlabel( 'Voxel#)' )
    plt.legend()
    spineCa = moose.vec( '/model/chem/spine/Ca' )
    dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
    line1, = lenplot.plot( range( len( spineCa ) ), spineCa.conc, label='spine' )
    line2, = lenplot.plot( range( len( dendCa ) ), dendCa.conc, label='dend' )
    # NOTE(review): 0.0001 appears to rescale electrical Ca units for display
    # alongside the chemical concentrations — confirm against the elec model.
    Ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
    line3, = lenplot.plot( range( len( Ca ) ), Ca, label='elec' )
    spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )
    line4, = lenplot.plot( range( len( spineCaM ) ), spineCaM.conc, label='spineCaM' )
    psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )
    line5, = lenplot.plot( range( len( psdCaM ) ), psdCaM.conc, label='psdCaM' )
    lenplot.set_ylim( 0, 0.01 )
    fig.canvas.draw()
    return ( chem, elec, ca, lenplot, fig, line1, line2, line3, line4, line5 )
def updateGraphics( plotlist ):
    """Refresh the per-voxel concentration curves during the simulation.

    plotlist is the tuple returned by makeGraphics(); indices used here:
    4 = figure, 5 = spine Ca line, 6 = dend Ca line, 7 = elec Ca line,
    8 = spine CaM line, 9 = psd CaM line.
    """
    spineCa = moose.vec( '/model/chem/spine/Ca' )
    dendCa = moose.vec( '/model/chem/dend/DEND/Ca' )
    plotlist[5].set_ydata( spineCa.conc )
    plotlist[6].set_ydata( dendCa.conc )
    # NOTE(review): same 0.0001 display scaling as in makeGraphics — confirm.
    ca = [ x.Ca * 0.0001 for x in moose.wildcardFind( '/model/elec/##[ISA=CaConcBase]') ]
    plotlist[7].set_ydata( ca )
    spineCaM = moose.vec( '/model/chem/spine/Ca_CaM' )
    plotlist[8].set_ydata( spineCaM.conc )
    psdCaM = moose.vec( '/model/chem/psd/Ca_CaM' )
    plotlist[9].set_ydata( psdCaM.conc )
    plotlist[4].canvas.draw()
def finalizeGraphics( plotlist, cPlotDt, ePlotDt ):
    """Plot the recorded time-series tables, then block until Enter is pressed.

    plotlist indices: 0 = chem axis, 1 = elec axis, 2 = ca axis, 4 = figure.
    cPlotDt/ePlotDt are the chem and elec plotting timesteps used to build
    the time axes.
    """
    for x in moose.wildcardFind( '/graphs/chem/#[ISA=Table]' ):
        pos = numpy.arange( 0, x.vector.size, 1 ) * cPlotDt
        line1, = plotlist[0].plot( pos, x.vector, label=x.name )
    for x in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
        pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
        line1, = plotlist[1].plot( pos, x.vector, label=x.name )
    for x in moose.wildcardFind( '/graphs/ca/#[ISA=Table]' ):
        pos = numpy.arange( 0, x.vector.size, 1 ) * ePlotDt
        line1, = plotlist[2].plot( pos, x.vector, label=x.name )
    plotlist[4].canvas.draw()
    # Keep the interactive figures open until the user presses Enter.
    raw_input()
def testNeuroMeshMultiscale():
    """Top-level driver: build the model, wire clocks/solvers, run and plot.

    Clocks 0-9 tick at the electrical dt, 10-19 at the chemical dt;
    clocks 8 and 18 are repurposed for the elec and chem plot tables.
    NOTE(review): plotName is unused here — presumably a leftover.
    """
    runtime = 0.5
    #elecDt = 0.2e-6
    elecDt = 10e-6
    chemDt = 0.0025
    ePlotDt = 0.5e-3
    cPlotDt = 0.0025
    plotName = 'nm.plot'
    makeNeuroMeshModel()
    print "after model is completely done"
    for i in moose.wildcardFind( '/model/chem/#/#/#/transloc#' ):
        print i[0].name, i[0].Kf, i[0].Kb, i[0].kf, i[0].kb
    makeChemPlots()
    makeElecPlots()
    makeCaPlots()
    for i in range (10):
        moose.setClock( i, elecDt )
    for i in range ( 10, 20 ):
        moose.setClock( i, chemDt )
    moose.setClock( 8, ePlotDt )
    moose.setClock( 18, cPlotDt )
    moose.useClock( 8, '/graphs/elec/#,/graphs/ca/#', 'process' )
    moose.useClock( 18, '/graphs/chem/#', 'process' )
    # Use the fast HSolve integrator for the electrical compartments.
    hsolve = moose.HSolve( '/model/elec/hsolve' )
    hsolve.dt = elecDt
    hsolve.target = '/model/elec/compt'
    plotlist = makeGraphics()
    moose.reinit()
    # Initial stimulus: soma current injection plus chem Ca initial conditions.
    moose.element( '/model/elec/soma' ).inject = 2e-10
    moose.element( '/model/chem/psd/Ca' ).concInit = 0.001
    moose.element( '/model/chem/spine/Ca' ).concInit = 0.002
    moose.element( '/model/chem/dend/DEND/Ca' ).concInit = 0.003
    moose.reinit()
    # Run in short slices so the live plots can refresh between steps.
    numDivs = 200
    partialRuntime = runtime / numDivs
    for i in range( numDivs ):
        moose.start( partialRuntime )
        updateGraphics( plotlist )
    # moose.element( '/model/elec/soma' ).inject = 0
    # moose.start( 0.25 )
    finalizeGraphics( plotlist, cPlotDt, ePlotDt )
def main():
testNeuroMeshMultiscale()
if __name__ == '__main__':
main()
#
# minimal.py ends here.
| gpl-2.0 | 5,999,106,038,725,636,000 | 37.406593 | 141 | 0.62711 | false |
sfcta/dta | scripts/importCubeDemand.py | 2 | 9169 | __copyright__ = "Copyright 2011-2012 SFCTA"
__license__ = """
This file is part of DTA.
DTA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DTA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DTA. If not, see <http://www.gnu.org/licenses/>.
"""
import getopt
import pdb
import dta
import os
import sys
import datetime
import csv
USAGE = r"""
python importCubeDemand.py [-f demand_profile_file] dynameq_net_dir dynameq_net_prefix
cubeVehicleClass output_demand_table startTime endTime
cube_demand_table1 startTime1 endTime1 timeStep1 demand_portion1
[cube_demand_table2 startTime2 endTime2 timeStep2 demand_portion2]
[cube_demand_table3 startTime3 endTime3 timeStep3 demand_portion3]
...
e.g.
python %DTA_CODE_DIR%\scripts\importCubeDemand.py -f Y:\dta\SanFrancisco\2010\demand\DemandProfile.csv . sf_stops
Car_NoToll demand_Car_NoToll.dat 14:30 19:30
Y:\dta\SanFrancisco\2010\demand\SanFranciscoSubArea_2010_MD.csv 14:30 15:30 01:00 0.13364
Y:\dta\SanFrancisco\2010\demand\SanFranciscoSubArea_2010_PM.csv 15:30 18:30 03:00 1.00
Y:\dta\SanFrancisco\2010\demand\SanFranciscoSubArea_2010_EV.csv 18:30 19:30 01:00 0.22594
****IMPORTANT****
Input Demand tables must be input in chronological order with the earliest start time first,
and they must have non-overlapping time periods.
*****************
The example command above will construct a output a Dynameq ascii demand file, demand_Car_NoToll.dat,
covering 14:30-19:30 for the vehicle class "Car_NoToll".
The DTA network and scenario for this table will be read from the current directory and have the
prefix "sf_stops".
The demand will derived from three different input (Cube) demand files:
0.13364 of the demand from SanFranciscoSubArea_2010_MD.csv will be used for the 14:30-15:30 period,
1.0 of the demand from SanFranciscoSubArea_2010_PM.csv will be used for the 15:30-18:30 period, and
0.22594 of the demand from SanFranciscoSubArea_2010_EV.csv will be used for the 18:30-19:30 period.
Further, if a demand_profile_file is passed, then any portion of the demand can be further peaked or
distributed non-uniformly. The demand_profile_file is a csv file with the following columns:
Start Time, End Time, Factor 1, Factor 2, Factor 3,...
If a row is specified matching the start and end time of one of the input demand files, then the demand
will be distributed according to the factors. The sum of the factors must add to 1. When this is
included, then the timeStep specified with the input demand file will be ignored, and the timeStep for
this demand period will instead be the timeperiod for the demand period divided by the number of time
factors. So in the given example, the contents of the DemandProfile.csv are:
Start Time,End Time,Factor 1,Factor 2,Factor 3,Factor 4,Factor 5,Factor 6
15:30,18:30,0.15173,0.15772,0.1679,0.17848,0.17492,0.16925
So the timestep for the 15:30-16:30 period will be (3 hours / 6 periods) = 30 minutes, and
not 3 hours as specified by timeStep2=03:00.
"""
if __name__ == "__main__":
optlist, args = getopt.getopt(sys.argv[1:], "f:")
if len(args) < 11:
print USAGE
sys.exit(2)
INPUT_DYNAMEQ_NET_DIR = args[0]
INPUT_DYNAMEQ_NET_PREFIX = args[1]
CUBE_VEH_CLASS = args[2]
OUTPUT_DYNAMEQ_TABLE = args[3]
START_TIME = args[4]
END_TIME = args[5]
if optlist:
for (opt,arg) in optlist:
if opt=="-f":
DEMAND_PROFILE_FILE = arg
else:
DEMAND_PROFILE_FILE = None
dta.VehicleType.LENGTH_UNITS= "feet"
dta.Node.COORDINATE_UNITS = "feet"
dta.RoadLink.LENGTH_UNITS = "miles"
dta.setupLogging("importCubeDemand.INFO.log", "importCubeDemand.DEBUG.log", logToConsole=True)
outputStream = open(OUTPUT_DYNAMEQ_TABLE, "w")
scenario = dta.DynameqScenario()
scenario.read(INPUT_DYNAMEQ_NET_DIR, INPUT_DYNAMEQ_NET_PREFIX)
net = dta.DynameqNetwork(scenario)
net.read(INPUT_DYNAMEQ_NET_DIR, INPUT_DYNAMEQ_NET_PREFIX)
startTime = dta.Utils.Time.readFromString(START_TIME)
endTime = dta.Utils.Time.readFromString(END_TIME)
# Read in the demand profile(s) if an input file was provided
factorsStart = []
if DEMAND_PROFILE_FILE:
factorsEnd = []
factorsList = []
factorsLists = []
factorNum = 0
inputStream = open(DEMAND_PROFILE_FILE, "r")
for record in csv.DictReader(inputStream):
factorsList = []
factorsStart.append(dta.Utils.Time.readFromString(record["Start Time"]))
factorsEnd.append(dta.Utils.Time.readFromString(record["End Time"]))
ii = 1
factorNum = record["Factor %d" % ii]
while factorNum:
factorsList.append(factorNum)
ii += 1
factorNum = record["Factor %d" % ii]
factorsLists.append(factorsList)
# Check to make sure that demand is within the scenario time. Exit if not.
if startTime < scenario.startTime:
dta.DtaLogger.error("Demand cannot start before scenario start time.")
dta.DtaLogger.error("Demand start = %s, Scenario start = %s" %
(startTime.strftime("%H:%M"), scenario.startTime.strftime("%H:%M")))
sys.exit(2)
if endTime > scenario.endTime:
dta.DtaLogger.error("Demand cannot end after scenario end time.")
dta.DtaLogger.error("Demand end = %s, Scenario end = %s" %
(endTime.strftime("%H:%M"), scenario.endTime.strftime("%H:%M")))
sys.exit(2)
# Create and write out demand for each table in the correct order (earliest first and getting continualy later.)
dta.Demand.writeDynameqDemandHeader(outputStream, startTime, endTime, CUBE_VEH_CLASS)
numDemandTables = (len(args)-5)/5
for ii in range(0,numDemandTables):
CUBE_TABLE = args[6+(ii*5)]
START_TIME_N = args[7+(ii*5)]
END_TIME_N = args[8+(ii*5)]
TIME_STEP = args[9+(ii*5)]
DEMAND_PORTION = args[10+(ii*5)]
# Check to be sure time is continuous
if ii == 0:
if dta.Utils.Time.readFromString(START_TIME_N) != startTime:
dta.DtaLogger.error("Start time of first demand period (%s) must equal demand start time %s." %
(START_TIME_N, startTime.strftime("%H:%M")))
sys.exit(2)
elif ii > 0 and ii < numDemandTables-1:
if dta.Utils.Time.readFromString(START_TIME_N) != endTime_n:
dta.DtaLogger.error("Start time of demand period %d does not equal end time of demand period %d." % (ii+1, ii))
sys.exit(2)
elif ii > 0 and ii == numDemandTables-1:
if dta.Utils.Time.readFromString(END_TIME_N) != endTime:
dta.DtaLogger.error("End time of last demand period (%s) must equal demand end time %s." %
(END_TIME_N, endTime.strftime("%H:%M")))
sys.exit(2)
# Set start time, end time, and time step for the demand period
startTime_n = dta.Utils.Time.readFromString(START_TIME_N)
endTime_n = dta.Utils.Time.readFromString(END_TIME_N)
timeStep = dta.Utils.Time.readFromString(TIME_STEP)
# Check to see if demand period has a demand profile
demProf = 0
for jj in range(0,len(factorsStart)):
if startTime_n == factorsStart[jj] and endTime_n == factorsEnd[jj]:
demProf = 1
FactorsList = factorsLists[jj]
# Read in cube demand table, apply time of day factors (if applicable) and write demand out to OUTPUT_DYNAMEQ_TABLE
if demProf == 1:
timeStep = endTime_n - startTime_n
demand = dta.Demand.readCubeODTable(CUBE_TABLE, net, CUBE_VEH_CLASS,
startTime_n, endTime_n, timeStep, float(DEMAND_PORTION))
demand = demand.applyTimeOfDayFactors(FactorsList)
else:
demand = dta.Demand.readCubeODTable(CUBE_TABLE, net, CUBE_VEH_CLASS,
startTime_n, endTime_n, timeStep, float(DEMAND_PORTION))
demand.writeDynameqTable(outputStream)
dta.DtaLogger.info("Wrote %10.2f %-10s to %s" % (demand.getTotalNumTrips(), "TRIPS", OUTPUT_DYNAMEQ_TABLE))
outputStream.close()
| gpl-3.0 | -1,559,328,016,987,186,200 | 39.570796 | 127 | 0.637147 | false |
davidzyx/PythonNotes | Part II/ch07_notes.py | 1 | 9675 | # ch07_notes.py
# Chapter 7 notes taken from Automate the Boring Stuff with Python (2015).pdf
# Created by Davidzz on 7/26/2016
# Finding Patterns of Text
# Without Regular Expressions:
def isPhoneNumber(text):
if len(text) != 13:
return False
for i in range(0, 3):
if not text[i].isdecimal():
return False
if text[3] != '-':
return False
for i in range(4, 8):
if not text[i].isdecimal():
return False
if text[8] != '-':
return False
for i in range(9, 13):
if not text[i].isdecimal():
return False
return True
message = 'Call me at 136-9135-5762 tomorrow. 139-6323-4580 is my office.'
for i in range(len(message)):
chunk = message[i:i+13]
if isPhoneNumber(chunk):
print('Phone number found: ' + chunk)
print('Done')
# with Regular Expressions:
import re
# remember to use raw string (r'spam') since \ is often use as an escape character
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d\d-\d\d\d\d')
mo = phoneNumRegex.search('My number is 136-9135-5762.')
print('Phone number found: ' + mo.group())
# in one line:
print(re.compile(r'\d\d\d-\d\d\d\d-\d\d\d\d').search('My number is 136-9135-5762.').group())
# grouping ()
phoneNumRegex = re.compile(r'(\d\d\d)-(\d\d\d\d-\d\d\d\d)')
mo = phoneNumRegex.search('My number is 137-1858-1328.')
print(mo.group(1)) # '136'
print(mo.group(2)) # '9135-5762'
print(mo.group(0)) # '136-9135-5762'
print(mo.group()) # '136-9135-5762'
print(mo.groups()) # ('136', '9135-5762') tuple!
areaCode, mainNumber = mo.groups() # multiple assignment
# REs with parentheses \( \)
phoneNumRegex = re.compile(r'(\(\d\d\d\)) (\d\d\d-\d\d\d\d)')
mo = phoneNumRegex.search('My phone number is (415) 555-4242.')
print(mo.group(1)) # '(415)'
print(mo.group(2)) # '555-4242'
# Matching Multiple Groups with the Pipe |
heroRegex = re.compile (r'Batman|Tina Fey')
mo1 = heroRegex.search('Batman and Tina Fey.')
print(mo1.group()) # 'Batman'
mo2 = heroRegex.search('Tina Fey and Batman.')
print(mo2.group()) # 'Tina Fey'
batRegex = re.compile(r'Bat(man|mobile|copter|bat)')
mo = batRegex.search('Batmobile lost a wheel')
print(mo.group()) # 'Batmobile'
print(mo.group(1)) # 'mobile'
# Optionals ()?
batRegex = re.compile(r'Bat(wo)?man')
mo1 = batRegex.search('The Adventures of Batman')
print(mo1.group()) # 'Batman'
mo2 = batRegex.search('The Adventures of Batwoman')
print(mo2.group()) # 'Batwoman'
phoneRegex = re.compile(r'(\d\d\d-)?\d\d\d-\d\d\d\d')
mo1 = phoneRegex.search('My number is 415-555-4242')
print(mo1.group()) # '415-555-4242'
mo2 = phoneRegex.search('My number is 555-4242')
print(mo2.group()) # '555-4242'
# Matching Zero or More with the Star ()*
batRegex = re.compile(r'Bat(wo)*man')
mo1 = batRegex.search('The Adventures of Batman')
print(mo1.group()) # 'Batman'
mo2 = batRegex.search('The Adventures of Batwoman')
print(mo2.group()) # 'Batwoman'
mo3 = batRegex.search('The Adventures of Batwowowowoman')
print(mo3.group()) # 'Batwowowowoman'
# Matching One or More with the Plus ()+
batRegex = re.compile(r'Bat(wo)+man')
mo1 = batRegex.search('The Adventures of Batwoman')
print(mo1.group()) # 'Batwoman'
mo2 = batRegex.search('The Adventures of Batwowowowoman')
print(mo2.group()) # 'Batwowowowoman'
mo3 = batRegex.search('The Adventures of Batman')
print(mo3 == None) # True
# Matching Specific Repetitions with Curly Brackets (){number/expression}
# (Ha){3}
# (Ha)(Ha)(Ha)
# (Ha){3,5} `# omiting one of the variable will make it open interval, min == 0
# (Ha)(Ha)(Ha))|((Ha)(Ha)(Ha)(Ha))|((Ha)(Ha)(Ha)(Ha)(Ha))
# Greedy and Nongreedy Matching
# default: greedy - return first longest match
# nongreedy - return first shortest match
greedyHaRegex = re.compile(r'(Ha){3,5}')
mo1 = greedyHaRegex.search('HaHaHaHaHa')
print(mo1.group()) # 'HaHaHaHaHa'
nongreedyHaRegex = re.compile(r'(Ha){3,5}?')
mo2 = nongreedyHaRegex.search('HaHaHaHaHa')
print(mo2.group()) # 'HaHaHa'
# findall()
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d') # has no groups
print(phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000'))
# ['415-555-9999', '212-555-0000']
phoneNumRegex = re.compile(r'(\d\d\d)-(\d\d\d)-(\d\d\d\d)') # has groups
print(phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000'))
# [('415', '555', '9999'), ('212', '555', '0000')]
# Shorthand Codes for Common Character Classes
# class Represents
# \d Any numeric digit from 0 to 9.
# \D Any character that is not a numeric digit from 0 to 9.
# \w Any letter, numeric digit, or the underscore character. (Think of this as matching "word" characters.)
# \W Any character that is not a letter, numeric digit, or the underscore character.
# \s Any space, tab, or newline character. (Think of this as matching "space" characters.)
# \S Any character that is not a space, tab, or newline.
# Custom characters []
vowelRegex = re.compile(r'[aeiouAEIOU]')
print(vowelRegex.findall('Robo op eats baby food. BABY FOOD.'))
# ['o', 'o', 'o', 'e', 'a', 'a', 'o', 'o', 'A', 'O', 'O']
consonantRegex = re.compile(r'[^aeiouAEIOU]') # ^ stands for NOT
consonantRegex.findall('Robo op eats baby food. BABY FOOD.')
# ['R', 'b', 'c', 'p', ' ', 't', 's', ' ', 'b', 'b', 'y', ' ', 'f', 'd', '.', ' ', 'B', 'B', 'Y', ' ', 'F', 'D', '.']
# must start with or end with ^ $
# ^ :
beginsWithHello = re.compile(r'^Hello')
print(beginsWithHello.search('Hello world!'))
# <_sre.SRE_Match object; span=(0, 5), match='Hello'>
print(beginsWithHello.search('He said hello.') == None) # True
# $ :
endsWithNumber = re.compile(r'\d$')
print(endsWithNumber.search('Your number is 42'))
# <_sre.SRE_Match object; span=(16, 17), match='2'>
print(endsWithNumber.search('Your number is forty two.') == None) # True
# borh ^ and $ :
wholeStringIsNum = re.compile(r'^\d+$')
print(wholeStringIsNum.search('1234567890'))
# <_sre.SRE_Match object; span=(0, 10), match='1234567890'>
# The Wildcard Character . matches anything except newline
atRegex = re.compile(r'.at')
atRegex.findall('The cat in the hat sat on the flat mat.')
# ['cat', 'hat', 'sat', 'lat', 'mat']
# Matching Everything with Dot-Star .*
nameRegex = re.compile(r'First Name: (.*) Last Name: (.*)')
mo = nameRegex.search('First Name: Al Last Name: Sweigart')
print(mo.group(1)) # 'Al'
print(mo.group(2)) # 'Sweigart'
# using .*? enables nongreedy search
nongreedyRegex = re.compile(r'<.*?>')
mo = nongreedyRegex.search('<To serve man> for dinner.>')
print(mo.group()) # '<To serve man>'
# matching longer one
greedyRegex = re.compile(r'<.*>')
mo = greedyRegex.search('<To serve man> for dinner.>')
print(mo.group()) # '<To serve man> for dinner.>'
# Matching Newlines with the Dot Character ('.*', re.DOTALL)
noNewlineRegex = re.compile('.*')
print(noNewlineRegex.search('Serve the public trust.\nProtect the innocent. \nUphold the law.').group())
# 'Serve the public trust.'
newlineRegex = re.compile('.*', re.DOTALL)
print(newlineRegex.search('Serve the public trust.\nProtect the innocent. \nUphold the law.').group())
# 'Serve the public trust.\nProtect the innocent.\nUphold the law.'
# Summary of Regex symbols
# This chapter covered a lot of notation, so here's a quick review of what you learned:
# ` The ? matches zero or one of the preceding group.
# ` The * matches zero or more of the preceding group.
# ` The + matches one or more of the preceding group.
# ` The {n} matches exactly n of the preceding group.
# ` The {n,} matches n or more of the preceding group.
# ` The {,m} matches 0 to m of the preceding group.
# ` The {n,m} matches at least n and at most m of the preceding group.
# ` {n,m}? or *? or +? performs a nongreedy match of the preceding group.
# ` ^spam means the string must begin with spam.
# ` spam$ means the string must end with spam.
# ` The . matches any character, except newline characters.
# ` \d, \w, and \s match a digit, word, or space character, respectively.
# ` \D, \W, and \S match anything except a digit, word, or space character, respectively.
# ` [abc] matches any character between the brackets (such as a, b, or c).
# ` [^abc] matches any character that isn't between the brackets.
# Case-Insensitive Matching re.IGNORECASE or re.I
robocop = re.compile(r'robocop', re.IGNORECASE)
print(robocop.search('RoboCop is part man, part machine, all cop.').group())
# 'Robo op'
# Substituting Strings with the sub() Method
namesRegex = re.compile(r'Agent \w+')
print(namesRegex.sub('CENSORED', 'Agent Alice gave the secret documents to Agent Bob.'))
# 'CENSORED gave the secret documents to CENSORED.'
# advanced substituting
agentNamesRegex = re.compile(r'Agent (\w)\w*')
print(agentNamesRegex.sub(r'\1****', 'Agent Alice told Agent Carol that Agent Eve knew Agent Bob was a double agent.'))
# A**** told C**** that E**** knew B**** was a double agent.'
# Managing Complex Regexes
phoneRegex = re.compile(r'((\d{3}|\(\d{3}\))?(\s|-|\.)?\d{3}(\s|-|\.)\d{4}(\s*(ext|x|ext.)\s*\d{2,5})?)')
phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code
(\s|-|\.)? # separator
\d{3} # first 3 digits
(\s|-|\.) # separator
\d{4} # last 4 digits
(\s*(ext|x|ext.)\s*\d{2,5})? # extension
)''', re.VERBOSE)
# Combining re.IGNORECASE, re.DOTALL, and re.VERBOSE |
someRegexValue = re.compile('foo', re.IGNORECASE | re.DOTALL)
someRegexValue = re.compile('foo', re.IGNORECASE | re.DOTALL | re.VERBOSE)
| gpl-3.0 | 7,259,667,673,570,413,000 | 41.434211 | 119 | 0.642687 | false |
stxnext-kindergarten/presence-analyzer-dczuba | src/presence_analyzer/decorators.py | 1 | 1346 | # -*- coding: utf-8 -*-
"""
Decorators
"""
from functools import wraps
from datetime import datetime, timedelta
from threading import Lock
import logging
from presence_analyzer.helpers import generate_cache_key
log = logging.getLogger(__name__) # pylint: disable=C0103
def cache(time=60*60):
"""
Cache in local mem for given time
"""
# structure:
# indexes are generated keys
# value is dict: {'valid_till': <datetime.datetime>, 'data': <dict>}
cached_data = {}
lock = Lock()
def decorator(func):
@wraps(func)
def wrapped_function(*args, **kwargs):
""" Wrapper """
key = generate_cache_key(func, args, kwargs)
refresh_key = (
key not in cached_data or
(cached_data[key]['valid_till']-datetime.now()).seconds <= 0
)
if refresh_key:
log.debug('Refreshing cache for %s' % key)
with lock:
cached_data[key] = {
'valid_till': datetime.now()+timedelta(seconds=time),
'data': func(*args, **kwargs)
}
else:
log.debug('Retrieving from cache %s' % key)
return cached_data[key]['data']
return wrapped_function
return decorator
| mit | 499,990,105,168,777,500 | 25.92 | 77 | 0.535661 | false |
nirs/vdsm | lib/vdsm/supervdsm_api/virt.py | 2 | 7344 | # Copyright 2016-2021 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from __future__ import division
import logging
import os
import stat
import tempfile
import uuid
from vdsm.constants import P_LIBVIRT_VMCHANNELS, P_OVIRT_VMCONSOLES
from vdsm.storage.fileUtils import resolveGid
from vdsm.virt import filedata
from vdsm.common import exception
from vdsm.common import password
from vdsm.common.fileutils import parse_key_val_file
from . import expose
@expose
def prepareVmChannel(socketFile, group=None):
if (socketFile.startswith(P_LIBVIRT_VMCHANNELS) or
socketFile.startswith(P_OVIRT_VMCONSOLES)):
fsinfo = os.stat(socketFile)
mode = fsinfo.st_mode | stat.S_IWGRP
os.chmod(socketFile, mode)
if group is not None:
os.chown(socketFile,
fsinfo.st_uid,
resolveGid(group))
else:
raise Exception("Incorporate socketFile")
@expose
def hugepages_alloc(count, path):
"""
Function to allocate hugepages. Thread-safety not guaranteed.
The default size depends on the architecture:
x86_64: 2 MiB
POWER8: 16 MiB
Args:
count (int): Number of huge pages to be allocated. Negative count
deallocates pages.
Returns:
int: The number of successfully allocated hugepages.
"""
existing_pages = 0
allocated_pages = 0
with open(path, 'r') as f:
existing_pages = int(f.read())
count = max(-existing_pages, count)
with open(path, 'w') as f:
f.write(str(existing_pages + count))
with open(path, 'r') as f:
allocated_pages = int(f.read()) - existing_pages
return allocated_pages
@expose
def mdev_create(device, mdev_type, mdev_uuid=None):
"""Create the desired mdev type.
Args:
device: PCI address of the parent device in the format
(domain:bus:slot.function). Example: 0000:06:00.0.
mdev_type: Type to be spawned. Example: nvidia-11.
mdev_uuid: UUID for the spawned device. Keeping None generates a new
UUID.
Returns:
UUID (string) of the created device.
Raises:
Possibly anything related to sysfs write (IOError).
"""
path = os.path.join(
'/sys/class/mdev_bus/{}/mdev_supported_types/{}/create'.format(
device, mdev_type
)
)
if mdev_uuid is None:
mdev_uuid = str(uuid.uuid4())
with open(path, 'w') as f:
f.write(mdev_uuid)
return mdev_uuid
@expose
def mdev_delete(device, mdev_uuid):
"""
Args:
device: PCI address of the parent device in the format
(domain:bus:slot.function). Example: 0000:06:00.0.
mdev_type: Type to be spawned. Example: nvidia-11.
mdev_uuid: UUID for the spawned device. Keeping None generates a new
UUID.
Raises:
Possibly anything related to sysfs write (IOError).
"""
path = os.path.join(
'/sys/class/mdev_bus/{}/{}/remove'.format(
device, mdev_uuid
)
)
with open(path, 'w') as f:
f.write('1')
QEMU_CONFIG_FILE = '/etc/libvirt/qemu.conf'
@expose
def check_qemu_conf_contains(key, value):
"""
Checks if qemu.conf contains the given key-value config.
"""
try:
kvs = parse_key_val_file(QEMU_CONFIG_FILE)
return kvs.get(key, '0') == value
except:
logging.error('Could not check %s for %s', QEMU_CONFIG_FILE, key)
# re-raised exception will be logged, no need to log it here
raise
@expose
def read_tpm_data(vm_id, last_modified):
"""
Return TPM data of the given VM.
If data is not newer than `last_modified`, return None.
In addition to data, the last detected data modification time is
returned; the returned data may be newer, but never older than the
returned time.
:param vm_id: VM id
:type vm_id: string
:param last_modified: if data file system time stamp is not
newer than this time in seconds, None is returned
:type last_modified: float
:returns: tuple (DATA, MODIFIED) where DATA is encoded TPM data suitable to
use in `write_tpm_data()`, wrapped by `password.ProtectedPassword`,
or None, and MODIFIED is DATA modification time (which may be older than
actual modification time)
:rtype: tuple
"""
accessor = filedata.DirectoryData(filedata.tpm_path(vm_id),
compress=False)
currently_modified = accessor.last_modified()
data = accessor.retrieve(last_modified=last_modified)
return password.ProtectedPassword(data), currently_modified
@expose
def write_tpm_data(vm_id, tpm_data):
"""
Write TPM data for the given VM.
:param vm_id: VM id
:type vm_id: string
:param tpm_data: encoded TPM data as previously obtained from
`read_tpm_data()`
:type tpm_data: ProtectedPassword
"""
tpm_data = password.unprotect(tpm_data)
# Permit only archives with plain files and directories to prevent various
# kinds of attacks.
with tempfile.TemporaryDirectory() as d:
accessor = filedata.DirectoryData(os.path.join(d, 'check'))
accessor.store(tpm_data)
for root, dirs, files in os.walk(d):
for f in files:
path = os.path.join(root, f)
if not os.path.isfile(path):
logging.error("Special file in TPM data: %s", path)
raise exception.ExternalDataFailed(
reason="Cannot write TPM data with non-regular files",
path=path
)
# OK, write the data to the target location
accessor = filedata.DirectoryData(filedata.tpm_path(vm_id))
accessor.store(tpm_data)
@expose
def read_nvram_data(vm_id, last_modified):
accessor = filedata.FileData(filedata.nvram_path(vm_id))
currently_modified = accessor.last_modified()
data = accessor.retrieve(last_modified=last_modified)
return password.ProtectedPassword(data), currently_modified
@expose
def write_nvram_data(vm_id, nvram_data):
nvram_data = password.unprotect(nvram_data)
nvram_path = filedata.nvram_path(vm_id)
# Create the file with restricted permissions owned by root
if os.path.exists(nvram_path):
os.remove(nvram_path)
fd = os.open(
nvram_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL, mode=0o600)
os.close(fd)
# Write content
accessor = filedata.FileData(nvram_path)
accessor.store(nvram_data)
| gpl-2.0 | 3,676,116,486,304,709,000 | 29.857143 | 79 | 0.652914 | false |
ghjm/ansible | lib/ansible/plugins/shell/powershell.py | 29 | 11352 | # Copyright (c) 2014, Chris Church <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: powershell
version_added: historical
short_description: Windows PowerShell
description:
- The only option when using 'winrm' or 'psrp' as a connection plugin.
- Can also be used when using 'ssh' as a connection plugin and the C(DefaultShell) has been configured to PowerShell.
extends_documentation_fragment:
- shell_windows
'''
import base64
import os
import re
import shlex
import pkgutil
import xml.etree.ElementTree as ET
import ntpath
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.shell import ShellBase
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive', '-ExecutionPolicy', 'Unrestricted']
def _parse_clixml(data, stream="Error"):
"""
Takes a byte string like '#< CLIXML\r\n<Objs...' and extracts the stream
message encoded in the XML data. CLIXML is used by PowerShell to encode
multiple objects in stderr.
"""
lines = []
# There are some scenarios where the stderr contains a nested CLIXML element like
# '<# CLIXML\r\n<# CLIXML\r\n<Objs>...</Objs><Objs>...</Objs>'.
# Parse each individual <Objs> element and add the error strings to our stderr list.
# https://github.com/ansible/ansible/issues/69550
while data:
end_idx = data.find(b"</Objs>") + 7
current_element = data[data.find(b"<Objs "):end_idx]
data = data[end_idx:]
clixml = ET.fromstring(current_element)
namespace_match = re.match(r'{(.*)}', clixml.tag)
namespace = "{%s}" % namespace_match.group(1) if namespace_match else ""
strings = clixml.findall("./%sS" % namespace)
lines.extend([e.text.replace('_x000D__x000A_', '') for e in strings if e.attrib.get('S') == stream])
return to_bytes('\r\n'.join(lines))
class ShellModule(ShellBase):
# Common shell filenames that this plugin handles
# Powershell is handled differently. It's selected when winrm is the
# connection
COMPATIBLE_SHELLS = frozenset()
# Family of shells this has. Must match the filename without extension
SHELL_FAMILY = 'powershell'
_SHELL_REDIRECT_ALLNULL = '> $null'
_SHELL_AND = ';'
# Used by various parts of Ansible to do Windows specific changes
_IS_WINDOWS = True
# TODO: add binary module support
def env_prefix(self, **kwargs):
# powershell/winrm env handling is handled in the exec wrapper
return ""
def join_path(self, *args):
# use normpath() to remove doubled slashed and convert forward to backslashes
parts = [ntpath.normpath(self._unquote(arg)) for arg in args]
# Becuase ntpath.join treats any component that begins with a backslash as an absolute path,
# we have to strip slashes from at least the beginning, otherwise join will ignore all previous
# path components except for the drive.
return ntpath.join(parts[0], *[part.strip('\\') for part in parts[1:]])
def get_remote_filename(self, pathname):
# powershell requires that script files end with .ps1
base_name = os.path.basename(pathname.strip())
name, ext = os.path.splitext(base_name.strip())
if ext.lower() not in ['.ps1', '.exe']:
return name + '.ps1'
return base_name.strip()
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
return path.endswith('/') or path.endswith('\\')
def chmod(self, paths, mode):
raise NotImplementedError('chmod is not implemented for Powershell')
def chown(self, paths, user):
raise NotImplementedError('chown is not implemented for Powershell')
def set_user_facl(self, paths, user, mode):
raise NotImplementedError('set_user_facl is not implemented for Powershell')
def remove(self, path, recurse=False):
path = self._escape(self._unquote(path))
if recurse:
return self._encode_script('''Remove-Item '%s' -Force -Recurse;''' % path)
else:
return self._encode_script('''Remove-Item '%s' -Force;''' % path)
def mkdtemp(self, basefile=None, system=False, mode=None, tmpdir=None):
# Windows does not have an equivalent for the system temp files, so
# the param is ignored
if not basefile:
basefile = self.__class__._generate_temp_dir_name()
basefile = self._escape(self._unquote(basefile))
basetmpdir = tmpdir if tmpdir else self.get_option('remote_tmp')
script = '''
$tmp_path = [System.Environment]::ExpandEnvironmentVariables('%s')
$tmp = New-Item -Type Directory -Path $tmp_path -Name '%s'
Write-Output -InputObject $tmp.FullName
''' % (basetmpdir, basefile)
return self._encode_script(script.strip())
def expand_user(self, user_home_path, username=''):
# PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
# not seem to work remotely, though by default we are always starting
# in the user's home directory.
user_home_path = self._unquote(user_home_path)
if user_home_path == '~':
script = 'Write-Output (Get-Location).Path'
elif user_home_path.startswith('~\\'):
script = "Write-Output ((Get-Location).Path + '%s')" % self._escape(user_home_path[1:])
else:
script = "Write-Output '%s'" % self._escape(user_home_path)
return self._encode_script(script)
def exists(self, path):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path '%s')
{
$res = 0;
}
Else
{
$res = 1;
}
Write-Output '$res';
Exit $res;
''' % path
return self._encode_script(script)
def checksum(self, path, *args, **kwargs):
path = self._escape(self._unquote(path))
script = '''
If (Test-Path -PathType Leaf '%(path)s')
{
$sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
$fp = [System.IO.File]::Open('%(path)s', [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
}
ElseIf (Test-Path -PathType Container '%(path)s')
{
Write-Output "3";
}
Else
{
Write-Output "1";
}
''' % dict(path=path)
return self._encode_script(script)
def build_module_command(self, env_string, shebang, cmd, arg_path=None):
bootstrap_wrapper = pkgutil.get_data("ansible.executor.powershell", "bootstrap_wrapper.ps1")
# pipelining bypass
if cmd == '':
return self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
# non-pipelining
cmd_parts = shlex.split(cmd, posix=False)
cmd_parts = list(map(to_text, cmd_parts))
if shebang and shebang.lower() == '#!powershell':
if not self._unquote(cmd_parts[0]).lower().endswith('.ps1'):
# we're running a module via the bootstrap wrapper
cmd_parts[0] = '"%s.ps1"' % self._unquote(cmd_parts[0])
wrapper_cmd = "type " + cmd_parts[0] + " | " + self._encode_script(script=bootstrap_wrapper, strict_mode=False, preserve_rc=False)
return wrapper_cmd
elif shebang and shebang.startswith('#!'):
cmd_parts.insert(0, shebang[2:])
elif not shebang:
# The module is assumed to be a binary
cmd_parts[0] = self._unquote(cmd_parts[0])
cmd_parts.append(arg_path)
script = '''
Try
{
%s
%s
}
Catch
{
$_obj = @{ failed = $true }
If ($_.Exception.GetType)
{
$_obj.Add('msg', $_.Exception.Message)
}
Else
{
$_obj.Add('msg', $_.ToString())
}
If ($_.InvocationInfo.PositionMessage)
{
$_obj.Add('exception', $_.InvocationInfo.PositionMessage)
}
ElseIf ($_.ScriptStackTrace)
{
$_obj.Add('exception', $_.ScriptStackTrace)
}
Try
{
$_obj.Add('error_record', ($_ | ConvertTo-Json | ConvertFrom-Json))
}
Catch
{
}
Echo $_obj | ConvertTo-Json -Compress -Depth 99
Exit 1
}
''' % (env_string, ' '.join(cmd_parts))
return self._encode_script(script, preserve_rc=False)
def wrap_for_exec(self, cmd):
return '& %s; exit $LASTEXITCODE' % cmd
def _unquote(self, value):
'''Remove any matching quotes that wrap the given value.'''
value = to_text(value or '')
m = re.match(r'^\s*?\'(.*?)\'\s*?$', value)
if m:
return m.group(1)
m = re.match(r'^\s*?"(.*?)"\s*?$', value)
if m:
return m.group(1)
return value
def _escape(self, value):
'''Return value escaped for use in PowerShell single quotes.'''
# There are 5 chars that need to be escaped in a single quote.
# https://github.com/PowerShell/PowerShell/blob/b7cb335f03fe2992d0cbd61699de9d9aafa1d7c1/src/System.Management.Automation/engine/parser/CharTraits.cs#L265-L272
return re.compile(u"(['\u2018\u2019\u201a\u201b])").sub(u'\\1\\1', value)
    def _encode_script(self, script, as_list=False, strict_mode=True, preserve_rc=True):
        '''Convert a PowerShell script to a single base64-encoded command.

        :param script: the script text, or the sentinel u'-' to read the
            script from stdin instead of encoding it.
        :param as_list: return the command as a list of parts instead of a
            single joined string.
        :param strict_mode: prepend Set-StrictMode -Version Latest.
        :param preserve_rc: append a trailer that re-raises $LASTEXITCODE.
        '''
        script = to_text(script)
        if script == u'-':
            # Stdin mode: no encoding needed, just pass '-Command -'.
            cmd_parts = _common_args + ['-Command', '-']
        else:
            if strict_mode:
                script = u'Set-StrictMode -Version Latest\r\n%s' % script
            # try to propagate exit code if present- won't work with begin/process/end-style scripts (ala put_file)
            # NB: the exit code returned may be incorrect in the case of a successful command followed by an invalid command
            if preserve_rc:
                script = u'%s\r\nIf (-not $?) { If (Get-Variable LASTEXITCODE -ErrorAction SilentlyContinue) { exit $LASTEXITCODE } Else { exit 1 } }\r\n'\
                    % script
            # Strip blank lines/indentation to shrink the encoded payload,
            # then encode as UTF-16-LE base64 as -EncodedCommand requires.
            script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
            encoded_script = to_text(base64.b64encode(script.encode('utf-16-le')), 'utf-8')
            cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
        if as_list:
            return cmd_parts
        return ' '.join(cmd_parts)
| gpl-3.0 | 4,865,950,606,119,699,000 | 38.554007 | 167 | 0.580162 | false |
adamreis/nyc-jazz | src/application/urls.py | 1 | 1283 | """
urls.py
URL dispatch route mappings and error handlers
"""
from flask import render_template
from application import app
from application import views
## URL dispatch rules

# App Engine warm up handler
# See http://code.google.com/appengine/docs/python/config/appconfig.html#Warming_Requests
app.add_url_rule('/_ah/warmup', 'warmup', view_func=views.warmup)

# Home page
app.add_url_rule('/', 'home', view_func=views.home, methods=['GET', 'POST'])

# Scraping endpoints: fetch all listings, or a single venue's listings.
app.add_url_rule('/scrape', view_func=views.scrape_everything, methods=['GET', 'POST'])
app.add_url_rule('/scrape-smoke', view_func=views.scrape_smoke, methods=['GET', 'POST'])
app.add_url_rule('/scrape-freetime', view_func=views.scrape_freetime, methods=['GET', 'POST'])

# Email endpoints: send a test message, or the digest to subscribers.
app.add_url_rule('/email-test', view_func=views.email_test, methods=['GET'])
app.add_url_rule('/digest-send', view_func=views.digest_send, methods=['GET'])

# Unsubscribe the user referenced by the opaque identifier in the URL.
app.add_url_rule('/unsubscribe/<identifier>', view_func=views.unsubscribe, methods=['GET'])
## Error handlers
# Handle 404 errors with the custom template.
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page along with the matching status code."""
    body = render_template('404.html')
    return body, 404
# Handle 500 errors with the custom template.
@app.errorhandler(500)
def server_error(e):
    """Serve the custom 500 page along with the matching status code."""
    body = render_template('500.html')
    return body, 500
| mit | -9,157,879,621,468,564,000 | 28.837209 | 94 | 0.721746 | false |
spcui/virt-test | tests/lvm.py | 3 | 2933 | import os
import logging
from autotest.client.shared import error
@error.context_aware
def mount_lv(lv_path, session):
    """Create the guest-side mount point and mount the LV's ext3 fs on it."""
    error.context("mounting ext3 filesystem made on logical volume %s" %
                  os.path.basename(lv_path))
    mount_point = "/mnt/kvm_test_lvm"
    session.cmd("mkdir -p %s" % mount_point)
    session.cmd("mount %s %s" % (lv_path, mount_point))
@error.context_aware
def umount_lv(lv_path, session):
    """Unmount the LV inside the guest and remove its mount point."""
    error.context("umounting ext3 filesystem made on logical volume %s" %
                  os.path.basename(lv_path))
    for command in ("umount %s" % lv_path, "rm -rf /mnt/kvm_test_lvm"):
        session.cmd(command)
@error.context_aware
def run_lvm(test, params, env):
    """
    KVM LVM test:
    1) Log into a guest
    2) Create a volume group and add both disks as pv to the Group
    3) Create a logical volume on the VG
    4) `fsck' to check the partition that LV locates

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=timeout)

    vg_name = "vg_kvm_test"
    lv_name = "lv_kvm_test"
    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
    # Guest block devices used as physical volumes (space-separated).
    disks = params.get("disks", "/dev/hdb /dev/hdc")
    # clean == "yes": tear everything down at the end;
    # clean == "no": leave the LV mounted for later tests.
    clean = params.get("clean", "yes")
    timeout = params.get("lvm_timeout", "600")
    try:
        error.context("adding physical volumes %s" % disks, logging.info)
        session.cmd("pvcreate %s" % disks)

        error.context("creating a volume group out of %s" % disks,
                      logging.info)
        session.cmd("vgcreate %s %s" % (vg_name, disks))

        error.context("activating volume group %s" % vg_name)
        session.cmd("vgchange -ay %s" % vg_name)

        error.context("creating logical volume on volume group %s" % vg_name,
                      logging.info)
        session.cmd("lvcreate -L2000 -n %s %s" % (lv_name, vg_name))

        error.context(
            "creating ext3 filesystem on logical volume %s" % lv_name)
        # 'yes |' auto-confirms the mkfs prompt on an existing device.
        session.cmd("yes | mkfs.ext3 %s" % lv_path, timeout=int(timeout))

        # Mount/umount once so fsck below checks a filesystem that has
        # actually been used.
        mount_lv(lv_path, session)
        umount_lv(lv_path, session)

        error.context("checking ext3 filesystem made on logical volume %s" %
                      lv_name, logging.info)
        session.cmd("fsck %s" % lv_path, timeout=int(timeout))

        if clean == "no":
            mount_lv(lv_path, session)
    finally:
        if clean == "yes":
            umount_lv(lv_path, session)

            error.context("removing logical volume %s" % lv_name)
            session.cmd("lvremove %s" % lv_name)

            error.context("disabling volume group %s" % vg_name)
            session.cmd("vgchange -a n %s" % vg_name)

            error.context("removing volume group %s" % vg_name)
            session.cmd("vgremove -f %s" % vg_name)
arbrandes/edx-platform | openedx/core/djangoapps/demographics/migrations/0002_clean_duplicate_entries.py | 5 | 1861 | import logging
from django.conf import settings
from django.db import migrations, models
log = logging.getLogger(__name__)
def _clean_duplicate_entries(apps, schema_editor):
    """
    Remove duplicate per-user rows from the UserDemographics model, keeping
    only the most recently modified entry for each user.
    """
    demographics_model = apps.get_model('demographics', 'UserDemographics')
    # Users owning more than one UserDemographics row.
    duplicate_users = (
        demographics_model.objects.values(
            'user'
        ).annotate(models.Count('id')).values('user').order_by().filter(id__count__gt=1)
    )
    # All rows for those users, newest first within each user, so the first
    # row encountered per user is the one to keep.
    user_demographic_dupes = demographics_model.objects.filter(user__in=duplicate_users).order_by('user', '-modified')
    # BUGFIX: the original code logged the "Duplicate User Delete" message on
    # the keep branch and the plain message on the delete branch; the two
    # messages were swapped so each branch logs what it actually does.
    seen_user_ids = set()
    for demographic in user_demographic_dupes:
        if demographic.user_id in seen_user_ids:
            # An earlier (newer) row for this user was already kept.
            log.info('UserDemographics Duplicate User Delete {user} -- {modified}'.format(
                user=demographic.user_id, modified=demographic.modified
            ))
            demographic.delete()
        else:
            log.info('UserDemographics {user} -- {modified}'.format(
                user=demographic.user_id, modified=demographic.modified
            ))
            seen_user_ids.add(demographic.user_id)
class Migration(migrations.Migration):
    """Data migration: deduplicate UserDemographics rows per user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('demographics', '0001_initial'),
    ]

    operations = [
        # Reverse is a no-op: deleted duplicate rows cannot be restored.
        migrations.RunPython(_clean_duplicate_entries, migrations.RunPython.noop),
    ]
Hazardius/wesnoth-maps-guess | __init__.py | 1 | 2255 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import getopt
import hashlib
import sys
from hop_net import HopfieldNetwork
def usage():
print ""
print " Valid arguments are:"
print ""
print " --debug - run generator in debug mode"
print " --help - show this message"
print " --no-save - don't save the results - only show them in console"
print " --out - output file, default \"results.txt\""
print " --patterns - output file, default \"patternsT.pat\""
print " --prob - prob mode - patterns are given in probabilities"
print " --seed - seed for RNG"
print " --tests - output file, default \"testsT.tst\""
print " -d - same as --debug"
print " -h - same as --help"
print " -n - same as --no-save"
print " -o - same as --out"
print " -p - same as --patterns"
print " -s - same as --seed"
print " -t - same as --tests"
print ""
def main(argv):
    """Parse command-line options and run the Hopfield network experiment.

    :param argv: argument list, typically sys.argv[1:]
    """
    debug = False
    save_res = True
    prob = False
    patterns_file = "patternsT.pat"
    tests_file = "testsT.tst"
    out = "results.txt"
    seed = None
    # Fixed network input size expected by the pattern/test files.
    inputs_number = 98
    try:
        opts, _ = getopt.getopt(
            argv,
            "dhno:p:s:t:",
            ["help", "debug", "no-save", "out=", "patterns=", "prob", "seed=", "tests="]
        )
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', "--help"):
            usage()
            sys.exit()
        elif opt in ('-d', "--debug"):
            debug = True
        elif opt in ('-n', "--no-save"):
            save_res = False
        elif opt in ('-o', "--out"):
            out = arg
        elif opt in ('-p', "--patterns"):
            patterns_file = arg
        elif opt in ("--prob"):
            prob = True
        elif opt in ('-s', "--seed"):
            # Hash the free-form seed string into a 32-bit-ish RNG seed.
            seed = int(hashlib.sha1(arg).hexdigest(), 16) % 4294967295
        elif opt in ('-t', "--tests"):
            tests_file = arg
    # Constructing the network runs the whole experiment.
    HopfieldNetwork(inputs_number, patterns_file, tests_file, out, debug, save_res, seed, prob)

if __name__ == '__main__':
    main(sys.argv[1:])
| gpl-2.0 | 4,351,925,335,831,189,500 | 28.671053 | 95 | 0.4949 | false |
SheffieldML/GPyOpt | GPyOpt/testing/functional_tests/base_test_case.py | 1 | 3014 | import os
import numpy as np
import unittest
from mock import patch
from driver import run_eval, run_evaluation_in_steps
from mocks import MockModel
class BaseTestCase(unittest.TestCase):
    """Shared harness that replays recorded optimizer runs against fixtures.

    Subclasses provide ``methods_configs``, ``problem_config`` and
    ``f_inits``; each configuration is run with the model chooser patched
    to a mock and the output compared against a stored result file.
    """

    def __init__(self, *args, **kwargs):
        super(BaseTestCase, self).__init__(*args, **kwargs)
        # This file was used to generate the test files
        self.outpath = os.path.join(os.path.dirname(__file__), 'test_files')
        # Change this False to generate test files
        self.is_unittest = True
        # Allowed margin of error for test outputs
        self.precision = 1e-6

    def get_result_filename(self, test_name):
        # Result files are named "<test>_acquisition_gradient_testfile.txt".
        return '{}_{}'.format(test_name, 'acquisition_gradient_testfile')

    def load_result_file(self, test_name):
        """Load the stored golden result for *test_name* as a numpy array."""
        filename = self.get_result_filename(test_name)
        file_path = '{}/{}.txt'.format(self.outpath, filename)
        original_result = np.loadtxt(file_path)
        return original_result

    @patch('GPyOpt.methods.BayesianOptimization._model_chooser')
    def check_configs(self, mock_model_chooser, mock_gpy_model = None, mock_model = MockModel()):
        # NOTE(review): MockModel() as a default argument is a shared mutable
        # default -- every call without mock_model reuses one instance.
        if mock_gpy_model is not None:
            mock_model.model = mock_gpy_model
        mock_model_chooser.return_value = mock_model

        for m_c in self.methods_configs:
            # Fixed seed so each configuration's run is reproducible.
            np.random.seed(1)
            if mock_gpy_model is not None:
                mock_model.model = mock_gpy_model
            mock_model_chooser.return_value = mock_model

            print('Testing acquisition ' + m_c['name'])
            name = self.get_result_filename(m_c['name'])
            unittest_result = run_eval(problem_config= self.problem_config, f_inits= self.f_inits, method_config=m_c, name=name, outpath=self.outpath, time_limit=None, unittest = self.is_unittest)
            original_result = self.load_result_file(m_c['name'])

            self.assertTrue((abs(original_result - unittest_result) < self.precision).all(), msg=m_c['name'] + ' failed')

    @patch('GPyOpt.methods.BayesianOptimization._model_chooser')
    def check_configs_in_steps(self, mock_model_chooser, mock_gpy_model=None, init_num_steps=None):
        """Like check_configs, but drives the optimization step by step."""
        for m_c in self.methods_configs:
            np.random.seed(1)

            mock_model = MockModel()
            if mock_gpy_model is not None:
                mock_model.model = mock_gpy_model
            mock_model_chooser.return_value = mock_model

            print('Testing acquisition ' + m_c['name'] + ' in steps')

            original_result = self.load_result_file(m_c['name'])
            if init_num_steps is None:
                # Default: replay exactly the number of steps the stored
                # result contains beyond the initial design points.
                num_steps = original_result.shape[0] - self.f_inits.shape[0]
            else:
                num_steps = init_num_steps

            unittest_result = run_evaluation_in_steps(problem_config= self.problem_config, f_inits= self.f_inits, method_config=m_c, num_steps=num_steps)

            self.assertTrue((abs(original_result - unittest_result) < self.precision).all(), msg=m_c['name'] + ' failed step-by-step check')
| bsd-3-clause | -3,905,780,658,736,581,000 | 40.861111 | 196 | 0.630723 | false |
mancoast/CPythonPyc_test | fail/301_test_iterlen.py | 8 | 7750 | """ Test Iterator Length Transparency
Some functions or methods which accept general iterable arguments have
optional, more efficient code paths if they know how many items to expect.
For instance, map(func, iterable), will pre-allocate the exact amount of
space required whenever the iterable can report its length.
The desired invariant is: len(it)==len(list(it)).
A complication is that an iterable and iterator can be the same object. To
maintain the invariant, an iterator needs to dynamically update its length.
For instance, an iterable such as range(10) always reports its length as ten,
but it=iter(range(10)) starts at ten, and then goes to nine after next(it).
Having this capability means that map() can ignore the distinction between
map(func, iterable) and map(func, iter(iterable)).
When the iterable is immutable, the implementation can straight-forwardly
report the original length minus the cumulative number of calls to next().
This is the case for tuples, range objects, and itertools.repeat().
Some containers become temporarily immutable during iteration. This includes
dicts, sets, and collections.deque. Their implementation is equally simple
though they need to permantently set their length to zero whenever there is
an attempt to iterate after a length mutation.
The situation slightly more involved whenever an object allows length mutation
during iteration. Lists and sequence iterators are dynanamically updatable.
So, if a list is extended during iteration, the iterator will continue through
the new items. If it shrinks to a point before the most recent iteration,
then no further items are available and the length is reported at zero.
Reversed objects can also be wrapped around mutable objects; however, any
appends after the current position are ignored. Any other approach leads
to confusion and possibly returning the same item more than once.
The iterators not listed above, such as enumerate and the other itertools,
are not length transparent because they have no way to distinguish between
iterables that report static length and iterators whose length changes with
each call (i.e. the difference between enumerate('abc') and
enumerate(iter('abc')).
"""
import unittest
from test import support
from itertools import repeat
from collections import deque
from builtins import len as _len
n = 10
def len(obj):
    """Shadow of builtin len() that falls back to __length_hint__().

    Raises TypeError when the object supports neither protocol.
    """
    try:
        return _len(obj)
    except TypeError:
        try:
            # note: this is an internal undocumented API,
            # don't rely on it in your own programs
            return obj.__length_hint__()
        except AttributeError:
            raise TypeError
class TestInvariantWithoutMutations(unittest.TestCase):

    def test_invariant(self):
        # Walk the iterator to exhaustion, checking the reported length
        # counts down in lockstep with every next() call.
        it = self.it
        for remaining in range(n, 0, -1):
            self.assertEqual(len(it), remaining)
            next(it)
        # Exhausted: the length stays pinned at zero, even after a
        # further StopIteration.
        self.assertEqual(len(it), 0)
        self.assertRaises(StopIteration, next, it)
        self.assertEqual(len(it), 0)
class TestTemporarilyImmutable(TestInvariantWithoutMutations):

    def test_immutable_during_iteration(self):
        # objects such as deques, sets, and dictionaries enforce
        # length immutability during iteration
        it = self.it
        self.assertEqual(len(it), n)
        next(it)
        self.assertEqual(len(it), n-1)
        # Mutating the container invalidates the iterator for good:
        # further iteration raises and the length drops to zero.
        self.mutate()
        self.assertRaises(RuntimeError, next, it)
        self.assertEqual(len(it), 0)
## ------- Concrete Type Tests -------
class TestRepeat(TestInvariantWithoutMutations):
    # itertools.repeat(obj, times) knows its remaining count up front.

    def setUp(self):
        self.it = repeat(None, n)

    def test_no_len_for_infinite_repeat(self):
        # The repeat() object can also be infinite
        self.assertRaises(TypeError, len, repeat(None))

class TestXrange(TestInvariantWithoutMutations):
    # A plain range iterator reports its remaining length.

    def setUp(self):
        self.it = iter(range(n))

class TestXrangeCustomReversed(TestInvariantWithoutMutations):
    # reversed(range(...)) exercises range's custom __reversed__.

    def setUp(self):
        self.it = reversed(range(n))

class TestTuple(TestInvariantWithoutMutations):
    # Tuple iterators count down over an immutable sequence.

    def setUp(self):
        self.it = iter(tuple(range(n)))
## ------- Types that should not be mutated during iteration -------
class TestDeque(TestTemporarilyImmutable):
    # deque iteration; d.pop() is the length mutation that must
    # invalidate the iterator.

    def setUp(self):
        d = deque(range(n))
        self.it = iter(d)
        self.mutate = d.pop

class TestDequeReversed(TestTemporarilyImmutable):

    def setUp(self):
        d = deque(range(n))
        self.it = reversed(d)
        self.mutate = d.pop

class TestDictKeys(TestTemporarilyImmutable):
    # dict key iteration; popitem() mutates the length.

    def setUp(self):
        d = dict.fromkeys(range(n))
        self.it = iter(d)
        self.mutate = d.popitem

class TestDictItems(TestTemporarilyImmutable):

    def setUp(self):
        d = dict.fromkeys(range(n))
        self.it = iter(d.items())
        self.mutate = d.popitem

class TestDictValues(TestTemporarilyImmutable):

    def setUp(self):
        d = dict.fromkeys(range(n))
        self.it = iter(d.values())
        self.mutate = d.popitem

class TestSet(TestTemporarilyImmutable):
    # set iteration; pop() mutates the length.

    def setUp(self):
        d = set(range(n))
        self.it = iter(d)
        self.mutate = d.pop
## ------- Types that can mutate during iteration -------
class TestList(TestInvariantWithoutMutations):
    # List iterators track mutations of the underlying list dynamically.
    # NOTE(review): setUp iterates a range, not a list -- likely a py3
    # port artifact (py2 range() returned a list); test_mutation below
    # does cover a real list.

    def setUp(self):
        self.it = iter(range(n))

    def test_mutation(self):
        d = list(range(n))
        it = iter(d)
        next(it)
        next(it)
        self.assertEqual(len(it), n-2)
        d.append(n)
        self.assertEqual(len(it), n-1) # grow with append
        # Shrinking before the cursor leaves no further items.
        d[1:] = []
        self.assertEqual(len(it), 0)
        self.assertEqual(list(it), [])
        d.extend(range(20))
        self.assertEqual(len(it), 0)

class TestListReversed(TestInvariantWithoutMutations):
    # reversed() over a list ignores items appended past the cursor.

    def setUp(self):
        self.it = reversed(range(n))

    def test_mutation(self):
        d = list(range(n))
        it = reversed(d)
        next(it)
        next(it)
        self.assertEqual(len(it), n-2)
        d.append(n)
        self.assertEqual(len(it), n-2) # ignore append
        d[1:] = []
        self.assertEqual(len(it), 0)
        self.assertEqual(list(it), []) # confirm invariant
        d.extend(range(20))
        self.assertEqual(len(it), 0)
## -- Check to make sure exceptions are not suppressed by __length_hint__()
class BadLen(object):
    # __len__ raises: the error must propagate, not be swallowed.
    def __iter__(self): return iter(range(10))
    def __len__(self):
        raise RuntimeError('hello')

class BadLengthHint(object):
    # __length_hint__ raises: same requirement for the hint protocol.
    def __iter__(self): return iter(range(10))
    def __length_hint__(self):
        raise RuntimeError('hello')

class NoneLengthHint(object):
    # A useless (non-int) hint must be ignored, not crash iteration.
    def __iter__(self): return iter(range(10))
    def __length_hint__(self):
        return None
class TestLengthHintExceptions(unittest.TestCase):
    """Exceptions raised by __len__/__length_hint__ must not be suppressed."""

    def test_issue1242657(self):
        self.assertRaises(RuntimeError, list, BadLen())
        self.assertRaises(RuntimeError, list, BadLengthHint())
        self.assertRaises(RuntimeError, [].extend, BadLen())
        self.assertRaises(RuntimeError, [].extend, BadLengthHint())
        b = bytearray(range(10))
        self.assertRaises(RuntimeError, b.extend, BadLen())
        self.assertRaises(RuntimeError, b.extend, BadLengthHint())

    def test_invalid_hint(self):
        # Make sure an invalid result doesn't muck-up the works
        self.assertEqual(list(NoneLengthHint()), list(range(10)))
def test_main():
    """Run every length-transparency test class under regrtest."""
    support.run_unittest(
        TestRepeat,
        TestXrange,
        TestXrangeCustomReversed,
        TestTuple,
        TestDeque,
        TestDequeReversed,
        TestDictKeys,
        TestDictItems,
        TestDictValues,
        TestSet,
        TestList,
        TestListReversed,
        TestLengthHintExceptions,
    )

if __name__ == "__main__":
    test_main()
| gpl-3.0 | 6,387,546,859,020,444,000 | 29.876494 | 78 | 0.669419 | false |
char101/pyjade | pyjade/testsuite/test_inline_lexer.py | 7 | 10822 | from pyjade.lexer import Lexer
from pyjade.utils import odict
expected_results = {
"p Here is some #[strong: em text] and look at #[a(href='http://google.com') this link!]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'Here is some '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': ':', 'line': 1, 'inline_level': 1, 'val': None},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'em'},
{'buffer': None, 'type': 'text', 'line': 1, 'inline_level': 1, 'val': u' text'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u' and look at '},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'a'},
{'inline_level': 1, 'val': None, 'buffer': None, 'static_attrs': set([u'href']), 'attrs': odict([(u'href', u"'http://google.com'")]), 'line': 1, 'type': 'attrs'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'text', 'val': u' this link!'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"p Other inline #[strong= 'test']": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'Other inline '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'inline_level': 1, 'val': u" 'test'", 'buffer': True, 'escape': True, 'line': 1, 'type': 'code'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"p Test #[|text line]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'Test '},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u'text line'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"p Test buffered #[= map(str, zip('iln', 'nie')) + 'code']": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'Test buffered '},
{'inline_level': 1, 'val': u" map(str, zip('iln', 'nie')) + 'code'", 'buffer': True, 'escape': True, 'line': 1, 'type': 'code'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"p #[- abcf = [[123, [[],[]], []],'abc']] #[= abcf]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'inline_level': 1, 'val': u" abcf = [[123, [[],[]], []],'abc']", 'buffer': False, 'escape': False, 'line': 1, 'type': 'code'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u' '},
{'inline_level': 1, 'val': u' abcf', 'buffer': True, 'escape': True, 'line': 1, 'type': 'code'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"#[#[#[a a#[b #[i a] b]] d]e]": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 3, 'val': u'a'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 3, 'val': u'a'},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 4, 'val': u'b'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 4, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 5, 'val': u'i'},
{'buffer': None, 'type': 'text', 'line': 1, 'inline_level': 5, 'val': u' a'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 4, 'val': u' b'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 3, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u' d'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u'e'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"p We can also #[strong combine #[em multiple #[img(src='http://jade-lang.com/style/logo.png')]]]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'We can also '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u'combine '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 2, 'val': u'em'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u'multiple '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 3, 'val': u'img'},
{'inline_level': 3, 'val': None, 'buffer': None, 'static_attrs': set([u'src']), 'attrs': odict([(u'src', u"'http://jade-lang.com/style/logo.png'")]), 'line': 1, 'type': 'attrs'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 2, 'val': u''},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u''},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"#[strong start] line with #[i]\#[j] inline": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': 'text', 'line': 1, 'inline_level': 1, 'val': u' start'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u' line with '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'i'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'#[j] inline'}],
"p Another #[strong.lil#okf(acs=[1,2]) test [[with brackets]] [in#[='side']]]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'Another '},
{'buffer': None, 'type': 'tag', 'line': 1, 'inline_level': 1, 'val': u'strong'},
{'buffer': None, 'type': 'class', 'line': 1, 'inline_level': 1, 'val': u'lil'},
{'buffer': None, 'type': 'id', 'line': 1, 'inline_level': 1, 'val': u'okf'},
{'val': None, 'buffer': None, 'static_attrs': set([]), 'attrs': odict([(u'acs', u'[1,2]')]), 'line': 1, 'type': 'attrs', 'inline_level': 1},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u'test [[with brackets]] [in'},
{'inline_level': 2, 'val': u"'side'", 'buffer': True, 'escape': True, 'line': 1, 'type': 'code'},
{'buffer': None, 'type': 'string', 'line': 1, 'inline_level': 1, 'val': u']'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"""mixin lala(a, b)
span lala(#{a}, #{b})
p Test inline mixin #[+lala(123, 'lala inside inline')] end""": [
{'args': u'a, b', 'buffer': None, 'line': 1, 'type': 'mixin', 'inline_level': 0, 'val': u'lala'},
{'buffer': None, 'line': 2, 'type': 'indent', 'inline_level': 0, 'val': 2},
{'buffer': None, 'line': 2, 'type': 'tag', 'inline_level': 0, 'val': u'span'},
{'buffer': None, 'line': 2, 'type': 'text', 'inline_level': 0, 'val': u' lala(#{a}, #{b})'},
{'buffer': None, 'line': 3, 'type': 'outdent', 'inline_level': 0, 'val': None},
{'buffer': None, 'line': 3, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 3, 'type': 'string', 'inline_level': 0, 'val': u'Test inline mixin '},
{'inline_level': 1, 'val': u'lala', 'buffer': None, 'args': u"123, 'lala inside inline'", 'line': 1, 'type': 'call'},
{'buffer': None, 'line': 3, 'type': 'string', 'inline_level': 0, 'val': u' end'}],
"p only class #[.strong: em inline]": [
{'buffer': None, 'line': 1, 'type': 'tag', 'inline_level': 0, 'val': u'p'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u'only class '},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'class', 'val': u'strong'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': ':', 'val': None},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'em'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'text', 'val': u' inline'},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"#[asdf.lol(fff)#[asdf]]": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'tag', 'val': u'asdf'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'class', 'val': u'lol'},
{'inline_level': 1, 'val': None, 'buffer': None, 'static_attrs': set([u'fff']), 'attrs': odict([(u'fff', True)]), 'line': 1, 'type': 'attrs'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'string', 'val': u''},
{'buffer': None, 'inline_level': 2, 'line': 1, 'type': 'tag', 'val': u'asdf'},
{'buffer': None, 'inline_level': 1, 'line': 1, 'type': 'string', 'val': u''},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"#[= '[[[[[[[[[[']": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': True, 'line': 1, 'type': 'code', 'val': u" '[[[[[[[[[['", 'escape': True, 'inline_level': 1},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
"#[= ']]]]]]]]]]']": [
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''},
{'buffer': True, 'line': 1, 'type': 'code', 'val': u" ']]]]]]]]]]'", 'escape': True, 'inline_level': 1},
{'buffer': None, 'line': 1, 'type': 'string', 'inline_level': 0, 'val': u''}],
}
def generate_expected(jade):
    """Tokenize *jade* and return each token's attribute dict (minus 'eos')."""
    lexer = Lexer(jade)
    tokens = []
    tok = lexer.advance()
    while tok.type != 'eos':
        tokens.append(tok.__dict__)
        tok = lexer.advance()
    return tokens
def process(jade):
    """Assert that lexing *jade* reproduces the recorded token stream."""
    actual = generate_expected(jade)
    assert expected_results[jade] == actual
def test_lexer():
    """Yield one (check, snippet) sub-test per golden fixture (nose-style).

    Only the keys (the Jade snippets) are needed, so iterate the dict
    directly; this also drops the unnecessary ``six.iteritems`` call and
    its function-local import.
    """
    for jade in expected_results:
        yield process, jade
| mit | -7,361,041,636,928,403,000 | 68.371795 | 186 | 0.489928 | false |
xzYue/odoo | addons/hw_escpos/controllers/main.py | 13 | 14014 | # -*- coding: utf-8 -*-
import commands
import logging
import simplejson
import os
import os.path
import io
import base64
import openerp
import time
import random
import math
import md5
import openerp.addons.hw_proxy.controllers.main as hw_proxy
import pickle
import re
import subprocess
import traceback
try:
from .. escpos import *
from .. escpos.exceptions import *
from .. escpos.printer import Usb
except ImportError:
escpos = printer = None
from threading import Thread, Lock
from Queue import Queue, Empty
try:
import usb.core
except ImportError:
usb = None
from PIL import Image
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
# workaround https://bugs.launchpad.net/openobject-server/+bug/947231
# related to http://bugs.python.org/issue7980
from datetime import datetime
# Calling strptime once at import time pre-loads its implementation on the
# main thread, so later strptime calls from worker threads cannot trip over
# the thread-unsafe lazy import described in the bugs above.
datetime.strptime('2012-01-01', '%Y-%m-%d')
class EscposDriver(Thread):
    def __init__(self):
        # Worker thread that serializes all printer access through a queue.
        Thread.__init__(self)
        self.queue = Queue()  # pending (timestamp, task, data) tuples
        self.lock = Lock()  # guards lazy startup in lockedstart()
        self.status = {'status':'connecting', 'messages':[]}
    def connected_usb_devices(self):
        """Return [{vendor, product, name}] for each USB printer detected."""
        connected = []

        # printers can either define bDeviceClass=7, or they can define one of
        # their interfaces with bInterfaceClass=7. This class checks for both.
        class FindUsbClass(object):
            def __init__(self, usb_class):
                self._class = usb_class
            def __call__(self, device):
                # first, let's check the device
                if device.bDeviceClass == self._class:
                    return True
                # transverse all devices and look through their interfaces to
                # find a matching class
                for cfg in device:
                    intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class)
                    if intf is not None:
                        return True
                return False

        # USB class 7 is the printer device class.
        printers = usb.core.find(find_all=True, custom_match=FindUsbClass(7))

        # if no printers are found after this step we will take the
        # first epson or star device we can find.
        # epson
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x04b8)
        # star
        if not printers:
            printers = usb.core.find(find_all=True, idVendor=0x0519)

        for printer in printers:
            connected.append({
                'vendor': printer.idVendor,
                'product': printer.idProduct,
                'name': usb.util.get_string(printer, 256, printer.iManufacturer) + " " + usb.util.get_string(printer, 256, printer.iProduct)
            })

        return connected
def lockedstart(self):
with self.lock:
if not self.isAlive():
self.daemon = True
self.start()
def get_escpos_printer(self):
printers = self.connected_usb_devices()
if len(printers) > 0:
self.set_status('connected','Connected to '+printers[0]['name'])
return Usb(printers[0]['vendor'], printers[0]['product'])
else:
self.set_status('disconnected','Printer Not Found')
return None
    def get_status(self):
        """Queue a status poll and return the last recorded status dict.

        The poll is handled asynchronously by the worker thread, so the
        returned dict reflects the worker's most recent update.
        """
        self.push_task('status')
        return self.status
def open_cashbox(self,printer):
printer.cashdraw(2)
printer.cashdraw(5)
def set_status(self, status, message = None):
_logger.info(status+' : '+ (message or 'no message'))
if status == self.status['status']:
if message != None and (len(self.status['messages']) == 0 or message != self.status['messages'][-1]):
self.status['messages'].append(message)
else:
self.status['status'] = status
if message:
self.status['messages'] = [message]
else:
self.status['messages'] = []
if status == 'error' and message:
_logger.error('ESC/POS Error: '+message)
elif status == 'disconnected' and message:
_logger.warning('ESC/POS Device Disconnected: '+message)
def run(self):
printer = None
if not escpos:
_logger.error('ESC/POS cannot initialize, please verify system dependencies.')
return
while True:
try:
error = True
timestamp, task, data = self.queue.get(True)
printer = self.get_escpos_printer()
if printer == None:
if task != 'status':
self.queue.put((timestamp,task,data))
error = False
time.sleep(5)
continue
elif task == 'receipt':
if timestamp >= time.time() - 1 * 60 * 60:
self.print_receipt_body(printer,data)
printer.cut()
elif task == 'xml_receipt':
if timestamp >= time.time() - 1 * 60 * 60:
printer.receipt(data)
elif task == 'cashbox':
if timestamp >= time.time() - 12:
self.open_cashbox(printer)
elif task == 'printstatus':
self.print_status(printer)
elif task == 'status':
pass
error = False
except NoDeviceError as e:
print "No device found %s" %str(e)
except HandleDeviceError as e:
print "Impossible to handle the device due to previous error %s" % str(e)
except TicketNotPrinted as e:
print "The ticket does not seems to have been fully printed %s" % str(e)
except NoStatusError as e:
print "Impossible to get the status of the printer %s" % str(e)
except Exception as e:
self.set_status('error', str(e))
errmsg = str(e) + '\n' + '-'*60+'\n' + traceback.format_exc() + '-'*60 + '\n'
_logger.error(errmsg);
finally:
if error:
self.queue.put((timestamp, task, data))
if printer:
printer.close()
def push_task(self,task, data = None):
self.lockedstart()
self.queue.put((time.time(),task,data))
def print_status(self,eprint):
localips = ['0.0.0.0','127.0.0.1','127.0.1.1']
hosting_ap = os.system('pgrep hostapd') == 0
ssid = subprocess.check_output('iwconfig 2>&1 | grep \'ESSID:"\' | sed \'s/.*"\\(.*\\)"/\\1/\'', shell=True).rstrip()
ips = [ c.split(':')[1].split(' ')[0] for c in commands.getoutput("/sbin/ifconfig").split('\n') if 'inet addr' in c ]
ips = [ ip for ip in ips if ip not in localips ]
eprint.text('\n\n')
eprint.set(align='center',type='b',height=2,width=2)
eprint.text('PosBox Status\n')
eprint.text('\n')
eprint.set(align='center')
if hosting_ap:
eprint.text('Wireless network:\nPosbox\n\n')
elif ssid:
eprint.text('Wireless network:\n' + ssid + '\n\n')
if len(ips) == 0:
eprint.text('ERROR: Could not connect to LAN\n\nPlease check that the PosBox is correc-\ntly connected with a network cable,\n that the LAN is setup with DHCP, and\nthat network addresses are available')
elif len(ips) == 1:
eprint.text('IP Address:\n'+ips[0]+'\n')
else:
eprint.text('IP Addresses:\n')
for ip in ips:
eprint.text(ip+'\n')
if len(ips) >= 1:
eprint.text('\nHomepage:\nhttp://'+ips[0]+':8069\n')
eprint.text('\n\n')
eprint.cut()
def print_receipt_body(self,eprint,receipt):
def check(string):
return string != True and bool(string) and string.strip()
def price(amount):
return ("{0:."+str(receipt['precision']['price'])+"f}").format(amount)
def money(amount):
return ("{0:."+str(receipt['precision']['money'])+"f}").format(amount)
def quantity(amount):
if math.floor(amount) != amount:
return ("{0:."+str(receipt['precision']['quantity'])+"f}").format(amount)
else:
return str(amount)
def printline(left, right='', width=40, ratio=0.5, indent=0):
lwidth = int(width * ratio)
rwidth = width - lwidth
lwidth = lwidth - indent
left = left[:lwidth]
if len(left) != lwidth:
left = left + ' ' * (lwidth - len(left))
right = right[-rwidth:]
if len(right) != rwidth:
right = ' ' * (rwidth - len(right)) + right
return ' ' * indent + left + right + '\n'
def print_taxes():
taxes = receipt['tax_details']
for tax in taxes:
eprint.text(printline(tax['tax']['name'],price(tax['amount']), width=40,ratio=0.6))
# Receipt Header
if receipt['company']['logo']:
eprint.set(align='center')
eprint.print_base64_image(receipt['company']['logo'])
eprint.text('\n')
else:
eprint.set(align='center',type='b',height=2,width=2)
eprint.text(receipt['company']['name'] + '\n')
eprint.set(align='center',type='b')
if check(receipt['company']['contact_address']):
eprint.text(receipt['company']['contact_address'] + '\n')
if check(receipt['company']['phone']):
eprint.text('Tel:' + receipt['company']['phone'] + '\n')
if check(receipt['company']['vat']):
eprint.text('VAT:' + receipt['company']['vat'] + '\n')
if check(receipt['company']['email']):
eprint.text(receipt['company']['email'] + '\n')
if check(receipt['company']['website']):
eprint.text(receipt['company']['website'] + '\n')
if check(receipt['header']):
eprint.text(receipt['header']+'\n')
if check(receipt['cashier']):
eprint.text('-'*32+'\n')
eprint.text('Served by '+receipt['cashier']+'\n')
# Orderlines
eprint.text('\n\n')
eprint.set(align='center')
for line in receipt['orderlines']:
pricestr = price(line['price_display'])
if line['discount'] == 0 and line['unit_name'] == 'Unit(s)' and line['quantity'] == 1:
eprint.text(printline(line['product_name'],pricestr,ratio=0.6))
else:
eprint.text(printline(line['product_name'],ratio=0.6))
if line['discount'] != 0:
eprint.text(printline('Discount: '+str(line['discount'])+'%', ratio=0.6, indent=2))
if line['unit_name'] == 'Unit(s)':
eprint.text( printline( quantity(line['quantity']) + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
else:
eprint.text( printline( quantity(line['quantity']) + line['unit_name'] + ' x ' + price(line['price']), pricestr, ratio=0.6, indent=2))
# Subtotal if the taxes are not included
taxincluded = True
if money(receipt['subtotal']) != money(receipt['total_with_tax']):
eprint.text(printline('','-------'));
eprint.text(printline(_('Subtotal'),money(receipt['subtotal']),width=40, ratio=0.6))
print_taxes()
#eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
taxincluded = False
# Total
eprint.text(printline('','-------'));
eprint.set(align='center',height=2)
eprint.text(printline(_(' TOTAL'),money(receipt['total_with_tax']),width=40, ratio=0.6))
eprint.text('\n\n');
# Paymentlines
eprint.set(align='center')
for line in receipt['paymentlines']:
eprint.text(printline(line['journal'], money(line['amount']), ratio=0.6))
eprint.text('\n');
eprint.set(align='center',height=2)
eprint.text(printline(_(' CHANGE'),money(receipt['change']),width=40, ratio=0.6))
eprint.set(align='center')
eprint.text('\n');
# Extra Payment info
if receipt['total_discount'] != 0:
eprint.text(printline(_('Discounts'),money(receipt['total_discount']),width=40, ratio=0.6))
if taxincluded:
print_taxes()
#eprint.text(printline(_('Taxes'),money(receipt['total_tax']),width=40, ratio=0.6))
# Footer
if check(receipt['footer']):
eprint.text('\n'+receipt['footer']+'\n\n')
eprint.text(receipt['name']+'\n')
eprint.text( str(receipt['date']['date']).zfill(2)
+'/'+ str(receipt['date']['month']+1).zfill(2)
+'/'+ str(receipt['date']['year']).zfill(4)
+' '+ str(receipt['date']['hour']).zfill(2)
+':'+ str(receipt['date']['minute']).zfill(2) )
# Module-level singleton: one driver thread shared by all HTTP handlers.
driver = EscposDriver()
# Queue an initial status ticket so a freshly booted box prints its IP.
driver.push_task('printstatus')
# Register with the hardware proxy under the 'escpos' protocol name.
hw_proxy.drivers['escpos'] = driver
class EscposProxy(hw_proxy.Proxy):
    """JSON HTTP endpoints that forward POS jobs to the printer driver.

    Each route simply logs the request and enqueues the corresponding task
    on the module-level `driver` thread; the actual printing is asynchronous.
    """

    @http.route('/hw_proxy/open_cashbox', type='json', auth='none', cors='*')
    def open_cashbox(self):
        _logger.info('ESC/POS: OPEN CASHBOX')
        driver.push_task('cashbox')

    @http.route('/hw_proxy/print_receipt', type='json', auth='none', cors='*')
    def print_receipt(self, receipt):
        _logger.info('ESC/POS: PRINT RECEIPT')
        driver.push_task('receipt',receipt)

    @http.route('/hw_proxy/print_xml_receipt', type='json', auth='none', cors='*')
    def print_xml_receipt(self, receipt):
        _logger.info('ESC/POS: PRINT XML RECEIPT')
        driver.push_task('xml_receipt',receipt)
| agpl-3.0 | -6,520,309,028,010,183,000 | 36.672043 | 215 | 0.537819 | false |
CiscoSystems/nova | nova/tests/integrated/v3/test_admin_actions.py | 20 | 1909 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class AdminActionsSamplesJsonTest(test_servers.ServersSampleBase):
    extension_name = "os-admin-actions"

    def setUp(self):
        """Create the server that every admin-action test operates on."""
        super(AdminActionsSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()

    def _post_server_action(self, template):
        # Every admin action POSTs to the same action URL with an empty
        # substitution dict and must come back with HTTP 202 Accepted.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 template, {})
        self.assertEqual(response.status, 202)

    def test_post_reset_network(self):
        # Get api samples to reset server network request.
        self._post_server_action('admin-actions-reset-network')

    def test_post_inject_network_info(self):
        # Get api samples to inject network info request.
        self._post_server_action('admin-actions-inject-network-info')

    def test_post_reset_state(self):
        # get api samples to server reset state request.
        self._post_server_action('admin-actions-reset-server-state')
| apache-2.0 | -5,774,711,405,961,999,000 | 40.5 | 78 | 0.657936 | false |
shifter/rekall | rekall-core/rekall/plugins/windows/taskmods.py | 3 | 8383 | # Rekall Memory Forensics
# Copyright (C) 2007-2011 Volatile Systems
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Additional Authors:
# Michael Cohen <[email protected]>
# Mike Auty <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# pylint: disable=protected-access
from rekall import testlib
from rekall.plugins import core
from rekall.plugins.windows import common
from rekall import plugin
from rekall.ui import text
class WinPsList(common.WinProcessFilter):
    """List processes for windows."""

    # Plugin registration name (name-mangled to _WinPsList__name).
    __name = "pslist"

    eprocess = None

    @classmethod
    def args(cls, metadata):
        """Attach the plugin description to the argument metadata."""
        super(WinPsList, cls).args(metadata)
        metadata.set_description("""
    Lists the processes by following the _EPROCESS.PsActiveList.

    In the windows operating system, processes are linked together through a
    doubly linked list. This plugin follows the list around, printing
    information about each process.

    To begin, we need to find any element on the list. This can be done by:

    1) Obtaining the _KDDEBUGGER_DATA64.PsActiveProcessHead - debug
       information.

    2) Finding any _EPROCESS in memory (e.g. through psscan) and following
       its list.

    This plugin supports both approaches.
    """)

    def render(self, renderer):
        """Emit one table row per process that passes the filters."""
        renderer.table_header([
            dict(type="_EPROCESS", cname="_EPROCESS"),
            dict(name="PPID", cname="ppid", width=6, align="r"),
            dict(name="Thds", cname="thread_count", width=6, align="r"),
            dict(name="Hnds", cname="handle_count", width=8, align="r"),
            dict(name="Sess", cname="session_id", width=6, align="r"),
            dict(name="Wow64", cname="wow64", width=6),
            dict(name="Start", cname="process_create_time", width=24),
            dict(name="Exit", cname="process_exit_time", width=24)])

        for task in self.filter_processes():
            renderer.table_row(task,
                               task.InheritedFromUniqueProcessId,
                               task.ActiveThreads,
                               # .m() reads the raw struct member directly.
                               task.ObjectTable.m("HandleCount"),
                               task.SessionId,
                               task.IsWow64,
                               task.CreateTime,
                               task.ExitTime,
                               )
class WinDllList(common.WinProcessFilter):
    """Prints a list of dll modules mapped into each process."""

    __name = "dlllist"

    def render(self, renderer):
        for task in self.filter_processes():
            renderer.section()
            renderer.format(u"{0} pid: {1:6}\n",
                            task.ImageFileName, task.UniqueProcessId)

            # Without a readable PEB there is nothing more to show.
            if not task.Peb:
                renderer.format("Unable to read PEB for task.\n")
                continue

            renderer.format(u"Command line : {0}\n",
                            task.Peb.ProcessParameters.CommandLine)
            if task.IsWow64:
                renderer.format(u"Note: use ldrmodules for listing DLLs "
                                "in Wow64 processes\n")
            renderer.format(u"{0}\n\n", task.Peb.CSDVersion)

            renderer.table_header([
                ("Base", "module_base", "[addrpad]"),
                ("Size", "module_size", "[addr]"),
                ("Load Reason/Count", "reason", "30"),
                ("Path", "loaded_dll_path", ""),
            ])
            for module in task.get_load_modules():
                renderer.table_row(module.DllBase, module.SizeOfImage,
                                   module.LoadReason, module.FullDllName)
class WinMemMap(core.MemmapMixIn, common.WinProcessFilter):
    """Calculates the memory regions mapped by a process."""

    __name = "memmap"

    def _get_highest_user_address(self):
        # Upper bound of user-mode address space, read from the kernel
        # profile's MmHighestUserAddress constant.
        return self.profile.get_constant_object(
            "MmHighestUserAddress", "Pointer").v()
class WinMemDump(core.DirectoryDumperMixin, WinMemMap):
    """Dump the addressable memory for a process"""

    __name = "memdump"

    def dump_process(self, eprocess, fd, index_fd):
        """Write the process memory to `fd` and a region index to `index_fd`."""
        task_as = eprocess.get_process_address_space()
        highest_address = self._get_highest_user_address()
        # The index file maps dump-file offsets back to virtual addresses.
        temp_renderer = text.TextRenderer(session=self.session,
                                          fd=index_fd)
        with temp_renderer.start():
            temp_renderer.table_header([
                ("File Address", "file_addr", "[addrpad]"),
                ("Length", "length", "[addrpad]"),
                ("Virtual Addr", "virtual", "[addrpad]")])

            for _ in task_as.get_available_addresses():
                virt_address, phys_address, length = _
                # Unless --all was given, stop at the end of user space.
                if not self.all and virt_address > highest_address:
                    break

                data = self.physical_address_space.read(phys_address, length)
                temp_renderer.table_row(fd.tell(), length, virt_address)
                fd.write(data)

    def render(self, renderer):
        """Dump each filtered process into <name>_<pid>.dmp plus a .idx index."""
        if self.dump_dir is None:
            raise plugin.PluginError("Dump directory not specified.")

        for task in self.filter_processes():
            renderer.section()
            filename = u"{0}_{1:d}.dmp".format(
                task.ImageFileName, task.UniqueProcessId)

            renderer.format(u"Writing {0} {1:#x} to {2}\n",
                            task.ImageFileName, task, filename)

            with renderer.open(directory=self.dump_dir,
                               filename=filename,
                               mode='wb') as fd:
                with renderer.open(directory=self.dump_dir,
                                   filename=filename + ".idx",
                                   mode='wb') as index_fd:
                    self.dump_process(task, fd, index_fd)
class Threads(common.WinProcessFilter):
    """Enumerate threads."""
    name = "threads"

    def render(self, renderer):
        """List every _ETHREAD of every filtered process with a resolved symbol."""
        renderer.table_header(
            [("_ETHREAD", "offset", "[addrpad]"),
             ("PID", "pid", ">6"),
             ("TID", "tid", ">6"),
             ("Start Address", "start", "[addrpad]"),
             ("Process", "name", "16"),
             ("Symbol", "symbol", "")])

        cc = self.session.plugins.cc()
        with cc:
            for task in self.filter_processes():
                # Resolve names in the process context.
                cc.SwitchProcessContext(process=task)

                for thread in task.ThreadListHead.list_of_type(
                        "_ETHREAD", "ThreadListEntry"):
                    renderer.table_row(
                        thread,
                        thread.Cid.UniqueProcess,
                        thread.Cid.UniqueThread,
                        thread.StartAddress,
                        task.ImageFileName,
                        self.session.address_resolver.format_address(
                            thread.Win32StartAddress,
                            max_distance=0xffffffff),
                        )
class TestWinMemDump(testlib.HashChecker):
    """Test the memdump plugin."""
    PARAMETERS = dict(
        commandline="memdump --pid=%(pid)s --dump_dir %(tempdir)s",
        pid=2624)
class TestMemmap(testlib.SimpleTestCase):
    """Test the memmap plugin."""
    PARAMETERS = dict(
        commandline="memmap --pid=%(pid)s",
        pid=2624)
class TestMemmapCoalesce(testlib.SimpleTestCase):
    """Make sure that memmaps are coalesced properly."""
    PARAMETERS = dict(commandline="memmap --pid=%(pid)s --coalesce",
                      pid=2624)
| gpl-2.0 | 4,664,065,186,811,321,000 | 34.824786 | 80 | 0.55374 | false |
wood-galaxy/FreeCAD | src/Mod/Material/InitGui.py | 57 | 1621 | #***************************************************************************
#* *
#* Copyright (c) 2013 - Juergen Riegel <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
| lgpl-2.1 | -119,050,321,549,593,230 | 69.478261 | 78 | 0.396052 | false |
tumbl3w33d/ansible | lib/ansible/plugins/doc_fragments/docker.py | 9 | 7280 | # -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
    """Shared documentation fragments for the docker connection options.

    The class attributes below are raw YAML strings that Ansible splices
    into module documentation; their content is consumed verbatim.
    """

    # Docker doc fragment
    DOCUMENTATION = r'''

options:
    docker_host:
        description:
            - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
              the module will automatically replace C(tcp) in the connection URL with C(https).
            - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: str
        default: unix://var/run/docker.sock
        aliases: [ docker_url ]
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
              be used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: localhost
    api_version:
        description:
            - The version of the Docker API running on the Docker Host.
            - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: str
        default: auto
        aliases: [ docker_api_version ]
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: int
        default: 60
    ca_cert:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_ca_cert, cacert_path ]
    client_cert:
        description:
            - Path to the client's TLS certificate file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_cert, cert_path ]
    client_key:
        description:
            - Path to the client's TLS key file.
            - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
              the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
        type: path
        aliases: [ tls_client_key, key_path ]
    ssl_version:
        description:
            - Provide a valid SSL version number. Default value determined by ssl.py module.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
              used instead.
        type: str
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
              instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
    validate_certs:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
            - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
              used instead. If the environment variable is not set, the default value will be used.
        type: bool
        default: no
        aliases: [ tls_verify ]
    debug:
        description:
            - Debug mode
        type: bool
        default: no

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
      C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      U(https://docker-py.readthedocs.io/en/stable/machine/) for more details.
    - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
      For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(pip).
    - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
      In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
      and use C($DOCKER_CONFIG/config.json) otherwise.
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
    DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
requirements:
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
       Python module has been superseded by L(docker,https://pypi.org/project/docker/)
       (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
       For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
       install the C(docker) Python module. Note that both modules should *not*
       be installed at the same time. Also note that when both modules are installed
       and one of them is uninstalled, the other might no longer function and a
       reinstall of it is required."
'''

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
    DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
requirements:
    - "Python >= 2.7"
    - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
       Python module has been superseded by L(docker,https://pypi.org/project/docker/)
       (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
       This module does *not* work with docker-py."
'''
| gpl-3.0 | -395,994,057,431,448,700 | 52.529412 | 125 | 0.673077 | false |
veltzer/openbook | scripts/graph.py | 1 | 3860 | #!/usr/bin/env python
"""
this script gets the graph data for the openbook progress report
the idea is to be able to see in a graph the progress being made in
this project.
TODO:
- modify this script to produce counts for both jazz and non-jazz tunes.
(very easy). This way the data that is outputted will be related to the openbook pdf.
And do this also for completion level 5.
"""
import subprocess
import dateutil.parser
import os.path
import configparser
import getpass
import pymysql
import tqdm
from dateutil import tz
##############
# parameters #
##############

# When True, print verbose progress information to stdout.
debug = False
# When True, write the results into the database; set False for a dry run.
doDb = True
#############
# functions #
#############
'''
get the configuration, including user and password from the ~/.my.cnf
file of the user
if no such file exists then use sensible defaults
'''
def get_config():
d = {}
inifile = os.path.expanduser('~/.my.cnf')
if os.path.isfile(inifile):
config = configparser.ConfigParser()
config.read(inifile)
if config.has_option('mysql', 'user'):
d['user'] = config.get('mysql', 'user')
else:
d['user'] = getpass.getuser()
if config.has_option('mysql', 'database'):
d['database'] = config.get('mysql', 'database')
else:
d['database'] = 'mysql'
if config.has_option('mysql', 'password'):
d['password'] = config.get('mysql', 'password')
return d
else:
d['user'] = getpass.getuser()
d['database'] = 'mysql'
return d
def main():
    """Rebuild the 'openbook_progress' graph from the git history.

    Walks every commit of the current repository in chronological order,
    counts the tune source files (.mako/.temp/.gpp/.ly) present at that
    commit, and stores one (commit, date, count) data point per commit in
    the TbGraph/TbGraphData tables.
    """
    connection = pymysql.connect(**get_config())
    cursor = None
    row_id = None
    if doDb:
        # remove the old data
        cursor = connection.cursor()
        cursor.execute('SELECT id FROM TbGraph WHERE name=\'openbook_progress\'')
        row = cursor.fetchone()
        # only remove data if we already have data
        if row is not None:
            row_id = int(row[0])
            if debug:
                print('id is', row_id)
            cursor.execute('DELETE from TbGraphData WHERE graphId=%s', (row_id,))
            cursor.execute('DELETE from TbGraph WHERE id=%s', (row_id,))
        # insert a new row into the graph meta data
        cursor.execute('INSERT INTO TbGraph (name) VALUES(\'openbook_progress\')')
        row_id = cursor.lastrowid
        if debug:
            print('id is', row_id)
    # this gets all commits in the right order
    commits = subprocess.check_output(['git', 'log', '--format=%H', '--reverse']).decode().split('\n')
    # removes the extra element that I don't need which is the current commit
    commits.pop()
    for commit in tqdm.tqdm(commits):
        # Commit date, normalized to UTC.
        d1 = subprocess.check_output(['git', 'show', '-s', '--format=%ci', commit]).decode().strip()
        d2 = dateutil.parser.parse(d1)
        dt = d2.astimezone(tz.tzutc())
        count_mako = 0
        count_temp = 0
        count_gpp = 0
        count_ly = 0
        # Count tune files by extension in the tree at this commit; the
        # repository migrated formats over time, so the maximum of the
        # four counts approximates the number of tunes.
        lines = subprocess.check_output(['git', 'ls-tree', '-r', commit]).decode().split('\n')
        for line in lines:
            if line.endswith('.mako'):
                count_mako += 1
            if line.endswith('.temp'):
                count_temp += 1
            if line.endswith('.gpp'):
                count_gpp += 1
            if line.endswith('.ly'):
                count_ly += 1
        count = max(count_mako, count_temp, count_gpp, count_ly)
        if debug:
            print('commit is', commit)
            print('dt is', str(dt))
            print('count is', str(count))
        if doDb:
            cursor.execute('INSERT INTO TbGraphData (tag,dt,value,graphId) VALUES(%s,%s,%s,%s)',
                           (commit, dt, count, row_id))
    # commit everything...
    # NOTE(review): the connection is only closed when doDb is True; when
    # doDb is False the connection object leaks — confirm intended.
    if doDb:
        cursor.close()
        connection.commit()
        connection.close()
# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| gpl-3.0 | -4,699,438,864,874,608,000 | 29.634921 | 102 | 0.563472 | false |
huahang/typhoon-blade | src/blade/test_scheduler.py | 3 | 8125 | # Copyright (c) 2012 Tencent Inc.
# All rights reserved.
#
# Author: Michaelpeng <[email protected]>
# Date: February 29, 2012
"""
This is a thread module for blade which is used to spawn
threads to finish some kind of work.
"""
import Queue
import subprocess
import sys
import threading
import time
import traceback
import blade_util
import console
# Maps negative subprocess return codes to POSIX signal names: a negative
# Popen.returncode of -N means the child was terminated by signal N.
signal_map = {-1: 'SIGHUP', -2: 'SIGINT', -3: 'SIGQUIT',
              -4: 'SIGILL', -5: 'SIGTRAP', -6: 'SIGABRT',
              -7: 'SIGBUS', -8: 'SIGFPE', -9: 'SIGKILL',
              -10: 'SIGUSR1', -11: 'SIGSEGV', -12: 'SIGUSR2',
              -13: 'SIGPIPE', -14: 'SIGALRM', -15: 'SIGTERM',
              -17: 'SIGCHLD', -18: 'SIGCONT', -19: 'SIGSTOP',
              -20: 'SIGTSTP', -21: 'SIGTTIN', -22: 'SIGTTOU',
              -23: 'SIGURG', -24: 'SIGXCPU', -25: 'SIGXFSZ',
              -26: 'SIGVTALRM', -27: 'SIGPROF', -28: 'SIGWINCH',
              -29: 'SIGIO', -30: 'SIGPWR', -31: 'SIGSYS'}
class WorkerThread(threading.Thread):
    """A worker thread that runs one callable (the test-processing loop)."""

    def __init__(self, worker_args, proc_func, args):
        """Init methods for this thread.

        worker_args: numeric worker id; proc_func: callable executed by
        run(); args: positional arguments passed to proc_func.
        """
        threading.Thread.__init__(self)
        self.worker_args = worker_args
        self.func_args = args
        self.job_handler = proc_func
        self.thread_id = int(self.worker_args)
        self.start_working_time = time.time()
        self.end_working_time = None
        # Result of job_handler, readable through get_return().
        self.ret = None
        console.info('blade test executor %d starts to work' % self.thread_id)

    def __process(self):
        """Private handler to handle one job. """
        console.info('blade worker %d starts to process' % self.thread_id)
        console.info('blade worker %d finish' % self.thread_id)
        return

    def get_return(self):
        """returns worker result to caller. """
        return self.ret

    def run(self):
        """executes and runs here. """
        # NOTE(review): Thread.run's return value is ignored by the
        # threading machinery; callers must use get_return() instead.
        try:
            if self.job_handler:
                self.ret = self.job_handler(*self.func_args)
                self.end_working_time = time.time()
                return True
            else:
                self.__process()
                return True
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit; consider narrowing to `except Exception`.
        except:
            (ErrorType, ErrorValue, ErrorTB) = sys.exc_info()
            print sys.exc_info()
            traceback.print_exc(ErrorTB)
class TestScheduler(object):
"""TestScheduler. """
def __init__(self, tests_list, jobs, tests_run_map):
"""init method. """
self.tests_list = tests_list
self.jobs = jobs
self.tests_run_map = tests_run_map
self.tests_run_map_lock = threading.Lock()
self.worker_threads = []
self.cpu_core_num = blade_util.cpu_count()
self.num_of_tests = len(self.tests_list)
self.max_worker_threads = 16
self.threads = []
self.tests_stdout_map = {}
self.failed_targets = []
self.failed_targets_lock = threading.Lock()
self.tests_stdout_lock = threading.Lock()
self.num_of_run_tests = 0
self.num_of_run_tests_lock = threading.Lock()
self.job_queue = Queue.Queue(0)
self.exclusive_job_queue = Queue.Queue(0)
def __get_workers_num(self):
"""get the number of thread workers. """
max_workers = max([self.cpu_core_num, self.max_worker_threads])
if max_workers == 0:
max_workers = self.max_worker_threads
if self.jobs <= 1:
return 1
elif self.jobs > max_workers:
self.jobs = max_workers
if self.num_of_tests <= self.jobs:
return self.num_of_tests
else:
return self.jobs
return 1
def __get_result(self, returncode):
"""translate result from returncode. """
result = 'SUCCESS'
if returncode:
result = signal_map.get(returncode, 'FAILED')
result = '%s:%s' % (result, returncode)
return result
def _run_job_redirect(self, job):
"""run job, redirect the output. """
(target, run_dir, test_env, cmd) = job
test_name = '%s:%s' % (target.path, target.name)
console.info('Running %s' % cmd)
p = subprocess.Popen(cmd,
env=test_env,
cwd=run_dir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
(stdoutdata, stderrdata) = p.communicate()
result = self.__get_result(p.returncode)
console.info('Output of %s:\n%s\n%s finished: %s\n' % (test_name,
stdoutdata, test_name, result))
return p.returncode
def _run_job(self, job):
"""run job, do not redirect the output. """
(target, run_dir, test_env, cmd) = job
console.info('Running %s' % cmd)
p = subprocess.Popen(cmd, env=test_env, cwd=run_dir, close_fds=True)
p.wait()
result = self.__get_result(p.returncode)
console.info('%s/%s finished : %s\n' % (
target.path, target.name, result))
return p.returncode
def _process_command(self, job_queue, redirect):
"""process routine.
Each test is a tuple (target, run_dir, env, cmd)
"""
while not job_queue.empty():
job = job_queue.get()
target = job[0]
target_key = '%s:%s' % (target.path, target.name)
start_time = time.time()
try:
if redirect:
returncode = self._run_job_redirect(job)
else:
returncode = self._run_job(job)
except OSError, e:
console.error('%s: Create test process error: %s' %
(target_key, str(e)))
returncode = 255
costtime = time.time() - start_time
if returncode:
target.data['test_exit_code'] = returncode
self.failed_targets_lock.acquire()
self.failed_targets.append(target)
self.failed_targets_lock.release()
self.tests_run_map_lock.acquire()
run_item_map = self.tests_run_map.get(target.key, {})
if run_item_map:
run_item_map['result'] = self.__get_result(returncode)
run_item_map['costtime'] = costtime
self.tests_run_map_lock.release()
self.num_of_run_tests_lock.acquire()
self.num_of_run_tests += 1
self.num_of_run_tests_lock.release()
return True
def print_summary(self):
"""print the summary output of tests. """
console.info('There are %d tests scheduled to run by scheduler' % (len(self.tests_list)))
def _join_thread(self, t):
"""Join thread and keep signal awareable"""
# The Thread.join without timeout will block signals, which makes
# blade can't be terminated by Ctrl-C
while t.isAlive():
t.join(1)
    def schedule_jobs(self):
        """Dispatch all queued tests to worker threads and wait for them.

        Non-exclusive tests run concurrently on a pool of workers;
        tests whose target is marked 'exclusive' are held back and run
        serially by a single worker afterwards.  Always returns True.
        """
        if self.num_of_tests <= 0:
            # Nothing to schedule.
            return True
        num_of_workers = self.__get_workers_num()
        console.info('spawn %d worker(s) to run tests' % num_of_workers)
        # Partition the tests: exclusive ones must not run in parallel
        # with anything else.
        for i in self.tests_list:
            target = i[0]
            if target.data.get('exclusive'):
                self.exclusive_job_queue.put(i)
            else:
                self.job_queue.put(i)
        # Redirect (capture) output only when more than one worker would
        # otherwise interleave its output on the console.
        test_arg = [self.job_queue, num_of_workers > 1]
        for i in range(num_of_workers):
            t = WorkerThread((i), self._process_command, args=test_arg)
            t.start()
            self.threads.append(t)
        for t in self.threads:
            self._join_thread(t)
        # Once the parallel phase is done, run the exclusive tests one at
        # a time without output redirection.
        if not self.exclusive_job_queue.empty():
            console.info('spawn 1 worker to run exclusive tests')
            test_arg = [self.exclusive_job_queue, False]
            last_t = WorkerThread((num_of_workers), self._process_command, args=test_arg)
            last_t.start()
            self._join_thread(last_t)
        self.print_summary()
        return True
| bsd-3-clause | -8,000,737,057,557,708,000 | 32.713693 | 97 | 0.538338 | false |
dparlevliet/zelenka-report-storage | server-db/twisted/conch/test/test_userauth.py | 4 | 39526 | # -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the implementation of the ssh-userauth service.
Maintainer: Paul Swartz
"""
from zope.interface import implements
from twisted.cred.checkers import ICredentialsChecker
from twisted.cred.credentials import IUsernamePassword, ISSHPrivateKey
from twisted.cred.credentials import IPluggableAuthenticationModules
from twisted.cred.credentials import IAnonymous
from twisted.cred.error import UnauthorizedLogin
from twisted.cred.portal import IRealm, Portal
from twisted.conch.error import ConchError, ValidPublicKey
from twisted.internet import defer, task
from twisted.protocols import loopback
from twisted.trial import unittest
# PyCrypto and PyASN1 are optional dependencies.  When either is missing,
# every test case below is skipped (via its ``skip`` attribute checking
# ``keys is None``) and these stub classes merely keep the module
# importable, since later class definitions subclass from them.
try:
    import Crypto.Cipher.DES3
    import pyasn1
except ImportError:
    keys = None
    class transport:
        class SSHTransportBase:
            """
            A stub class so that later class definitions won't die.
            """
    class userauth:
        class SSHUserAuthClient:
            """
            A stub class so that later class definitions won't die.
            """
else:
    # Both dependencies are present: pull in the real Conch modules.
    from twisted.conch.ssh.common import NS
    from twisted.conch.checkers import SSHProtocolChecker
    from twisted.conch.ssh import keys, userauth, transport
    from twisted.conch.test import keydata
class ClientUserAuth(userauth.SSHUserAuthClient):
    """
    A mock user auth client.
    """


    def getPublicKey(self):
        """
        Offer the DSA key on the first call and the RSA key afterwards.

        The first answer is wrapped in a Deferred while later ones are
        returned directly, exercising both return styles the client
        supports.
        """
        if not self.lastPublicKey:
            return defer.succeed(keys.Key.fromString(keydata.publicDSA_openssh))
        return keys.Key.fromString(keydata.publicRSA_openssh)


    def getPrivateKey(self):
        """
        Return a Deferred firing with the RSA private key object.
        """
        return defer.succeed(keys.Key.fromString(keydata.privateRSA_openssh))


    def getPassword(self, prompt=None):
        """
        Return a Deferred firing with 'foo' as the password.
        """
        return defer.succeed('foo')


    def getGenericAnswers(self, name, information, answers):
        """
        Return a Deferred firing with 'foo' as the answer to both
        questions.
        """
        return defer.succeed(('foo', 'foo'))
class OldClientAuth(userauth.SSHUserAuthClient):
    """
    The old SSHUserAuthClient returned a PyCrypto key object from
    getPrivateKey() and a string from getPublicKey
    """


    def getPrivateKey(self):
        # Old-style clients handed back the underlying PyCrypto object,
        # wrapped in a Deferred.
        rsaKey = keys.Key.fromString(keydata.privateRSA_openssh)
        return defer.succeed(rsaKey.keyObject)


    def getPublicKey(self):
        # Old-style clients handed back the raw public key blob string.
        rsaKey = keys.Key.fromString(keydata.publicRSA_openssh)
        return rsaKey.blob()
class ClientAuthWithoutPrivateKey(userauth.SSHUserAuthClient):
    """
    This client doesn't have a private key, but it does have a public key.
    """


    def getPrivateKey(self):
        # No private key is available.
        return None


    def getPublicKey(self):
        return keys.Key.fromString(keydata.publicRSA_openssh)
class FakeTransport(transport.SSHTransportBase):
    """
    L{userauth.SSHUserAuthServer} expects an SSH transport which has a factory
    attribute which has a portal attribute. Because the portal is important for
    testing authentication, we need to be able to provide an interesting portal
    object to the L{SSHUserAuthServer}.

    In addition, we want to be able to capture any packets sent over the
    transport.

    @ivar packets: a list of 2-tuples: (messageType, data). Each 2-tuple is
        a sent packet.
    @type packets: C{list}

    @ivar lostConnection: True if loseConnection has been called on us.
    @type lostConnection: C{bool}
    """
    class Service(object):
        """
        A mock service, representing the other service offered by the server.
        """
        name = 'nancy'
        def serviceStarted(self):
            # No startup behaviour is needed by the mock service.
            pass
    class Factory(object):
        """
        A mock factory, representing the factory that spawned this user auth
        service.
        """
        def getService(self, transport, service):
            """
            Return our fake service.

            Only the 'none' service name is recognized; anything else
            yields an implicit None.
            """
            if service == 'none':
                return FakeTransport.Service
    def __init__(self, portal):
        # SSHTransportBase.__init__ is deliberately not called; only the
        # attributes the userauth service touches are set up here.
        self.factory = self.Factory()
        self.factory.portal = portal
        self.lostConnection = False
        # The transport doubles as its own lower-level transport.
        self.transport = self
        self.packets = []
    def sendPacket(self, messageType, message):
        """
        Record the packet sent by the service.
        """
        self.packets.append((messageType, message))
    def isEncrypted(self, direction):
        """
        Pretend that this transport encrypts traffic in both directions. The
        SSHUserAuthServer disables password authentication if the transport
        isn't encrypted.
        """
        return True
    def loseConnection(self):
        # Only record the request; nothing is actually torn down.
        self.lostConnection = True
class Realm(object):
    """
    A mock realm for testing L{userauth.SSHUserAuthServer}.

    This realm is not actually used in the course of testing, so it
    returns the simplest thing that could possibly work.
    """
    implements(IRealm)


    def requestAvatar(self, avatarId, mind, *interfaces):
        logout = lambda: None
        return defer.succeed((interfaces[0], None, logout))
class PasswordChecker(object):
    """
    A very simple username/password checker which authenticates anyone whose
    password matches their username and rejects all others.
    """
    credentialInterfaces = (IUsernamePassword,)
    implements(ICredentialsChecker)


    def requestAvatarId(self, creds):
        if creds.username != creds.password:
            return defer.fail(UnauthorizedLogin("Invalid username/password pair"))
        return defer.succeed(creds.username)
class PrivateKeyChecker(object):
    """
    A very simple public key checker which authenticates anyone whose
    public/private keypair is the same keydata.public/privateRSA_openssh.
    """
    credentialInterfaces = (ISSHPrivateKey,)
    implements(ICredentialsChecker)

    def requestAvatarId(self, creds):
        # Only the well-known RSA test key is ever accepted.
        if creds.blob == keys.Key.fromString(keydata.publicRSA_openssh).blob():
            if creds.signature is not None:
                obj = keys.Key.fromString(creds.blob)
                if obj.verify(creds.signature, creds.sigData):
                    return creds.username
                # A bad signature falls through to UnauthorizedLogin below.
            else:
                # No signature yet: the key itself would be acceptable,
                # which the userauth server reports as MSG_USERAUTH_PK_OK.
                raise ValidPublicKey()
        raise UnauthorizedLogin()
class PAMChecker(object):
    """
    A simple PAM checker which asks the user for a password, verifying them
    if the password is the same as their username.
    """
    credentialInterfaces = (IPluggableAuthenticationModules,)
    implements(ICredentialsChecker)


    def requestAvatarId(self, creds):
        expected = [(creds.username, 0), (creds.username, 0)]

        def verify(responses):
            # Both answers must echo the username back.
            if responses != expected:
                raise UnauthorizedLogin()
            return creds.username

        conversation = creds.pamConversion([('Name: ', 2), ("Password: ", 1)])
        return conversation.addCallback(verify)
class AnonymousChecker(object):
    """
    A simple checker which isn't supported by L{SSHUserAuthServer}.
    """
    # IAnonymous has no corresponding SSH authentication method name, so
    # the userauth server should silently ignore this checker (exercised
    # by test_ignoreUnknownCredInterfaces).
    credentialInterfaces = (IAnonymous,)
    implements(ICredentialsChecker)
class SSHUserAuthServerTestCase(unittest.TestCase):
"""
Tests for SSHUserAuthServer.
"""
if keys is None:
skip = "cannot run w/o PyCrypto"
def setUp(self):
self.realm = Realm()
self.portal = Portal(self.realm)
self.portal.registerChecker(PasswordChecker())
self.portal.registerChecker(PrivateKeyChecker())
self.portal.registerChecker(PAMChecker())
self.authServer = userauth.SSHUserAuthServer()
self.authServer.transport = FakeTransport(self.portal)
self.authServer.serviceStarted()
self.authServer.supportedAuthentications.sort() # give a consistent
# order
def tearDown(self):
self.authServer.serviceStopped()
self.authServer = None
def _checkFailed(self, ignored):
"""
Check that the authentication has failed.
"""
self.assertEqual(self.authServer.transport.packets[-1],
(userauth.MSG_USERAUTH_FAILURE,
NS('keyboard-interactive,password,publickey') + '\x00'))
def test_noneAuthentication(self):
"""
A client may request a list of authentication 'method name' values
that may continue by using the "none" authentication 'method name'.
See RFC 4252 Section 5.2.
"""
d = self.authServer.ssh_USERAUTH_REQUEST(NS('foo') + NS('service') +
NS('none'))
return d.addCallback(self._checkFailed)
def test_successfulPasswordAuthentication(self):
"""
When provided with correct password authentication information, the
server should respond by sending a MSG_USERAUTH_SUCCESS message with
no other data.
See RFC 4252, Section 5.1.
"""
packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('foo')
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
def check(ignored):
self.assertEqual(
self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_SUCCESS, '')])
return d.addCallback(check)
def test_failedPasswordAuthentication(self):
"""
When provided with invalid authentication details, the server should
respond by sending a MSG_USERAUTH_FAILURE message which states whether
the authentication was partially successful, and provides other, open
options for authentication.
See RFC 4252, Section 5.1.
"""
# packet = username, next_service, authentication type, FALSE, password
packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('bar')
self.authServer.clock = task.Clock()
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.assertEqual(self.authServer.transport.packets, [])
self.authServer.clock.advance(2)
return d.addCallback(self._checkFailed)
def test_successfulPrivateKeyAuthentication(self):
"""
Test that private key authentication completes sucessfully,
"""
blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
obj = keys.Key.fromString(keydata.privateRSA_openssh)
packet = (NS('foo') + NS('none') + NS('publickey') + '\xff'
+ NS(obj.sshType()) + NS(blob))
self.authServer.transport.sessionID = 'test'
signature = obj.sign(NS('test') + chr(userauth.MSG_USERAUTH_REQUEST)
+ packet)
packet += NS(signature)
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
def check(ignored):
self.assertEqual(self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_SUCCESS, '')])
return d.addCallback(check)
def test_requestRaisesConchError(self):
"""
ssh_USERAUTH_REQUEST should raise a ConchError if tryAuth returns
None. Added to catch a bug noticed by pyflakes.
"""
d = defer.Deferred()
def mockCbFinishedAuth(self, ignored):
self.fail('request should have raised ConochError')
def mockTryAuth(kind, user, data):
return None
def mockEbBadAuth(reason):
d.errback(reason.value)
self.patch(self.authServer, 'tryAuth', mockTryAuth)
self.patch(self.authServer, '_cbFinishedAuth', mockCbFinishedAuth)
self.patch(self.authServer, '_ebBadAuth', mockEbBadAuth)
packet = NS('user') + NS('none') + NS('public-key') + NS('data')
# If an error other than ConchError is raised, this will trigger an
# exception.
self.authServer.ssh_USERAUTH_REQUEST(packet)
return self.assertFailure(d, ConchError)
def test_verifyValidPrivateKey(self):
"""
Test that verifying a valid private key works.
"""
blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
packet = (NS('foo') + NS('none') + NS('publickey') + '\x00'
+ NS('ssh-rsa') + NS(blob))
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
def check(ignored):
self.assertEqual(self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_PK_OK, NS('ssh-rsa') + NS(blob))])
return d.addCallback(check)
def test_failedPrivateKeyAuthenticationWithoutSignature(self):
"""
Test that private key authentication fails when the public key
is invalid.
"""
blob = keys.Key.fromString(keydata.publicDSA_openssh).blob()
packet = (NS('foo') + NS('none') + NS('publickey') + '\x00'
+ NS('ssh-dsa') + NS(blob))
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
return d.addCallback(self._checkFailed)
def test_failedPrivateKeyAuthenticationWithSignature(self):
"""
Test that private key authentication fails when the public key
is invalid.
"""
blob = keys.Key.fromString(keydata.publicRSA_openssh).blob()
obj = keys.Key.fromString(keydata.privateRSA_openssh)
packet = (NS('foo') + NS('none') + NS('publickey') + '\xff'
+ NS('ssh-rsa') + NS(blob) + NS(obj.sign(blob)))
self.authServer.transport.sessionID = 'test'
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
return d.addCallback(self._checkFailed)
def test_successfulPAMAuthentication(self):
"""
Test that keyboard-interactive authentication succeeds.
"""
packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
+ NS('') + NS(''))
response = '\x00\x00\x00\x02' + NS('foo') + NS('foo')
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
def check(ignored):
self.assertEqual(self.authServer.transport.packets,
[(userauth.MSG_USERAUTH_INFO_REQUEST, (NS('') + NS('')
+ NS('') + '\x00\x00\x00\x02' + NS('Name: ') + '\x01'
+ NS('Password: ') + '\x00')),
(userauth.MSG_USERAUTH_SUCCESS, '')])
return d.addCallback(check)
def test_failedPAMAuthentication(self):
"""
Test that keyboard-interactive authentication fails.
"""
packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
+ NS('') + NS(''))
response = '\x00\x00\x00\x02' + NS('bar') + NS('bar')
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
def check(ignored):
self.assertEqual(self.authServer.transport.packets[0],
(userauth.MSG_USERAUTH_INFO_REQUEST, (NS('') + NS('')
+ NS('') + '\x00\x00\x00\x02' + NS('Name: ') + '\x01'
+ NS('Password: ') + '\x00')))
return d.addCallback(check).addCallback(self._checkFailed)
def test_invalid_USERAUTH_INFO_RESPONSE_not_enough_data(self):
"""
If ssh_USERAUTH_INFO_RESPONSE gets an invalid packet,
the user authentication should fail.
"""
packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
+ NS('') + NS(''))
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.ssh_USERAUTH_INFO_RESPONSE(NS('\x00\x00\x00\x00' +
NS('hi')))
return d.addCallback(self._checkFailed)
def test_invalid_USERAUTH_INFO_RESPONSE_too_much_data(self):
"""
If ssh_USERAUTH_INFO_RESPONSE gets too much data, the user
authentication should fail.
"""
packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
+ NS('') + NS(''))
response = '\x00\x00\x00\x02' + NS('foo') + NS('foo') + NS('foo')
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.ssh_USERAUTH_INFO_RESPONSE(response)
return d.addCallback(self._checkFailed)
def test_onlyOnePAMAuthentication(self):
"""
Because it requires an intermediate message, one can't send a second
keyboard-interactive request while the first is still pending.
"""
packet = (NS('foo') + NS('none') + NS('keyboard-interactive')
+ NS('') + NS(''))
self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.ssh_USERAUTH_REQUEST(packet)
self.assertEqual(self.authServer.transport.packets[-1][0],
transport.MSG_DISCONNECT)
self.assertEqual(self.authServer.transport.packets[-1][1][3],
chr(transport.DISCONNECT_PROTOCOL_ERROR))
def test_ignoreUnknownCredInterfaces(self):
"""
L{SSHUserAuthServer} sets up
C{SSHUserAuthServer.supportedAuthentications} by checking the portal's
credentials interfaces and mapping them to SSH authentication method
strings. If the Portal advertises an interface that
L{SSHUserAuthServer} can't map, it should be ignored. This is a white
box test.
"""
server = userauth.SSHUserAuthServer()
server.transport = FakeTransport(self.portal)
self.portal.registerChecker(AnonymousChecker())
server.serviceStarted()
server.serviceStopped()
server.supportedAuthentications.sort() # give a consistent order
self.assertEqual(server.supportedAuthentications,
['keyboard-interactive', 'password', 'publickey'])
def test_removePasswordIfUnencrypted(self):
"""
Test that the userauth service does not advertise password
authentication if the password would be send in cleartext.
"""
self.assertIn('password', self.authServer.supportedAuthentications)
# no encryption
clearAuthServer = userauth.SSHUserAuthServer()
clearAuthServer.transport = FakeTransport(self.portal)
clearAuthServer.transport.isEncrypted = lambda x: False
clearAuthServer.serviceStarted()
clearAuthServer.serviceStopped()
self.assertNotIn('password', clearAuthServer.supportedAuthentications)
# only encrypt incoming (the direction the password is sent)
halfAuthServer = userauth.SSHUserAuthServer()
halfAuthServer.transport = FakeTransport(self.portal)
halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
halfAuthServer.serviceStarted()
halfAuthServer.serviceStopped()
self.assertIn('password', halfAuthServer.supportedAuthentications)
def test_removeKeyboardInteractiveIfUnencrypted(self):
"""
Test that the userauth service does not advertise keyboard-interactive
authentication if the password would be send in cleartext.
"""
self.assertIn('keyboard-interactive',
self.authServer.supportedAuthentications)
# no encryption
clearAuthServer = userauth.SSHUserAuthServer()
clearAuthServer.transport = FakeTransport(self.portal)
clearAuthServer.transport.isEncrypted = lambda x: False
clearAuthServer.serviceStarted()
clearAuthServer.serviceStopped()
self.assertNotIn(
'keyboard-interactive', clearAuthServer.supportedAuthentications)
# only encrypt incoming (the direction the password is sent)
halfAuthServer = userauth.SSHUserAuthServer()
halfAuthServer.transport = FakeTransport(self.portal)
halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
halfAuthServer.serviceStarted()
halfAuthServer.serviceStopped()
self.assertIn('keyboard-interactive',
halfAuthServer.supportedAuthentications)
def test_unencryptedConnectionWithoutPasswords(self):
"""
If the L{SSHUserAuthServer} is not advertising passwords, then an
unencrypted connection should not cause any warnings or exceptions.
This is a white box test.
"""
# create a Portal without password authentication
portal = Portal(self.realm)
portal.registerChecker(PrivateKeyChecker())
# no encryption
clearAuthServer = userauth.SSHUserAuthServer()
clearAuthServer.transport = FakeTransport(portal)
clearAuthServer.transport.isEncrypted = lambda x: False
clearAuthServer.serviceStarted()
clearAuthServer.serviceStopped()
self.assertEqual(clearAuthServer.supportedAuthentications,
['publickey'])
# only encrypt incoming (the direction the password is sent)
halfAuthServer = userauth.SSHUserAuthServer()
halfAuthServer.transport = FakeTransport(portal)
halfAuthServer.transport.isEncrypted = lambda x: x == 'in'
halfAuthServer.serviceStarted()
halfAuthServer.serviceStopped()
self.assertEqual(clearAuthServer.supportedAuthentications,
['publickey'])
def test_loginTimeout(self):
"""
Test that the login times out.
"""
timeoutAuthServer = userauth.SSHUserAuthServer()
timeoutAuthServer.clock = task.Clock()
timeoutAuthServer.transport = FakeTransport(self.portal)
timeoutAuthServer.serviceStarted()
timeoutAuthServer.clock.advance(11 * 60 * 60)
timeoutAuthServer.serviceStopped()
self.assertEqual(timeoutAuthServer.transport.packets,
[(transport.MSG_DISCONNECT,
'\x00' * 3 +
chr(transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE) +
NS("you took too long") + NS(''))])
self.assertTrue(timeoutAuthServer.transport.lostConnection)
def test_cancelLoginTimeout(self):
"""
Test that stopping the service also stops the login timeout.
"""
timeoutAuthServer = userauth.SSHUserAuthServer()
timeoutAuthServer.clock = task.Clock()
timeoutAuthServer.transport = FakeTransport(self.portal)
timeoutAuthServer.serviceStarted()
timeoutAuthServer.serviceStopped()
timeoutAuthServer.clock.advance(11 * 60 * 60)
self.assertEqual(timeoutAuthServer.transport.packets, [])
self.assertFalse(timeoutAuthServer.transport.lostConnection)
def test_tooManyAttempts(self):
"""
Test that the server disconnects if the client fails authentication
too many times.
"""
packet = NS('foo') + NS('none') + NS('password') + chr(0) + NS('bar')
self.authServer.clock = task.Clock()
for i in range(21):
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
self.authServer.clock.advance(2)
def check(ignored):
self.assertEqual(self.authServer.transport.packets[-1],
(transport.MSG_DISCONNECT,
'\x00' * 3 +
chr(transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE) +
NS("too many bad auths") + NS('')))
return d.addCallback(check)
def test_failIfUnknownService(self):
"""
If the user requests a service that we don't support, the
authentication should fail.
"""
packet = NS('foo') + NS('') + NS('password') + chr(0) + NS('foo')
self.authServer.clock = task.Clock()
d = self.authServer.ssh_USERAUTH_REQUEST(packet)
return d.addCallback(self._checkFailed)
def test__pamConvErrors(self):
"""
_pamConv should fail if it gets a message that's not 1 or 2.
"""
def secondTest(ignored):
d2 = self.authServer._pamConv([('', 90)])
return self.assertFailure(d2, ConchError)
d = self.authServer._pamConv([('', 3)])
return self.assertFailure(d, ConchError).addCallback(secondTest)
def test_tryAuthEdgeCases(self):
"""
tryAuth() has two edge cases that are difficult to reach.
1) an authentication method auth_* returns None instead of a Deferred.
2) an authentication type that is defined does not have a matching
auth_* method.
Both these cases should return a Deferred which fails with a
ConchError.
"""
def mockAuth(packet):
return None
self.patch(self.authServer, 'auth_publickey', mockAuth) # first case
self.patch(self.authServer, 'auth_password', None) # second case
def secondTest(ignored):
d2 = self.authServer.tryAuth('password', None, None)
return self.assertFailure(d2, ConchError)
d1 = self.authServer.tryAuth('publickey', None, None)
return self.assertFailure(d1, ConchError).addCallback(secondTest)
class SSHUserAuthClientTestCase(unittest.TestCase):
"""
Tests for SSHUserAuthClient.
"""
if keys is None:
skip = "cannot run w/o PyCrypto"
def setUp(self):
self.authClient = ClientUserAuth('foo', FakeTransport.Service())
self.authClient.transport = FakeTransport(None)
self.authClient.transport.sessionID = 'test'
self.authClient.serviceStarted()
def tearDown(self):
self.authClient.serviceStopped()
self.authClient = None
def test_init(self):
"""
Test that client is initialized properly.
"""
self.assertEqual(self.authClient.user, 'foo')
self.assertEqual(self.authClient.instance.name, 'nancy')
self.assertEqual(self.authClient.transport.packets,
[(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('none'))])
def test_USERAUTH_SUCCESS(self):
"""
Test that the client succeeds properly.
"""
instance = [None]
def stubSetService(service):
instance[0] = service
self.authClient.transport.setService = stubSetService
self.authClient.ssh_USERAUTH_SUCCESS('')
self.assertEqual(instance[0], self.authClient.instance)
def test_publickey(self):
"""
Test that the client can authenticate with a public key.
"""
self.authClient.ssh_USERAUTH_FAILURE(NS('publickey') + '\x00')
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('publickey') + '\x00' + NS('ssh-dss')
+ NS(keys.Key.fromString(
keydata.publicDSA_openssh).blob())))
# that key isn't good
self.authClient.ssh_USERAUTH_FAILURE(NS('publickey') + '\x00')
blob = NS(keys.Key.fromString(keydata.publicRSA_openssh).blob())
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, (NS('foo') + NS('nancy')
+ NS('publickey') + '\x00'+ NS('ssh-rsa') + blob)))
self.authClient.ssh_USERAUTH_PK_OK(NS('ssh-rsa')
+ NS(keys.Key.fromString(keydata.publicRSA_openssh).blob()))
sigData = (NS(self.authClient.transport.sessionID)
+ chr(userauth.MSG_USERAUTH_REQUEST) + NS('foo')
+ NS('nancy') + NS('publickey') + '\x01' + NS('ssh-rsa')
+ blob)
obj = keys.Key.fromString(keydata.privateRSA_openssh)
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('publickey') + '\x01' + NS('ssh-rsa') + blob
+ NS(obj.sign(sigData))))
def test_publickey_without_privatekey(self):
"""
If the SSHUserAuthClient doesn't return anything from signData,
the client should start the authentication over again by requesting
'none' authentication.
"""
authClient = ClientAuthWithoutPrivateKey('foo',
FakeTransport.Service())
authClient.transport = FakeTransport(None)
authClient.transport.sessionID = 'test'
authClient.serviceStarted()
authClient.tryAuth('publickey')
authClient.transport.packets = []
self.assertIs(authClient.ssh_USERAUTH_PK_OK(''), None)
self.assertEqual(authClient.transport.packets, [
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy') +
NS('none'))])
def test_old_publickey_getPublicKey(self):
"""
Old SSHUserAuthClients returned strings of public key blobs from
getPublicKey(). Test that a Deprecation warning is raised but the key is
verified correctly.
"""
oldAuth = OldClientAuth('foo', FakeTransport.Service())
oldAuth.transport = FakeTransport(None)
oldAuth.transport.sessionID = 'test'
oldAuth.serviceStarted()
oldAuth.transport.packets = []
self.assertWarns(DeprecationWarning, "Returning a string from "
"SSHUserAuthClient.getPublicKey() is deprecated since "
"Twisted 9.0. Return a keys.Key() instead.",
userauth.__file__, oldAuth.tryAuth, 'publickey')
self.assertEqual(oldAuth.transport.packets, [
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy') +
NS('publickey') + '\x00' + NS('ssh-rsa') +
NS(keys.Key.fromString(keydata.publicRSA_openssh).blob()))])
def test_old_publickey_getPrivateKey(self):
"""
Old SSHUserAuthClients returned a PyCrypto key object from
getPrivateKey(). Test that _cbSignData signs the data warns the
user about the deprecation, but signs the data correctly.
"""
oldAuth = OldClientAuth('foo', FakeTransport.Service())
d = self.assertWarns(DeprecationWarning, "Returning a PyCrypto key "
"object from SSHUserAuthClient.getPrivateKey() is "
"deprecated since Twisted 9.0. "
"Return a keys.Key() instead.", userauth.__file__,
oldAuth.signData, None, 'data')
def _checkSignedData(sig):
self.assertEqual(sig,
keys.Key.fromString(keydata.privateRSA_openssh).sign(
'data'))
d.addCallback(_checkSignedData)
return d
def test_no_publickey(self):
"""
If there's no public key, auth_publickey should return a Deferred
called back with a False value.
"""
self.authClient.getPublicKey = lambda x: None
d = self.authClient.tryAuth('publickey')
def check(result):
self.assertFalse(result)
return d.addCallback(check)
def test_password(self):
"""
Test that the client can authentication with a password. This
includes changing the password.
"""
self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\x00')
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('password') + '\x00' + NS('foo')))
self.authClient.ssh_USERAUTH_PK_OK(NS('') + NS(''))
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('password') + '\xff' + NS('foo') * 2))
def test_no_password(self):
"""
If getPassword returns None, tryAuth should return False.
"""
self.authClient.getPassword = lambda: None
self.assertFalse(self.authClient.tryAuth('password'))
def test_keyboardInteractive(self):
"""
Test that the client can authenticate using keyboard-interactive
authentication.
"""
self.authClient.ssh_USERAUTH_FAILURE(NS('keyboard-interactive')
+ '\x00')
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('keyboard-interactive') + NS('')*2))
self.authClient.ssh_USERAUTH_PK_OK(NS('')*3 + '\x00\x00\x00\x02'
+ NS('Name: ') + '\xff' + NS('Password: ') + '\x00')
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_INFO_RESPONSE, '\x00\x00\x00\x02'
+ NS('foo')*2))
def test_USERAUTH_PK_OK_unknown_method(self):
"""
If C{SSHUserAuthClient} gets a MSG_USERAUTH_PK_OK packet when it's not
expecting it, it should fail the current authentication and move on to
the next type.
"""
self.authClient.lastAuth = 'unknown'
self.authClient.transport.packets = []
self.authClient.ssh_USERAUTH_PK_OK('')
self.assertEqual(self.authClient.transport.packets,
[(userauth.MSG_USERAUTH_REQUEST, NS('foo') +
NS('nancy') + NS('none'))])
def test_USERAUTH_FAILURE_sorting(self):
"""
ssh_USERAUTH_FAILURE should sort the methods by their position
in SSHUserAuthClient.preferredOrder. Methods that are not in
preferredOrder should be sorted at the end of that list.
"""
def auth_firstmethod():
self.authClient.transport.sendPacket(255, 'here is data')
def auth_anothermethod():
self.authClient.transport.sendPacket(254, 'other data')
return True
self.authClient.auth_firstmethod = auth_firstmethod
self.authClient.auth_anothermethod = auth_anothermethod
# although they shouldn't get called, method callbacks auth_* MUST
# exist in order for the test to work properly.
self.authClient.ssh_USERAUTH_FAILURE(NS('anothermethod,password') +
'\x00')
# should send password packet
self.assertEqual(self.authClient.transport.packets[-1],
(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('password') + '\x00' + NS('foo')))
self.authClient.ssh_USERAUTH_FAILURE(
NS('firstmethod,anothermethod,password') + '\xff')
self.assertEqual(self.authClient.transport.packets[-2:],
[(255, 'here is data'), (254, 'other data')])
def test_disconnectIfNoMoreAuthentication(self):
"""
If there are no more available user authentication messages,
the SSHUserAuthClient should disconnect with code
DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE.
"""
self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\x00')
self.authClient.ssh_USERAUTH_FAILURE(NS('password') + '\xff')
self.assertEqual(self.authClient.transport.packets[-1],
(transport.MSG_DISCONNECT, '\x00\x00\x00\x0e' +
NS('no more authentication methods available') +
'\x00\x00\x00\x00'))
def test_ebAuth(self):
"""
_ebAuth (the generic authentication error handler) should send
a request for the 'none' authentication method.
"""
self.authClient.transport.packets = []
self.authClient._ebAuth(None)
self.assertEqual(self.authClient.transport.packets,
[(userauth.MSG_USERAUTH_REQUEST, NS('foo') + NS('nancy')
+ NS('none'))])
def test_defaults(self):
"""
getPublicKey() should return None. getPrivateKey() should return a
failed Deferred. getPassword() should return a failed Deferred.
getGenericAnswers() should return a failed Deferred.
"""
authClient = userauth.SSHUserAuthClient('foo', FakeTransport.Service())
self.assertIs(authClient.getPublicKey(), None)
def check(result):
result.trap(NotImplementedError)
d = authClient.getPassword()
return d.addCallback(self.fail).addErrback(check2)
def check2(result):
result.trap(NotImplementedError)
d = authClient.getGenericAnswers(None, None, None)
return d.addCallback(self.fail).addErrback(check3)
def check3(result):
result.trap(NotImplementedError)
d = authClient.getPrivateKey()
return d.addCallback(self.fail).addErrback(check)
class LoopbackTestCase(unittest.TestCase):
    """
    End-to-end test driving the userauth server and client against each
    other over an in-memory loopback connection.
    """
    if keys is None:
        skip = "cannot run w/o PyCrypto or PyASN1"
    class Factory:
        """
        A mock transport factory whose service simply drops the
        connection as soon as it starts, ending the loopback exchange.
        """
        class Service:
            name = 'TestService'
            def serviceStarted(self):
                # Authentication succeeded; terminate the loopback.
                self.transport.loseConnection()
            def serviceStopped(self):
                pass
        def getService(self, avatar, name):
            return self.Service
    def test_loopback(self):
        """
        Test that the userauth server and client play nicely with each other.
        """
        server = userauth.SSHUserAuthServer()
        client = ClientUserAuth('foo', self.Factory.Service())
        # set up transports
        server.transport = transport.SSHTransportBase()
        server.transport.service = server
        server.transport.isEncrypted = lambda x: True
        client.transport = transport.SSHTransportBase()
        client.transport.service = client
        server.transport.sessionID = client.transport.sessionID = ''
        # don't send key exchange packet
        server.transport.sendKexInit = client.transport.sendKexInit = \
            lambda: None
        # set up server authentication
        server.transport.factory = self.Factory()
        server.passwordDelay = 0 # remove bad password delay
        realm = Realm()
        portal = Portal(realm)
        checker = SSHProtocolChecker()
        checker.registerChecker(PasswordChecker())
        checker.registerChecker(PrivateKeyChecker())
        checker.registerChecker(PAMChecker())
        # Require all three authentication methods to succeed before
        # authentication is considered complete.
        checker.areDone = lambda aId: (
            len(checker.successfulCredentials[aId]) == 3)
        portal.registerChecker(checker)
        server.transport.factory.portal = portal
        d = loopback.loopbackAsync(server.transport, client.transport)
        server.transport.transport.logPrefix = lambda: '_ServerLoopback'
        client.transport.transport.logPrefix = lambda: '_ClientLoopback'
        server.serviceStarted()
        client.serviceStarted()
        def check(ignored):
            # The server must have switched to the post-auth service.
            self.assertEqual(server.transport.service.name, 'TestService')
        return d.addCallback(check)
class ModuleInitializationTestCase(unittest.TestCase):
    if keys is None:
        skip = "cannot run w/o PyCrypto or PyASN1"
    def test_messages(self):
        """
        Several SSH message types share the numeric value 60; both the
        server and the client must always map 60 to MSG_USERAUTH_PK_OK.
        """
        for side in (userauth.SSHUserAuthServer, userauth.SSHUserAuthClient):
            self.assertEqual(side.protocolMessages[60],
                             'MSG_USERAUTH_PK_OK')
| lgpl-3.0 | -1,446,955,946,522,393,300 | 35.700093 | 81 | 0.61939 | false |
luckielordie/conan | conans/test/built_type_setting_test.py | 2 | 2198 | import unittest
from conans.test.utils.tools import TestClient
class BuildTypeSettingTest(unittest.TestCase):
    """
    Regression test for https://github.com/conan-io/conan/issues/2500:
    a recipe restricted to the ``build_type`` setting must accept an
    undefined value (``build_type=None``) from the profile, while a
    test_package build where the setting does not exist must fail.
    """
    def test_build_type(self):
        # https://github.com/conan-io/conan/issues/2500
        client = TestClient()
        conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    settings = "build_type"
    def build(self):
        self.output.info("BUILD TYPE: %s" % (self.settings.build_type or "Not defined"))
"""
        test_conanfile = """from conans import ConanFile
class Pkg(ConanFile):
    settings = "build_type"
    def build(self):
        self.output.info("BUILD TYPE: %s" % (self.settings.build_type or "Not defined"))
    def test(self):
        pass
"""
        client.save({"conanfile.py": conanfile,
                     "test_package/conanfile.py": test_conanfile,
                     "myprofile": ""})
        # This won't fail, as it has a build_type=None, which is allowed
        client.run("export . Pkg/0.1@lasote/testing")
        client.run("install Pkg/0.1@lasote/testing -pr=myprofile --build")
        self.assertEqual(1, str(client.out).count("BUILD TYPE: Not defined"))
        # This is an error. test_package/conanfile won't have build_type defined, more restrictive
        error = client.run("create . Pkg/0.1@lasote/testing -pr=myprofile", ignore_error=True)
        self.assertTrue(error)
        self.assertEqual(1, str(client.out).count("BUILD TYPE: Not defined"))
        self.assertIn("ConanException: 'settings.build_type' doesn't exist", client.out)
        client.save({"conanfile.py": conanfile,
                     "test_package/conanfile.py": test_conanfile,
                     "myprofile": "[settings]\nbuild_type=None"})
        # This won't fail, as it has a build_type=None, which is allowed
        client.run("export . Pkg/0.1@lasote/testing")
        client.run("install Pkg/0.1@lasote/testing -pr=myprofile --build")
        self.assertEqual(1, str(client.out).count("BUILD TYPE: Not defined"))
        # This is NOT an error. build_type has a value = None
        client.run("create . Pkg/0.1@lasote/testing -pr=myprofile")
        self.assertEqual(2, str(client.out).count("BUILD TYPE: Not defined"))
| mit | 3,625,856,705,286,749,000 | 42.96 | 98 | 0.636033 | false |
GreenleafLab/NucleoATAC | nucleoatac/run_occ.py | 1 | 5567 | """
Script to make nucleosome occupancy track!
@author: Alicia Schep
"""
##### IMPORT MODULES #####
# import necessary python modules
#import matplotlib as mpl
#mpl.use('PS')
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import traceback
import itertools
import pysam
from pyatac.utils import shell_command,read_chrom_sizes_from_bam, read_chrom_sizes_from_fasta
from pyatac.chunk import ChunkList
from nucleoatac.Occupancy import FragmentMixDistribution, OccupancyParameters, OccChunk
from pyatac.fragmentsizes import FragmentSizes
from pyatac.bias import PWM
def _occHelper(arg):
    """Worker: compute nucleosome occupancy for one set of bed regions.

    ``arg`` is a ``(chunk, params)`` pair; returns a tuple of
    (nucleosome size distribution, occupancy track, sorted peak list).
    """
    chunk, params = arg
    try:
        occ_chunk = OccChunk(chunk)
        occ_chunk.process(params)
        sorted_peaks = [occ_chunk.peaks[key]
                        for key in sorted(occ_chunk.peaks.keys())]
        result = (occ_chunk.getNucDist(), occ_chunk.occ, sorted_peaks)
        occ_chunk.removeData()
    except Exception as e:
        print('Caught exception when processing:\n'+ chunk.asBed()+"\n")
        traceback.print_exc()
        print()
        raise e
    return result
def _writeOcc(track_queue, out):
    """Consumer process: append occupancy tracks from ``track_queue`` to
    the three bedgraph outputs (point estimate plus lower/upper confidence
    bounds) until the 'STOP' sentinel arrives.
    """
    out_handle1 = open(out + '.occ.bedgraph','a')
    out_handle2 = open(out + '.occ.lower_bound.bedgraph','a')
    out_handle3 = open(out + '.occ.upper_bound.bedgraph','a')
    try:
        # iter() with a sentinel ends the loop cleanly when 'STOP' is queued.
        for track in iter(track_queue.get, 'STOP'):
            track.write_track(out_handle1, vals = track.smoothed_vals)
            track.write_track(out_handle2, vals = track.smoothed_lower)
            track.write_track(out_handle3, vals = track.smoothed_upper)
            track_queue.task_done()
    except Exception, e:
        print('Caught exception when writing occupancy track\n')
        traceback.print_exc()
        print()
        raise e
    out_handle1.close()
    out_handle2.close()
    out_handle3.close()
    return True
def _writePeaks(pos_queue, out):
    """Consumer process: append occupancy peak positions from ``pos_queue``
    to the .occpeaks.bed output until the 'STOP' sentinel arrives.
    """
    out_handle = open(out + '.occpeaks.bed','a')
    try:
        # Each queue item is a list of peak objects for one chunk.
        for poslist in iter(pos_queue.get, 'STOP'):
            for pos in poslist:
                pos.write(out_handle)
            pos_queue.task_done()
    except Exception, e:
        print('Caught exception when writing occupancy track\n')
        traceback.print_exc()
        print()
        raise e
    out_handle.close()
    return True
def run_occ(args):
    """run occupancy calling

    Drives the full occupancy pipeline: read region chunks, fit the
    fragment-size mixture model, compute occupancy per chunk in a worker
    pool, stream tracks/peaks to writer processes, then compress and
    tabix-index all outputs.
    """
    if args.fasta:
        chrs = read_chrom_sizes_from_fasta(args.fasta)
    else:
        chrs = read_chrom_sizes_from_bam(args.bam)
    pwm = PWM.open(args.pwm)
    # Pad regions so flanks, fragment size, PWM width and nucleosome
    # separation all fit inside each chunk.
    chunks = ChunkList.read(args.bed, chromDict = chrs, min_offset = args.flank + args.upper/2 + max(pwm.up,pwm.down) + args.nuc_sep/2)
    chunks.slop(chrs, up = args.nuc_sep/2, down = args.nuc_sep/2)
    chunks.merge()
    maxQueueSize = args.cores*10
    # Fit the fragment-size mixture either from a precomputed size table
    # or directly from the BAM.
    fragment_dist = FragmentMixDistribution(0, upper = args.upper)
    if args.sizes is not None:
        tmp = FragmentSizes.open(args.sizes)
        fragment_dist.fragmentsizes = FragmentSizes(0, args.upper, vals = tmp.get(0,args.upper))
    else:
        fragment_dist.getFragmentSizes(args.bam, chunks)
    fragment_dist.modelNFR()
    fragment_dist.plotFits(args.out + '.occ_fit.eps')
    fragment_dist.fragmentsizes.save(args.out + '.fragmentsizes.txt')
    params = OccupancyParameters(fragment_dist, args.upper, args.fasta, args.pwm, sep = args.nuc_sep, min_occ = args.min_occ,
            flank = args.flank, bam = args.bam, ci = args.confidence_interval, step = args.step)
    sets = chunks.split(items = args.cores * 5)
    pool1 = mp.Pool(processes = max(1,args.cores-1))
    # Truncate output files; the writer processes below append to them.
    out_handle1 = open(args.out + '.occ.bedgraph','w')
    out_handle1.close()
    out_handle2 = open(args.out + '.occ.lower_bound.bedgraph','w')
    out_handle2.close()
    out_handle3 = open(args.out + '.occ.upper_bound.bedgraph','w')
    out_handle3.close()
    write_queue = mp.JoinableQueue(maxsize = maxQueueSize)
    write_process = mp.Process(target = _writeOcc, args=(write_queue, args.out))
    write_process.start()
    peaks_handle = open(args.out + '.occpeaks.bed','w')
    peaks_handle.close()
    peaks_queue = mp.JoinableQueue()
    peaks_process = mp.Process(target = _writePeaks, args=(peaks_queue, args.out))
    peaks_process.start()
    nuc_dist = np.zeros(args.upper)
    # Fan chunk sets out to the pool; accumulate the nucleosome size
    # distribution and forward tracks/peaks to the writer processes.
    for j in sets:
        tmp = pool1.map(_occHelper, zip(j,itertools.repeat(params)))
        for result in tmp:
            nuc_dist += result[0]
            write_queue.put(result[1])
            peaks_queue.put(result[2])
    pool1.close()
    pool1.join()
    # 'STOP' sentinels terminate the writer loops (see _writeOcc/_writePeaks).
    write_queue.put('STOP')
    peaks_queue.put('STOP')
    write_process.join()
    peaks_process.join()
    # Compress + tabix-index every output, removing the plain-text files.
    pysam.tabix_compress(args.out + '.occpeaks.bed', args.out + '.occpeaks.bed.gz',force = True)
    shell_command('rm ' + args.out + '.occpeaks.bed')
    pysam.tabix_index(args.out + '.occpeaks.bed.gz', preset = "bed", force = True)
    for i in ('occ','occ.lower_bound','occ.upper_bound'):
        pysam.tabix_compress(args.out + '.' + i + '.bedgraph', args.out + '.'+i+'.bedgraph.gz',force = True)
        shell_command('rm ' + args.out + '.' + i + '.bedgraph')
        pysam.tabix_index(args.out + '.' + i + '.bedgraph.gz', preset = "bed", force = True)
    dist_out = FragmentSizes(0, args.upper, vals = nuc_dist)
    dist_out.save(args.out + '.nuc_dist.txt')
    print "Making figure"
    #make figure
    fig = plt.figure()
    plt.plot(range(0,args.upper),dist_out.get(0,args.upper),label = "Nucleosome Distribution")
    plt.xlabel("Fragment Size")
    plt.ylabel("Frequency")
    fig.savefig(args.out+'.nuc_dist.eps')
    plt.close(fig)
| mit | -1,796,655,134,098,109,200 | 34.012579 | 135 | 0.637866 | false |
vipul-sharma20/oh-mainline | vendor/packages/twisted/twisted/web/google.py | 20 | 2091 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""\"I'm Feeling Lucky\" with U{Google<http://google.com>}.
"""
import urllib
from twisted.internet import protocol, reactor, defer
from twisted.web import http
class GoogleChecker(http.HTTPClient):
    """HTTP client issuing an "I'm Feeling Lucky" query and reporting the
    redirect target back to its factory.

    A lucky search is answered with a 302 whose Location header is the top
    hit; any other outcome is reported as a failure.
    """
    def connectionMade(self):
        self.sendCommand('GET', self.factory.url)
        self.sendHeader('Host', self.factory.host)
        self.sendHeader('User-Agent', self.factory.agent)
        self.endHeaders()
    def handleHeader(self, key, value):
        key = key.lower()
        if key == 'location':
            self.factory.gotLocation(value)
    def handleStatus(self, version, status, message):
        # Only a 302 redirect carries the result.
        if status != '302':
            self.factory.noLocation(ValueError("bad status"))
    def handleEndHeaders(self):
        # Fires after the headers in any case; the factory ignores this
        # call when gotLocation already delivered a result.
        self.factory.noLocation(ValueError("no location"))
    def handleResponsePart(self, part):
        pass
    def handleResponseEnd(self):
        pass
    def connectionLost(self, reason):
        self.factory.noLocation(reason)
class GoogleCheckerFactory(protocol.ClientFactory):
    """Client factory exposing the lucky-search result via ``self.deferred``.

    The Deferred fires with the redirect URL on success or a failure
    otherwise; it is cleared after firing so later events (for example
    connectionLost after gotLocation) are ignored.
    """
    protocol = GoogleChecker
    def __init__(self, words):
        # Build the "I'm Feeling Lucky" query path for the search words.
        self.url = ('/search?q=%s&btnI=%s' %
                    (urllib.quote_plus(' '.join(words)),
                     urllib.quote_plus("I'm Feeling Lucky")))
        self.agent="Twisted/GoogleChecker"
        self.host = "www.google.com"
        self.deferred = defer.Deferred()
    def clientConnectionFailed(self, _, reason):
        self.noLocation(reason)
    def gotLocation(self, location):
        if self.deferred:
            self.deferred.callback(location)
            self.deferred = None
    def noLocation(self, error):
        if self.deferred:
            self.deferred.errback(error)
            self.deferred = None
def checkGoogle(words):
    """Check google for a match.

    @param words: sequence of search terms joined into a single query.
    @returns: a Deferred which will callback with a URL or errback with a
        Failure.
    """
    factory = GoogleCheckerFactory(words)
    reactor.connectTCP('www.google.com', 80, factory)
    return factory.deferred
| agpl-3.0 | -3,836,474,049,388,754,400 | 26.88 | 73 | 0.635581 | false |
iLoop2/ResInsight | ThirdParty/Ert/devel/python/python/ert/test/ert_test_runner.py | 1 | 1860 | import os
try:
from unittest2 import TestLoader, TextTestRunner
except ImportError:
from unittest import TestLoader, TextTestRunner
class ErtTestRunner(object):
    """Helpers for discovering, loading and running unittest test cases."""

    @staticmethod
    def runTestSuite(tests, test_verbosity=3):
        """Run *tests* with a TextTestRunner; return True when all passed."""
        test_runner = TextTestRunner(verbosity=test_verbosity)
        result = test_runner.run(tests)
        return result.wasSuccessful()

    @staticmethod
    def findTestsInDirectory(path, recursive=True, pattern="test*.py"):
        """Collect a test suite from *path*.

        Bug fixes over the original implementation:
        - the ``recursive`` flag is now honored (it used to be ignored);
        - subdirectories are descended one level at a time instead of
          re-discovering every subdirectory at every ``os.walk`` level,
          which added the same tests to the suite multiple times.
        """
        loader = TestLoader()
        test_suite = loader.discover(path, pattern=pattern)
        if recursive:
            for entry in sorted(os.listdir(path)):
                subdir = os.path.join(path, entry)
                if os.path.isdir(subdir):
                    test_suite.addTests(
                        ErtTestRunner.findTestsInDirectory(subdir, recursive,
                                                           pattern))
        return test_suite

    @staticmethod
    def runTestsInDirectory(path=".", recursive=True, test_verbosity=3):
        """Discover tests under *path* and run them.

        Bug fix: ``test_verbosity`` is now forwarded to the runner (it
        used to be silently dropped).
        """
        test_suite = ErtTestRunner.findTestsInDirectory(path, recursive)
        return ErtTestRunner.runTestSuite(test_suite, test_verbosity)

    @staticmethod
    def runTestsInClass(classpath, test_verbosity=3):
        """Import the TestCase subclass named by *classpath* and run it."""
        klass = ErtTestRunner.importClass(classpath)
        loader = TestLoader()
        tests = loader.loadTestsFromTestCase(klass)
        testRunner = TextTestRunner(verbosity=test_verbosity)
        testRunner.run(tests)

    @staticmethod
    def importClass(classpath):
        """Import and return the object named by the dotted *classpath*,
        e.g. ``importClass("package.module.ClassName")``."""
        module_path, _, class_name = classpath.rpartition(".")
        m = __import__(module_path, globals(), locals(), [class_name])
        return getattr(m, class_name)

    @staticmethod
    def getTestsFromTestClass(test_class_path, argv=None):
        """Load the tests of the class at *test_class_path*, stashing
        *argv* on the class for tests that need command-line context."""
        klass = ErtTestRunner.importClass(test_class_path)
        klass.argv = argv
        loader = TestLoader()
        return loader.loadTestsFromTestCase(klass)
harmy/kbengine | kbe/res/scripts/common/Lib/wsgiref/util.py | 3 | 5760 | """Miscellaneous WSGI-related Utilities"""
import posixpath
__all__ = [
'FileWrapper', 'guess_scheme', 'application_uri', 'request_uri',
'shift_path_info', 'setup_testing_defaults',
]
class FileWrapper:
    """Wrapper to convert file-like objects to iterables"""

    def __init__(self, filelike, blksize=8192):
        # Read in fixed-size chunks; expose the wrapped object's close()
        # (when present) so callers can release the underlying resource.
        self.filelike = filelike
        self.blksize = blksize
        if hasattr(filelike, 'close'):
            self.close = filelike.close

    def __getitem__(self, key):
        # Legacy sequence protocol: the index is ignored and the next
        # chunk is returned; IndexError terminates iteration.
        data = self.filelike.read(self.blksize)
        if not data:
            raise IndexError
        return data

    def __iter__(self):
        return self

    def __next__(self):
        data = self.filelike.read(self.blksize)
        if not data:
            raise StopIteration
        return data
def guess_scheme(environ):
    """Return a guess for whether 'wsgi.url_scheme' should be 'http' or 'https'
    """
    # CGI-style servers signal TLS through the HTTPS environment variable.
    https_flag = environ.get("HTTPS")
    return 'https' if https_flag in ('yes', 'on', '1') else 'http'
def application_uri(environ):
    """Return the application's base URI (no PATH_INFO or QUERY_STRING)"""
    from urllib.parse import quote
    scheme = environ['wsgi.url_scheme']
    host = environ.get('HTTP_HOST') or environ['SERVER_NAME']
    url = scheme + '://' + host
    # NOTE(review): this variant appends the port even when HTTP_HOST was
    # used (stock CPython wsgiref only does so for SERVER_NAME); behavior
    # preserved as-is.
    port = environ['SERVER_PORT']
    if port != ('443' if scheme == 'https' else '80'):
        url += ':' + port
    # An empty SCRIPT_NAME still yields a root path of '/'.
    url += quote(environ.get('SCRIPT_NAME') or '/')
    return url
def request_uri(environ, include_query=True):
    """Return the full request URI, optionally including the query string"""
    from urllib.parse import quote
    url = application_uri(environ)
    # '/;=,' stay unescaped so path parameters survive quoting.
    path = quote(environ.get('PATH_INFO', ''), safe='/;=,')
    # With an empty SCRIPT_NAME, application_uri already ends in '/', so
    # drop PATH_INFO's leading slash to avoid doubling it.
    url += path[1:] if not environ.get('SCRIPT_NAME') else path
    if include_query and environ.get('QUERY_STRING'):
        url += '?' + environ['QUERY_STRING']
    return url
def shift_path_info(environ):
    """Shift a name from PATH_INFO to SCRIPT_NAME, returning it

    If there are no remaining path segments in PATH_INFO, return None.
    Note: 'environ' is modified in-place; use a copy if you need to keep
    the original PATH_INFO or SCRIPT_NAME.

    Note: when PATH_INFO is just a '/', this returns '' and appends a trailing
    '/' to SCRIPT_NAME, even though empty path segments are normally ignored,
    and SCRIPT_NAME doesn't normally end in a '/'.  This is intentional
    behavior, to ensure that an application can tell the difference between
    '/x' and '/x/' when traversing to objects.
    """
    path_info = environ.get('PATH_INFO', '')
    if not path_info:
        return None

    segments = path_info.split('/')
    # Drop empty and '.' segments from the middle only; the first (always
    # empty) and last segments are kept so trailing slashes survive.
    segments[1:-1] = [seg for seg in segments[1:-1] if seg and seg != '.']
    shifted = segments.pop(1)

    script_name = posixpath.normpath(
        environ.get('SCRIPT_NAME', '') + '/' + shifted)
    if script_name.endswith('/'):
        script_name = script_name[:-1]
    if not shifted and not script_name.endswith('/'):
        script_name += '/'

    environ['SCRIPT_NAME'] = script_name
    environ['PATH_INFO'] = '/'.join(segments)

    # Special case: a trailing '/.' is not stripped above because the last
    # path segment is never filtered, so normalize the return to None here;
    # PATH_INFO still ends up empty in the environ.
    if shifted == '.':
        shifted = None
    return shifted
def setup_testing_defaults(environ):
    """Update 'environ' with trivial defaults for testing purposes

    This adds various parameters required for WSGI, including HTTP_HOST,
    SERVER_NAME, SERVER_PORT, REQUEST_METHOD, SCRIPT_NAME, PATH_INFO,
    and all of the wsgi.* variables.  It only supplies default values,
    and does not replace any existing settings for these variables.

    This routine is intended to make it easier for unit tests of WSGI
    servers and applications to set up dummy environments.  It should *not*
    be used by actual WSGI servers or applications, since the data is fake!
    """
    from io import StringIO, BytesIO

    environ.setdefault('SERVER_NAME', '127.0.0.1')
    environ.setdefault('SERVER_PROTOCOL', 'HTTP/1.0')
    # HTTP_HOST mirrors whatever SERVER_NAME ended up being.
    environ.setdefault('HTTP_HOST', environ['SERVER_NAME'])
    environ.setdefault('REQUEST_METHOD', 'GET')

    # Only fill in SCRIPT_NAME/PATH_INFO when the caller supplied neither,
    # so a caller-provided half of the pair is never silently completed.
    if 'SCRIPT_NAME' not in environ and 'PATH_INFO' not in environ:
        environ.setdefault('SCRIPT_NAME', '')
        environ.setdefault('PATH_INFO', '/')

    for key, value in [
        ('wsgi.version', (1, 0)),
        ('wsgi.run_once', 0),
        ('wsgi.multithread', 0),
        ('wsgi.multiprocess', 0),
        ('wsgi.input', BytesIO()),
        ('wsgi.errors', StringIO()),
    ]:
        environ.setdefault(key, value)
    environ.setdefault('wsgi.url_scheme', guess_scheme(environ))

    if environ['wsgi.url_scheme'] == 'http':
        environ.setdefault('SERVER_PORT', '80')
    elif environ['wsgi.url_scheme'] == 'https':
        environ.setdefault('SERVER_PORT', '443')
# RFC 2616 section 13.5.1 names these as connection-scoped headers; the
# bound __contains__ gives a fast membership predicate.
_hoppish = frozenset({
    'connection', 'keep-alive', 'proxy-authenticate',
    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
    'upgrade',
}).__contains__

def is_hop_by_hop(header_name):
    """Return true if 'header_name' is an HTTP/1.1 "Hop-by-Hop" header"""
    return _hoppish(header_name.lower())
| lgpl-3.0 | -2,503,851,634,378,656,300 | 32.909091 | 79 | 0.615625 | false |
border/vnpy | vn.trader/windGateway/windGateway.py | 16 | 6658 | # encoding: UTF-8
'''
Gateway adaptor connecting the Wind Python API to vn.trader.
'''
from copy import copy
try:
    from WindPy import w
except ImportError:
    print u'请先安装WindPy接口'
from vtGateway import *

# Map from vt exchange constants to Wind exchange suffixes.
exchangeMap = {}
exchangeMap[EXCHANGE_SSE] = 'SH'
exchangeMap[EXCHANGE_SZSE] = 'SZ'
exchangeMap[EXCHANGE_CFFEX] = 'CFE'
exchangeMap[EXCHANGE_SHFE] = 'SHF'
exchangeMap[EXCHANGE_DCE] = 'DCE'
exchangeMap[EXCHANGE_CZCE] = 'CZC'
exchangeMap[EXCHANGE_UNKNOWN] = ''
# Reverse map: Wind suffix -> vt exchange constant.
exchangeMapReverse = {v:k for k,v in exchangeMap.items()}
########################################################################
class WindGateway(VtGateway):
    """Gateway adaptor for the Wind data feed (market data only; trading
    and account queries are not implemented)."""

    # Field list passed to w.wsq when subscribing, mapped to the matching
    # VtTickData attribute names.
    wsqParamMap = {}
    wsqParamMap['rt_last'] = 'lastPrice'
    wsqParamMap['rt_last_vol'] = 'volume'
    wsqParamMap['rt_oi'] = 'openInterest'
    wsqParamMap['rt_open'] = 'openPrice'
    wsqParamMap['rt_high'] = 'highPrice'
    wsqParamMap['rt_low'] = 'lowPrice'
    wsqParamMap['rt_pre_close'] = 'preClosePrice'
    wsqParamMap['rt_high_limit'] = 'upperLimit'
    wsqParamMap['rt_low_limit'] = 'lowerLimit'
    wsqParamMap['rt_bid1'] = 'bidPrice1'
    wsqParamMap['rt_bid2'] = 'bidPrice2'
    wsqParamMap['rt_bid3'] = 'bidPrice3'
    wsqParamMap['rt_bid4'] = 'bidPrice4'
    wsqParamMap['rt_bid5'] = 'bidPrice5'
    wsqParamMap['rt_ask1'] = 'askPrice1'
    wsqParamMap['rt_ask2'] = 'askPrice2'
    wsqParamMap['rt_ask3'] = 'askPrice3'
    wsqParamMap['rt_ask4'] = 'askPrice4'
    wsqParamMap['rt_ask5'] = 'askPrice5'
    wsqParamMap['rt_bsize1'] = 'bidVolume1'
    wsqParamMap['rt_bsize2'] = 'bidVolume2'
    wsqParamMap['rt_bsize3'] = 'bidVolume3'
    wsqParamMap['rt_bsize4'] = 'bidVolume4'
    wsqParamMap['rt_bsize5'] = 'bidVolume5'
    wsqParamMap['rt_asize1'] = 'askVolume1'
    wsqParamMap['rt_asize2'] = 'askVolume2'
    wsqParamMap['rt_asize3'] = 'askVolume3'
    wsqParamMap['rt_asize4'] = 'askVolume4'
    wsqParamMap['rt_asize5'] = 'askVolume5'
    # Comma-joined field string handed to w.wsq.
    wsqParam = ','.join(wsqParamMap.keys())

    #----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='Wind'):
        """Constructor"""
        super(WindGateway, self).__init__(eventEngine, gatewayName)
        self.w = w                  # Wind API object
        self.connected = False      # connection state
        # Wind's wsq pushes are incremental: each push only carries fields
        # that changed, while vt ticks are full snapshots -- so a complete
        # per-symbol snapshot is kept locally and patched in place.
        self.tickDict = {}
        self.registerEvent()

    #----------------------------------------------------------------------
    def connect(self):
        """Request an (asynchronous) connection to the Wind API."""
        # w.start blocks for a long time, so the actual call is deferred to
        # the event-processing thread via an EVENT_WIND_CONNECTREQ event.
        # (w.start also conflicts with WingIDE's debug module and can make
        # the process exit abnormally.)
        event = Event(type_=EVENT_WIND_CONNECTREQ)
        self.eventEngine.put(event)

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data for one contract."""
        windSymbol = '.'.join([subscribeReq.symbol, exchangeMap[subscribeReq.exchange]])
        data = self.w.wsq(windSymbol, self.wsqParam, func=self.wsqCallBack)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order (not implemented for the Wind gateway)."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'Wind接口未实现发单功能'
        self.onLog(log)

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel an order (not implemented for the Wind gateway)."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'Wind接口未实现撤单功能'
        self.onLog(log)

    #----------------------------------------------------------------------
    def getAccount(self):
        """Query account funds (not implemented for the Wind gateway)."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'Wind接口未实现查询账户功能'
        self.onLog(log)

    #----------------------------------------------------------------------
    def getPosition(self):
        """Query positions (not implemented for the Wind gateway)."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = u'Wind接口未实现查询持仓功能'
        self.onLog(log)

    #----------------------------------------------------------------------
    def close(self):
        # Stop the Wind API session.
        self.w.stop()

    #----------------------------------------------------------------------
    def registerEvent(self):
        """Register event handlers with the event engine."""
        self.eventEngine.register(EVENT_WIND_CONNECTREQ, self.wConnect)

    #----------------------------------------------------------------------
    def wsqCallBack(self, data):
        """Handle an incremental wsq push from Wind."""
        windSymbol = data.Codes[0]
        if windSymbol in self.tickDict:
            tick = self.tickDict[windSymbol]
        else:
            # First push for this symbol: create and cache the snapshot tick.
            tick = VtTickData()
            tick.gatewayName = self.gatewayName

            symbolSplit = windSymbol.split('.')
            tick.symbol = symbolSplit[0]
            tick.exchange = exchangeMapReverse[symbolSplit[1]]
            tick.vtSymbol = '.'.join([tick.symbol, tick.exchange])

            self.tickDict[windSymbol] = tick

        dt = data.Times[0]
        tick.time = dt.strftime('%H:%M:%S')
        tick.date = dt.strftime('%Y%m%d')

        # Patch only the pushed fields into the cached snapshot.
        fields = data.Fields
        values = data.Data
        d = tick.__dict__
        for n, field in enumerate(fields):
            field = field.lower()
            key = self.wsqParamMap[field]
            value = values[n][0]
            d[key] = value

        # Emit a copy so later pushes can't mutate the delivered tick.
        newtick = copy(tick)
        self.onTick(newtick)

    #----------------------------------------------------------------------
    def wConnect(self, event):
        """Connect to the Wind API on the event-processing thread."""
        result = self.w.start()

        log = VtLogData()
        log.gatewayName = self.gatewayName

        if not result.ErrorCode:
            log.logContent = u'Wind接口连接成功'
        else:
            log.logContent = u'Wind接口连接失败,错误代码%d' %result.ErrorCode

        self.onLog(log)
biospi/seamass-windeps | src/boost_1_57_0/libs/python/test/test_cltree.py | 46 | 1072 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#!/usr/bin/env python
# Python 2 smoke test for the Boost.Python 'cltree' extension: checks the
# exposed class hierarchy (basic is a base of constant and variable but not
# of symbol) and the repr() strings produced by the wrappers.
from cltree import basic,symbol,constant,variable

b = basic()
c = constant()
s = symbol()
v = variable()

# 'basic' instances are neither symbols, constants nor variables.
assert isinstance(b,basic)
assert not isinstance(b,symbol)
assert not isinstance(b,constant)
assert not isinstance(b,variable)

# 'constant' derives from basic only.
assert isinstance(c,basic)
assert isinstance(c,constant)
assert not isinstance(c,symbol)
assert not isinstance(c,variable)

# 'symbol' is exposed without the basic base class.
assert not isinstance(s,basic)
assert isinstance(s,symbol)
assert not isinstance(s,constant)
assert not isinstance(s,variable)

# 'variable' derives from basic.
assert isinstance(v,basic)
assert not isinstance(v,symbol)
assert not isinstance(v,constant)
assert isinstance(v,variable)

print 'b=',b
assert repr(b)=='cltree.basic()'
print 's=',s
assert repr(s)!='cltree.wrapped_symbol()' # because not isinstance(s,basic)
print 'c=',c
assert repr(c)=='cltree.constant()'
print 'v=',v
assert repr(v)=='cltree.wrapped_variable()'
print 'ok'
| apache-2.0 | -1,727,333,699,628,102,400 | 23.930233 | 75 | 0.754664 | false |
sclamons/murraylab_tools | murraylab_tools/tests/echo_tests/test_destinationplate.py | 1 | 1684 | import os
import pytest
import numpy as np
import murraylab_tools.echo as mt_echo
@pytest.fixture()
def test_dir():
    """Absolute path of the ``data`` directory next to this test module."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, "data")
def gen_plate():
    """Create a fresh DestinationPlate backed by 'dplate.dat'.

    Any stale plate file left over from a previous run is deleted first so
    every plate starts from a clean state.
    """
    fname = 'dplate.dat'
    if os.path.exists(fname):
        # Bug fix: ``os.rm`` does not exist (AttributeError at runtime);
        # ``os.remove`` is the correct stdlib call for deleting a file.
        os.remove(fname)
    dplate = mt_echo.DestinationPlate(filename=fname)
    return dplate
@pytest.fixture(scope="session")
def unused_plate():
    """A plate with no wells handed out yet.

    NOTE(review): session scope means well allocations leak between the
    tests that share this fixture; test ordering matters here.
    """
    return gen_plate()
@pytest.fixture(scope="session")
def used_plate():
    """A plate with the first 10 wells already allocated."""
    dplate = gen_plate()
    dplate.request_wells(10)
    return dplate
def test_request_wells(unused_plate):
    """A fresh plate hands out the first five wells of row A in order."""
    allocated = unused_plate.request_wells(5)
    expected = np.array(['A01', 'A02', 'A03', 'A04', 'A05'])
    assert np.all(allocated == expected)
def test_request_wells_from_used_plate(used_plate):
    """Allocation resumes after the 10 wells the fixture already took."""
    allocated = used_plate.request_wells(5)
    expected = np.array(['A11', 'A12', 'A13', 'A14', 'A15'])
    assert np.all(allocated == expected)
def test_request_too_many_wells(unused_plate):
    """Requesting more wells than the plate holds must raise."""
    with pytest.raises(Exception):
        unused_plate.request_wells(500)
# def test_make_simple_picklist(test_dir):
# # TODO Expand on assertions
# dplate = mt_echo.DestinationPlate()
# splate = mt_echo.SourcePlate()
# splate.load_well_definitions(os.path.join(test_dir,
# 'test_def_good_column_names.csv'))
# rxns = [
# [
# ['chem', 5, 10],
# ['h2o', 5]
# ],
# [
# ['chem', 5, 100],
# ['h2o', 5]
# ]
# ]
# picklist, _ = dplate.make_picklist(splate, rxns)
# assert picklist[0][0] == 'A3'
# assert picklist[2][0] == 'A5'
| mit | 2,820,130,651,303,012,400 | 27.542373 | 76 | 0.564133 | false |
bruderstein/PythonScript | PythonLib/min/selectors.py | 16 | 19536 | """Selectors module.
This module allows high-level and efficient I/O multiplexing, built upon the
`select` module primitives.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from collections.abc import Mapping
import math
import select
import sys
# generic events, that must be mapped to implementation-specific ones
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
def _fileobj_to_fd(fileobj):
"""Return a file descriptor from a file object.
Parameters:
fileobj -- file object or file descriptor
Returns:
corresponding file descriptor
Raises:
ValueError if the object is invalid
"""
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: "
"{!r}".format(fileobj)) from None
if fd < 0:
raise ValueError("Invalid file descriptor: {}".format(fd))
return fd
# Immutable record associating a registered file object with its descriptor,
# event mask and user data.
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])

SelectorKey.__doc__ = """SelectorKey(fileobj, fd, events, data)
Object used to associate a file object to its backing
file descriptor, selected event mask, and attached data.
"""
# Per-field docstrings are only writable on Python 3.5+.
if sys.version_info >= (3, 5):
    SelectorKey.fileobj.__doc__ = 'File object registered.'
    SelectorKey.fd.__doc__ = 'Underlying file descriptor.'
    SelectorKey.events.__doc__ = 'Events that must be waited for on this file object.'
    SelectorKey.data.__doc__ = ('''Optional opaque data associated to this file object.
For example, this could be used to store a per-client session ID.''')
class _SelectorMapping(Mapping):
"""Mapping of file objects to selector keys."""
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{!r} is not registered".format(fileobj)) from None
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(metaclass=ABCMeta):
    """Selector abstract base class.

    A selector supports registering file objects to be monitored for specific
    I/O events.

    A file object is a file descriptor or any object with a `fileno()` method.
    An arbitrary object can be attached to the file object, which can be used
    for example to store context information, a callback, etc.

    A selector can use various implementations (select(), poll(), epoll()...)
    depending on the platform. The default `Selector` class uses the most
    efficient implementation on the current platform.
    """

    @abstractmethod
    def register(self, fileobj, events, data=None):
        """Register a file object.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        ValueError if events is invalid
        KeyError if fileobj is already registered
        OSError if fileobj is closed or otherwise is unacceptable to
                the underlying system call (if a system call is made)

        Note:
        OSError may or may not be raised
        """
        raise NotImplementedError

    @abstractmethod
    def unregister(self, fileobj):
        """Unregister a file object.

        Parameters:
        fileobj -- file object or file descriptor

        Returns:
        SelectorKey instance

        Raises:
        KeyError if fileobj is not registered

        Note:
        If fileobj is registered but has since been closed this does
        *not* raise OSError (even if the wrapped syscall does)
        """
        raise NotImplementedError

    def modify(self, fileobj, events, data=None):
        """Change a registered file object monitored events or attached data.

        Parameters:
        fileobj -- file object or file descriptor
        events  -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE)
        data    -- attached data

        Returns:
        SelectorKey instance

        Raises:
        Anything that unregister() or register() raises
        """
        # Default implementation; subclasses may override with a cheaper path.
        self.unregister(fileobj)
        return self.register(fileobj, events, data)

    @abstractmethod
    def select(self, timeout=None):
        """Perform the actual selection, until some monitored file objects are
        ready or a timeout expires.

        Parameters:
        timeout -- if timeout > 0, this specifies the maximum wait time, in
                   seconds
                   if timeout <= 0, the select() call won't block, and will
                   report the currently ready file objects
                   if timeout is None, select() will block until a monitored
                   file object becomes ready

        Returns:
        list of (key, events) for ready file objects
        `events` is a bitwise mask of EVENT_READ|EVENT_WRITE
        """
        raise NotImplementedError

    def close(self):
        """Close the selector.

        This must be called to make sure that any underlying resource is freed.
        """
        pass

    def get_key(self, fileobj):
        """Return the key associated to a registered file object.

        Returns:
        SelectorKey for this file object
        """
        mapping = self.get_map()
        # get_map() returns None once the selector has been closed.
        if mapping is None:
            raise RuntimeError('Selector is closed')
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None

    @abstractmethod
    def get_map(self):
        """Return a mapping of file objects to selector keys."""
        raise NotImplementedError

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Context-manager exit guarantees resource cleanup.
        self.close()
class _BaseSelectorImpl(BaseSelector):
    """Base selector implementation."""

    def __init__(self):
        # this maps file descriptors to keys
        self._fd_to_key = {}
        # read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """Return a file descriptor from a file object.

        This wraps _fileobj_to_fd() to do an exhaustive search in case
        the object is invalid but we still have it in our map.  This
        is used by unregister() so we can unregister an object that
        was previously registered even if it is closed.  It is also
        used by _SelectorMapping.
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Do an exhaustive search.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd
            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        # Reject an empty mask and any bits outside EVENT_READ|EVENT_WRITE.
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{!r} (FD {}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        return key

    def modify(self, fileobj, events, data=None):
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{!r} is not registered".format(fileobj)) from None
        if events != key.events:
            # Event mask changed: re-register from scratch.
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key
        return key

    def close(self):
        self._fd_to_key.clear()
        # get_key() treats a None map as "selector closed".
        self._map = None

    def get_map(self):
        return self._map

    def _key_from_fd(self, fd):
        """Return the key associated to a given file descriptor.

        Parameters:
        fd -- file descriptor

        Returns:
        corresponding key, or None if not found
        """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None
class SelectSelector(_BaseSelectorImpl):
    """Select-based selector."""

    def __init__(self):
        super().__init__()
        # fd sets handed to select() on each call.
        self._readers = set()
        self._writers = set()

    def register(self, fileobj, events, data=None):
        key = super().register(fileobj, events, data)
        if events & EVENT_READ:
            self._readers.add(key.fd)
        if events & EVENT_WRITE:
            self._writers.add(key.fd)
        return key

    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        self._readers.discard(key.fd)
        self._writers.discard(key.fd)
        return key

    if sys.platform == 'win32':
        def _select(self, r, w, _, timeout=None):
            # On Windows, select() reports some write-side conditions in
            # the "exceptional" set, so wait on the write fds there too and
            # fold that result back into the writable list.
            r, w, x = select.select(r, w, w, timeout)
            return r, w + x, []
    else:
        _select = select.select

    def select(self, timeout=None):
        # select() rejects negative timeouts; clamp them to "poll".
        timeout = None if timeout is None else max(timeout, 0)
        ready = []
        try:
            r, w, _ = self._select(self._readers, self._writers, [], timeout)
        except InterruptedError:
            # Interrupted by a signal: report nothing ready.
            return ready
        r = set(r)
        w = set(w)
        for fd in r | w:
            events = 0
            if fd in r:
                events |= EVENT_READ
            if fd in w:
                events |= EVENT_WRITE

            key = self._key_from_fd(fd)
            if key:
                # Mask with the registered interest before reporting.
                ready.append((key, events & key.events))
        return ready
class _PollLikeSelector(_BaseSelectorImpl):
    """Base class shared between poll, epoll and devpoll selectors."""
    # Subclasses supply the poller factory and its native read/write bits.
    _selector_cls = None
    _EVENT_READ = None
    _EVENT_WRITE = None
    def __init__(self):
        super().__init__()
        self._selector = self._selector_cls()
    def register(self, fileobj, events, data=None):
        # Register in the generic map first, then mirror into the OS
        # poller; roll back the generic registration on any failure.
        key = super().register(fileobj, events, data)
        poller_events = 0
        if events & EVENT_READ:
            poller_events |= self._EVENT_READ
        if events & EVENT_WRITE:
            poller_events |= self._EVENT_WRITE
        try:
            self._selector.register(key.fd, poller_events)
        except:
            super().unregister(fileobj)
            raise
        return key
    def unregister(self, fileobj):
        key = super().unregister(fileobj)
        try:
            self._selector.unregister(key.fd)
        except OSError:
            # This can happen if the FD was closed since it
            # was registered.
            pass
        return key
    def modify(self, fileobj, events, data=None):
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError(f"{fileobj!r} is not registered") from None
        changed = False
        if events != key.events:
            # Translate to native bits and update the poller in place;
            # on failure the registration is removed entirely.
            selector_events = 0
            if events & EVENT_READ:
                selector_events |= self._EVENT_READ
            if events & EVENT_WRITE:
                selector_events |= self._EVENT_WRITE
            try:
                self._selector.modify(key.fd, selector_events)
            except:
                super().unregister(fileobj)
                raise
            changed = True
        if data != key.data:
            changed = True
        if changed:
            key = key._replace(events=events, data=data)
            self._fd_to_key[key.fd] = key
        return key
    def select(self, timeout=None):
        # This is shared between poll() and epoll().
        # epoll() has a different signature and handling of timeout parameter.
        if timeout is None:
            timeout = None
        elif timeout <= 0:
            timeout = 0
        else:
            # poll() has a resolution of 1 millisecond, round away from
            # zero to wait *at least* timeout seconds.
            timeout = math.ceil(timeout * 1e3)
        ready = []
        try:
            fd_event_list = self._selector.poll(timeout)
        except InterruptedError:
            # Interrupted by a signal: report nothing ready.
            return ready
        for fd, event in fd_event_list:
            events = 0
            # Any bit other than "readable" (e.g. POLLOUT, POLLERR, POLLHUP)
            # is reported as writable, and vice versa -- error conditions
            # are surfaced as both so callers attempt I/O and see the error.
            if event & ~self._EVENT_READ:
                events |= EVENT_WRITE
            if event & ~self._EVENT_WRITE:
                events |= EVENT_READ
            key = self._key_from_fd(fd)
            if key:
                ready.append((key, events & key.events))
        return ready
# Only defined when the platform's select module exposes poll().
if hasattr(select, 'poll'):
    class PollSelector(_PollLikeSelector):
        """Poll-based selector."""
        _selector_cls = select.poll
        _EVENT_READ = select.POLLIN
        _EVENT_WRITE = select.POLLOUT
if hasattr(select, 'epoll'):
    class EpollSelector(_PollLikeSelector):
        """Epoll-based selector."""
        _selector_cls = select.epoll
        _EVENT_READ = select.EPOLLIN
        _EVENT_WRITE = select.EPOLLOUT
        def fileno(self):
            # Expose the epoll fd so the selector itself can be polled.
            return self._selector.fileno()
        def select(self, timeout=None):
            # epoll takes timeout in (float) seconds, -1 meaning "block".
            if timeout is None:
                timeout = -1
            elif timeout <= 0:
                timeout = 0
            else:
                # epoll_wait() has a resolution of 1 millisecond, round away
                # from zero to wait *at least* timeout seconds.
                timeout = math.ceil(timeout * 1e3) * 1e-3
            # epoll_wait() expects `maxevents` to be greater than zero;
            # we want to make sure that `select()` can be called when no
            # FD is registered.
            max_ev = max(len(self._fd_to_key), 1)
            ready = []
            try:
                fd_event_list = self._selector.poll(timeout, max_ev)
            except InterruptedError:
                return ready
            for fd, event in fd_event_list:
                events = 0
                # Same bit-inversion trick as _PollLikeSelector.select():
                # error/hup conditions map to both READ and WRITE.
                if event & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event & ~select.EPOLLOUT:
                    events |= EVENT_READ
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
        def close(self):
            # Release the epoll fd before clearing the generic state.
            self._selector.close()
            super().close()
if hasattr(select, 'devpoll'):
    class DevpollSelector(_PollLikeSelector):
        """Solaris /dev/poll selector."""
        _selector_cls = select.devpoll
        _EVENT_READ = select.POLLIN
        _EVENT_WRITE = select.POLLOUT
        def fileno(self):
            # Expose the underlying /dev/poll fd.
            return self._selector.fileno()
        def close(self):
            # Release the /dev/poll fd before clearing generic state.
            self._selector.close()
            super().close()
if hasattr(select, 'kqueue'):
    class KqueueSelector(_BaseSelectorImpl):
        """Kqueue-based selector."""
        def __init__(self):
            super().__init__()
            self._selector = select.kqueue()
        def fileno(self):
            # Expose the kqueue fd so the selector itself can be polled.
            return self._selector.fileno()
        def register(self, fileobj, events, data=None):
            # kqueue uses one kevent per filter, so READ and WRITE are
            # registered separately; roll back on any failure.
            key = super().register(fileobj, events, data)
            try:
                if events & EVENT_READ:
                    kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                        select.KQ_EV_ADD)
                    self._selector.control([kev], 0, 0)
                if events & EVENT_WRITE:
                    kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                        select.KQ_EV_ADD)
                    self._selector.control([kev], 0, 0)
            except:
                super().unregister(fileobj)
                raise
            return key
        def unregister(self, fileobj):
            key = super().unregister(fileobj)
            if key.events & EVENT_READ:
                kev = select.kevent(key.fd, select.KQ_FILTER_READ,
                                    select.KQ_EV_DELETE)
                try:
                    self._selector.control([kev], 0, 0)
                except OSError:
                    # This can happen if the FD was closed since it
                    # was registered.
                    pass
            if key.events & EVENT_WRITE:
                kev = select.kevent(key.fd, select.KQ_FILTER_WRITE,
                                    select.KQ_EV_DELETE)
                try:
                    self._selector.control([kev], 0, 0)
                except OSError:
                    # See comment above.
                    pass
            return key
        def select(self, timeout=None):
            timeout = None if timeout is None else max(timeout, 0)
            # If max_ev is 0, kqueue will ignore the timeout. For consistent
            # behavior with the other selector classes, we prevent that here
            # (using max). See https://bugs.python.org/issue29255
            max_ev = max(len(self._fd_to_key), 1)
            ready = []
            try:
                kev_list = self._selector.control(None, max_ev, timeout)
            except InterruptedError:
                return ready
            for kev in kev_list:
                fd = kev.ident
                flag = kev.filter
                events = 0
                # Each kevent reports exactly one filter; a single fd may
                # therefore appear twice in kev_list (once per direction).
                if flag == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if flag == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE
                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
        def close(self):
            # Release the kqueue fd before clearing the generic state.
            self._selector.close()
            super().close()
def _can_use(method):
"""Check if we can use the selector depending upon the
operating system. """
# Implementation based upon https://github.com/sethmlarson/selectors2/blob/master/selectors2.py
selector = getattr(select, method, None)
if selector is None:
# select module does not implement method
return False
# check if the OS and Kernel actually support the method. Call may fail with
# OSError: [Errno 38] Function not implemented
try:
selector_obj = selector()
if method == 'poll':
# check that poll actually works
selector_obj.poll(0)
else:
# close epoll, kqueue, and devpoll fd
selector_obj.close()
return True
except OSError:
return False
# Choose the best implementation, roughly:
# epoll|kqueue|devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if _can_use('kqueue'):
    DefaultSelector = KqueueSelector
elif _can_use('epoll'):
    DefaultSelector = EpollSelector
elif _can_use('devpoll'):
    DefaultSelector = DevpollSelector
elif _can_use('poll'):
    DefaultSelector = PollSelector
else:
    # select() is always available, albeit with the FD_SETSIZE limit.
    DefaultSelector = SelectSelector
| gpl-2.0 | -1,971,390,149,032,414,000 | 30.560582 | 99 | 0.560657 | false |
jquesnelle/pulp-or | examples/furniture.py | 3 | 1043 | """
The Furniture problem from EngSci391 for the PuLP Modeller
Author: Dr Stuart Mitchell 2007
"""
from pulp import *
# Decision alternatives: the two chair models.
Chairs = ["A","B"]
# Revenue per chair (objective coefficients).
costs = {"A":100,
         "B":150}
# Machines with limited weekly hours.
Resources = ["Lathe","Polisher"]
capacity = {"Lathe" : 40,
            "Polisher" : 48}
# Hours of each resource consumed per chair built.
activity = [ #Chairs
            #A  B
            [1, 2], #Lathe
            [3, 1.5] #Polisher
            ]
activity = makeDict([Resources,Chairs],activity)
# Maximization LP: choose chair counts to maximize revenue.
prob = LpProblem("Furniture Manufacturing Problem", LpMaximize)
vars = LpVariable.dicts("Number of Chairs",Chairs, lowBound = 0)
#objective
prob += lpSum([costs[c]*vars[c] for c in Chairs])
# One capacity constraint per machine.
for r in Resources:
    prob += lpSum([activity[r][c]*vars[c] for c in Chairs]) <= capacity[r], \
            "capacity_of_%s"%r
prob.writeLP("furniture.lp")
prob.solve()
# Each of the variables is printed with its value
for v in prob.variables():
    print(v.name, "=", v.varValue)
print("Total Revenue from Production = ", value(prob.objective)) | mit | -877,327,377,745,038 | 32.677419 | 77 | 0.627996 | false |
pymedusa/SickRage | ext/adba/aniDBcommands.py | 4 | 17703 | #!/usr/bin/env python
# coding=utf-8
#
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa. If not, see <http://www.gnu.org/licenses/>.
from threading import Lock
from .aniDBresponses import *
from .aniDBerrors import *
class Command:
    """A single AniDB UDP API request.

    Holds the command name and its parameters, serializes them to the
    wire format, and synchronizes the caller with the eventual response
    (either by blocking on a lock or via a callback).
    """
    queue = {None: None}

    def __init__(self, command, **parameters):
        self.command = command
        self.parameters = parameters
        self.raw = self.flatten(command, parameters)
        self.mode = None
        self.callback = None
        # The waiter starts out held; handle() releases it in mode 1 so
        # that wait_response() unblocks.
        self.waiter = Lock()
        self.waiter.acquire()

    def __repr__(self):
        return "Command(%r,%r) %r\n%s\n" % (self.tag, self.command, self.parameters, self.raw_data())

    def authorize(self, mode, tag, session, callback):
        """Attach session bookkeeping before the command is sent.

        mode 1 = synchronous (wait_response), mode 2 = callback-driven.
        """
        self.mode = mode
        self.callback = callback
        self.tag = tag
        self.session = session
        self.parameters['tag'] = tag
        self.parameters['s'] = session

    def handle(self, resp):
        """Deliver the response, waking the waiter or firing the callback."""
        self.resp = resp
        if self.mode == 1:
            self.waiter.release()
        elif self.mode == 2:
            self.callback(resp)

    def wait_response(self):
        """Block until handle() releases the waiter (mode 1 only)."""
        self.waiter.acquire()

    def flatten(self, command, parameters):
        """Serialize to 'COMMAND key=val&key=val', skipping None values."""
        pairs = '&'.join(
            "%s=%s" % (self.escape(name), self.escape(value))
            for name, value in parameters.items()
            if value is not None)
        return command + ' ' + pairs

    @staticmethod
    def escape(data):
        # '&' separates fields on the wire, so literal ampersands are
        # entity-encoded.
        return str(data).replace('&', '&amp;')

    def raw_data(self):
        """Re-serialize (parameters may have changed since __init__)."""
        self.raw = self.flatten(self.command, self.parameters)
        return self.raw

    def cached(self, interface, database):
        # Subclasses may return a cached response instead of hitting the API.
        return None

    def cache(self, interface, database):
        # Subclasses may persist their response for later cached() calls.
        pass
# first run
class AuthCommand(Command):
    """AUTH: log in and open a UDP API session."""
    def __init__(self, username, password, protover, client, clientver, nat=None, comp=None, enc=None, mtu=None):
        parameters = {'user': username, 'pass': password, 'protover': protover, 'client': client, 'clientver': clientver, 'nat': nat, 'comp': comp, 'enc': enc, 'mtu': mtu}
        Command.__init__(self, 'AUTH', **parameters)
class LogoutCommand(Command):
    """LOGOUT: close the current session."""
    def __init__(self):
        Command.__init__(self, 'LOGOUT')
# third run (at the same time as second)
class PushCommand(Command):
    """PUSH: subscribe/unsubscribe to server push notifications."""
    def __init__(self, notify, msg, buddy=None):
        parameters = {'notify': notify, 'msg': msg, 'buddy': buddy}
        Command.__init__(self, 'PUSH', **parameters)
class PushAckCommand(Command):
    """PUSHACK: acknowledge a pushed notification by its nid."""
    def __init__(self, nid):
        parameters = {'nid': nid}
        Command.__init__(self, 'PUSHACK', **parameters)
class NotifyAddCommand(Command):
    """NOTIFICATIONADD: watch an anime (aid) or group (gid) -- exactly one."""
    def __init__(self, aid=None, gid=None, type=None, priority=None):
        if not (aid or gid) or (aid and gid):
            raise AniDBIncorrectParameterError("You must provide aid OR gid for NOTIFICATIONADD command")
        parameters = {'aid': aid, "gid": gid, "type": type, "priority": priority}
        Command.__init__(self, 'NOTIFICATIONADD', **parameters)
class NotifyCommand(Command):
    """NOTIFY: fetch pending notification/message counts."""
    def __init__(self, buddy=None):
        parameters = {'buddy': buddy}
        Command.__init__(self, 'NOTIFY', **parameters)
class NotifyListCommand(Command):
    """NOTIFYLIST: list pending notifications and messages."""
    def __init__(self):
        Command.__init__(self, 'NOTIFYLIST')
class NotifyGetCommand(Command):
    """NOTIFYGET: fetch one notification/message by type and id."""
    def __init__(self, type, id):
        parameters = {'type': type, 'id': id}
        Command.__init__(self, 'NOTIFYGET', **parameters)
class NotifyAckCommand(Command):
    """NOTIFYACK: acknowledge one notification/message by type and id."""
    def __init__(self, type, id):
        parameters = {'type': type, 'id': id}
        Command.__init__(self, 'NOTIFYACK', **parameters)
class BuddyAddCommand(Command):
    """BUDDYADD: add a buddy by user id OR by user name (exactly one).

    Raises:
        AniDBIncorrectParameterError: if neither or both of uid/uname given.
    """
    def __init__(self, uid=None, uname=None):
        if not (uid or uname) or (uid and uname):
            raise AniDBIncorrectParameterError("You must provide <u(id|name)> for BUDDYADD command")
        # Bug fix: when called with only uid, uname is None and the old
        # unconditional uname.lower() raised AttributeError. Lower-case
        # only when a name was actually supplied (None values are dropped
        # from the wire format by Command.flatten anyway).
        parameters = {'uid': uid, 'uname': uname.lower() if uname else None}
        Command.__init__(self, 'BUDDYADD', **parameters)
class BuddyDelCommand(Command):
    """BUDDYDEL: remove a buddy by user id."""
    def __init__(self, uid):
        parameters = {'uid': uid}
        Command.__init__(self, 'BUDDYDEL', **parameters)
class BuddyAcceptCommand(Command):
    """BUDDYACCEPT: accept a pending buddy request."""
    def __init__(self, uid):
        parameters = {'uid': uid}
        Command.__init__(self, 'BUDDYACCEPT', **parameters)
class BuddyDenyCommand(Command):
    """BUDDYDENY: deny a pending buddy request."""
    def __init__(self, uid):
        parameters = {'uid': uid}
        Command.__init__(self, 'BUDDYDENY', **parameters)
class BuddyListCommand(Command):
    """BUDDYLIST: page through the buddy list starting at *startat*."""
    def __init__(self, startat):
        parameters = {'startat': startat}
        Command.__init__(self, 'BUDDYLIST', **parameters)
class BuddyStateCommand(Command):
    """BUDDYSTATE: page through buddy online states starting at *startat*."""
    def __init__(self, startat):
        parameters = {'startat': startat}
        Command.__init__(self, 'BUDDYSTATE', **parameters)
# first run
class AnimeCommand(Command):
    """ANIME: look up an anime by id or name."""
    def __init__(self, aid=None, aname=None, amask=None):
        if not (aid or aname):
            raise AniDBIncorrectParameterError("You must provide <a(id|name)> for ANIME command")
        parameters = {'aid': aid, 'aname': aname, 'amask': amask}
        Command.__init__(self, 'ANIME', **parameters)
class EpisodeCommand(Command):
    """EPISODE: look up an episode by eid, or by anime + episode number."""
    def __init__(self, eid=None, aid=None, aname=None, epno=None):
        # Exactly one of: eid, or (aid|aname)+epno.
        if not (eid or ((aname or aid) and epno)) or (aname and aid) or (eid and (aname or aid or epno)):
            raise AniDBIncorrectParameterError("You must provide <eid XOR a(id|name)+epno> for EPISODE command")
        parameters = {'eid': eid, 'aid': aid, 'aname': aname, 'epno': epno}
        Command.__init__(self, 'EPISODE', **parameters)
class FileCommand(Command):
    """FILE: look up a file by fid, by size+ed2k hash, or by anime+group+epno."""
    def __init__(self, fid=None, size=None, ed2k=None, aid=None, aname=None, gid=None, gname=None, epno=None, fmask=None, amask=None):
        # Exactly one of the three mutually exclusive identification forms.
        if not (fid or (size and ed2k) or ((aid or aname) and (gid or gname) and epno)) or (fid and (size or ed2k or aid or aname or gid or gname or epno)) or ((size and ed2k) and (fid or aid or aname or gid or gname or epno)) or (((aid or aname) and (gid or gname) and epno) and (fid or size or ed2k)) or (aid and aname) or (gid and gname):
            raise AniDBIncorrectParameterError("You must provide <fid XOR size+ed2k XOR a(id|name)+g(id|name)+epno> for FILE command")
        parameters = {'fid': fid, 'size': size, 'ed2k': ed2k, 'aid': aid, 'aname': aname, 'gid': gid, 'gname': gname, 'epno': epno, 'fmask': fmask, 'amask': amask}
        Command.__init__(self, 'FILE', **parameters)
class GroupCommand(Command):
    """GROUP: look up a release group by id or name (exactly one)."""
    def __init__(self, gid=None, gname=None):
        if not (gid or gname) or (gid and gname):
            raise AniDBIncorrectParameterError("You must provide <g(id|name)> for GROUP command")
        parameters = {'gid': gid, 'gname': gname}
        Command.__init__(self, 'GROUP', **parameters)
class GroupstatusCommand(Command):
    """GROUPSTATUS: list group completion states for an anime."""
    def __init__(self, aid=None, status=None):
        if not aid:
            raise AniDBIncorrectParameterError("You must provide aid for GROUPSTATUS command")
        parameters = {'aid': aid, 'status': status}
        Command.__init__(self, 'GROUPSTATUS', **parameters)
class ProducerCommand(Command):
    """PRODUCER: look up a producer by id or name, with local DB caching."""
    def __init__(self, pid=None, pname=None):
        if not (pid or pname) or (pid and pname):
            raise AniDBIncorrectParameterError("You must provide <p(id|name)> for PRODUCER command")
        parameters = {'pid': pid, 'pname': pname}
        Command.__init__(self, 'PRODUCER', **parameters)
    def cached(self, intr, db):
        # Try to satisfy the command from the local 'ptb' table; returns a
        # synthesized ProducerResponse or None on a cache miss.
        pid = self.parameters['pid']
        pname = self.parameters['pname']
        codes = ('pid', 'name', 'shortname', 'othername', 'type', 'pic', 'url')
        # The code != '' filter mirrors response field lists that may
        # contain placeholder entries (none here, kept for symmetry).
        names = ','.join([code for code in codes if code != ''])
        # Match by pid if given, otherwise by any of the name columns.
        ruleholder = (pid and 'pid=%s' or '(name=%s OR shortname=%s OR othername=%s)')
        rulevalues = (pid and [pid] or [pname, pname, pname])
        # status&8 selects rows flagged as fully cached -- TODO confirm bit meaning.
        rows = db.select('ptb', names, ruleholder + " AND status&8", *rulevalues)
        if len(rows) > 1:
            raise AniDBInternalError("It shouldn't be possible for database to return more than 1 line for PRODUCER cache")
        elif not len(rows):
            return None
        else:
            # Synthesize a response ('245') from the cached row and parse it.
            resp = ProducerResponse(self, None, '245', 'CACHED PRODUCER', [list(rows[0])])
            resp.parse()
            return resp
    def cache(self, intr, db):
        # Persist a successful (245) live response, unless already cached.
        if self.resp.rescode != '245' or self.cached(intr, db):
            return
        codes = ('pid', 'name', 'shortname', 'othername', 'type', 'pic', 'url')
        if len(db.select('ptb', 'pid', 'pid=%s', self.resp.datalines[0]['pid'])):
            # Row exists: update it and set all status flag bits (|15).
            sets = 'status=status|15,' + ','.join([code + '=%s' for code in codes if code != ''])
            values = [self.resp.datalines[0][code] for code in codes if code != ''] + [self.resp.datalines[0]['pid']]
            db.update('ptb', sets, 'pid=%s', *values)
        else:
            # New row: insert with status 0.
            names = 'status,' + ','.join([code for code in codes if code != ''])
            valueholders = '0,' + ','.join(['%s' for code in codes if code != ''])
            values = [self.resp.datalines[0][code] for code in codes if code != '']
            db.insert('ptb', names, valueholders, *values)
class MyListCommand(Command):
    """MYLIST: look up a mylist entry, with local DB caching in 'ltb'."""
    def __init__(self, lid=None, fid=None, size=None, ed2k=None, aid=None, aname=None, gid=None, gname=None, epno=None):
        # Exactly one identification form: lid, fid, size+ed2k, or anime(+group+epno).
        if not (lid or fid or (size and ed2k) or (aid or aname)) or (lid and (fid or size or ed2k or aid or aname or gid or gname or epno)) or (fid and (lid or size or ed2k or aid or aname or gid or gname or epno)) or ((size and ed2k) and (lid or fid or aid or aname or gid or gname or epno)) or ((aid or aname) and (lid or fid or size or ed2k)) or (aid and aname) or (gid and gname):
            raise AniDBIncorrectParameterError("You must provide <lid XOR fid XOR size+ed2k XOR a(id|name)+g(id|name)+epno> for MYLIST command")
        parameters = {'lid': lid, 'fid': fid, 'size': size, 'ed2k': ed2k, 'aid': aid, 'aname': aname, 'gid': gid, 'gname': gname, 'epno': epno}
        Command.__init__(self, 'MYLIST', **parameters)
    def cached(self, intr, db):
        # Resolve whichever identifiers were given down to a concrete DB
        # filter, issuing helper lookups (file/anime/group/episode) as
        # needed, then try the local 'ltb' table.
        lid = self.parameters['lid']
        fid = self.parameters['fid']
        size = self.parameters['size']
        ed2k = self.parameters['ed2k']
        aid = self.parameters['aid']
        aname = self.parameters['aname']
        gid = self.parameters['gid']
        gname = self.parameters['gname']
        epno = self.parameters['epno']
        names = ','.join([code for code in MylistResponse(None, None, None, None, []).codetail if code != ''])
        if lid:
            ruleholder = "lid=%s"
            rulevalues = [lid]
        elif fid or size or ed2k:
            # Resolve file identifiers to a fid first.
            resp = intr.file(fid=fid, size=size, ed2k=ed2k)
            if resp.rescode != '220':
                resp = NoSuchMylistFileResponse(self, None, '321', 'NO SUCH ENTRY (FILE NOT FOUND)', [])
                resp.parse()
                return resp
            fid = resp.datalines[0]['fid']
            ruleholder = "fid=%s"
            rulevalues = [fid]
        else:
            # Resolve anime, group and episode ids in turn; any miss maps
            # to a synthesized 321 response.
            resp = intr.anime(aid=aid, aname=aname)
            if resp.rescode != '230':
                resp = NoSuchFileResponse(self, None, '321', 'NO SUCH ENTRY (ANIME NOT FOUND)', [])
                resp.parse()
                return resp
            aid = resp.datalines[0]['aid']
            resp = intr.group(gid=gid, gname=gname)
            if resp.rescode != '250':
                resp = NoSuchFileResponse(self, None, '321', 'NO SUCH ENTRY (GROUP NOT FOUND)', [])
                resp.parse()
                return resp
            gid = resp.datalines[0]['gid']
            resp = intr.episode(aid=aid, epno=epno)
            if resp.rescode != '240':
                resp = NoSuchFileResponse(self, None, '321', 'NO SUCH ENTRY (EPISODE NOT FOUND)', [])
                resp.parse()
                return resp
            eid = resp.datalines[0]['eid']
            ruleholder = "aid=%s AND eid=%s AND gid=%s"
            rulevalues = [aid, eid, gid]
        rows = db.select('ltb', names, ruleholder + " AND status&8", *rulevalues)
        if len(rows) > 1:
            # resp=MultipleFilesFoundResponse(self,None,'322','CACHED MULTIPLE FILES FOUND',/*get fids from rows, not gonna do this as you haven't got a real cache out of these..*/)
            return None
        elif not len(rows):
            return None
        else:
            resp = MylistResponse(self, None, '221', 'CACHED MYLIST', [list(rows[0])])
            resp.parse()
            return resp
    def cache(self, intr, db):
        # Persist a successful (221) live response, unless already cached.
        if self.resp.rescode != '221' or self.cached(intr, db):
            return
        codes = MylistResponse(None, None, None, None, []).codetail
        if len(db.select('ltb', 'lid', 'lid=%s', self.resp.datalines[0]['lid'])):
            # Row exists: update and set all status flag bits (|15).
            sets = 'status=status|15,' + ','.join([code + '=%s' for code in codes if code != ''])
            values = [self.resp.datalines[0][code] for code in codes if code != ''] + [self.resp.datalines[0]['lid']]
            db.update('ltb', sets, 'lid=%s', *values)
        else:
            # New row: insert with status 15 (fully cached).
            names = 'status,' + ','.join([code for code in codes if code != ''])
            valueholders = '15,' + ','.join(['%s' for code in codes if code != ''])
            values = [self.resp.datalines[0][code] for code in codes if code != '']
            db.insert('ltb', names, valueholders, *values)
class MyListAddCommand(Command):
    """MYLISTADD: add (or, with edit, modify) a mylist entry."""
    def __init__(self, lid=None, fid=None, size=None, ed2k=None, aid=None, aname=None, gid=None, gname=None, epno=None, edit=None, state=None, viewed=None, source=None, storage=None, other=None):
        # Exactly one identification form; lid is only valid together with edit.
        if not (lid or fid or (size and ed2k) or ((aid or aname) and (gid or gname))) or (lid and (fid or size or ed2k or aid or aname or gid or gname or epno)) or (fid and (lid or size or ed2k or aid or aname or gid or gname or epno)) or ((size and ed2k) and (lid or fid or aid or aname or gid or gname or epno)) or (((aid or aname) and (gid or gname)) and (lid or fid or size or ed2k)) or (aid and aname) or (gid and gname) or (lid and not edit):
            raise AniDBIncorrectParameterError("You must provide <lid XOR fid XOR size+ed2k XOR a(id|name)+g(id|name)+epno> for MYLISTADD command")
        parameters = {'lid': lid, 'fid': fid, 'size': size, 'ed2k': ed2k, 'aid': aid, 'aname': aname, 'gid': gid, 'gname': gname, 'epno': epno, 'edit': edit, 'state': state, 'viewed': viewed, 'source': source, 'storage': storage, 'other': other}
        Command.__init__(self, 'MYLISTADD', **parameters)
class MyListDelCommand(Command):
    """MYLISTDEL: delete a mylist entry by lid, fid or anime+group+epno."""
    def __init__(self, lid=None, fid=None, aid=None, aname=None, gid=None, gname=None, epno=None):
        if not (lid or fid or ((aid or aname) and (gid or gname) and epno)) or (lid and (fid or aid or aname or gid or gname or epno)) or (fid and (lid or aid or aname or gid or gname or epno)) or (((aid or aname) and (gid or gname) and epno) and (lid or fid)) or (aid and aname) or (gid and gname):
            raise AniDBIncorrectParameterError("You must provide <lid+edit=1 XOR fid XOR a(id|name)+g(id|name)+epno> for MYLISTDEL command")
        parameters = {'lid': lid, 'fid': fid, 'aid': aid, 'aname': aname, 'gid': gid, 'gname': gname, 'epno': epno}
        Command.__init__(self, 'MYLISTDEL', **parameters)
class MyListStatsCommand(Command):
    """MYLISTSTATS: fetch aggregate statistics for the user's mylist."""
    def __init__(self):
        Command.__init__(self, 'MYLISTSTATS')
class VoteCommand(Command):
    """VOTE: rate an anime/episode/group identified by id or name."""
    def __init__(self, type, id=None, name=None, value=None, epno=None):
        if not (id or name) or (id and name):
            raise AniDBIncorrectParameterError("You must provide <(id|name)> for VOTE command")
        parameters = {'type': type, 'id': id, 'name': name, 'value': value, 'epno': epno}
        Command.__init__(self, 'VOTE', **parameters)
class RandomAnimeCommand(Command):
    """RANDOMANIME: fetch a random anime of the given selection type."""
    def __init__(self, type):
        parameters = {'type': type}
        Command.__init__(self, 'RANDOMANIME', **parameters)
class PingCommand(Command):
    """PING: keep-alive / connectivity check."""
    def __init__(self):
        Command.__init__(self, 'PING')
# second run
class EncryptCommand(Command):
    """ENCRYPT: request session encryption before AUTH."""
    def __init__(self, user, apipassword, type):
        # Kept on the instance; presumably used by the link layer to derive
        # the session key -- confirm against the aniDBlink implementation.
        self.apipassword = apipassword
        parameters = {'user': user.lower(), 'type': type}
        Command.__init__(self, 'ENCRYPT', **parameters)
class EncodingCommand(Command):
    """ENCODING: set the text encoding for the current session.

    Bug fix: the parameter dict previously read ``{'name': type}``, which
    referenced the *builtin* ``type`` instead of the ``name`` argument, so
    the command sent "name=<class 'type'>" to the server. Use the argument.
    """
    def __init__(self, name):
        parameters = {'name': name}
        Command.__init__(self, 'ENCODING', **parameters)
class SendMsgCommand(Command):
    """SENDMSG: send a private message (title <= 50 chars, body <= 900)."""
    def __init__(self, to, title, body):
        if len(title) > 50 or len(body) > 900:
            raise AniDBIncorrectParameterError("Title must not be longer than 50 chars and body must not be longer than 900 chars for SENDMSG command")
        parameters = {'to': to.lower(), 'title': title, 'body': body}
        Command.__init__(self, 'SENDMSG', **parameters)
class UserCommand(Command):
    """USER: look up a user by name."""
    def __init__(self, user):
        parameters = {'user': user}
        Command.__init__(self, 'USER', **parameters)
class UptimeCommand(Command):
    """UPTIME: fetch the API server's uptime."""
    def __init__(self):
        Command.__init__(self, 'UPTIME')
class VersionCommand(Command):
    """VERSION: fetch the API server's version."""
    def __init__(self):
        Command.__init__(self, 'VERSION')
GENI-NSF/gram | pi_gram/src/gram/am/__init__.py | 6 | 1218 | #----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
| mit | -577,395,658,159,688,800 | 51.956522 | 72 | 0.681445 | false |
mdittmer/chromium-dashboard | models.py | 1 | 21219 | import datetime
import logging
import time
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
#from google.appengine.ext.db import djangoforms
#from django.forms import ModelForm
from collections import OrderedDict
from django import forms
import settings
# Property value types that to_dict() passes through unchanged (Python 2).
SIMPLE_TYPES = (int, long, float, bool, dict, basestring, list)
# Feature category enum values (stored as ints in the datastore).
WEBCOMPONENTS = 1
MISC = 2
SECURITY = 3
MULTIMEDIA = 4
DOM = 5
FILE = 6
OFFLINE = 7
DEVICE = 8
COMMUNICATION = 9
JAVASCRIPT = 10
NETWORKING = 11
INPUT = 12
PERFORMANCE = 13
GRAPHICS = 14
CSS = 15
# Display names for the category enum above.
FEATURE_CATEGORIES = {
  CSS: 'CSS',
  WEBCOMPONENTS: 'Web Components',
  MISC: 'Misc',
  SECURITY: 'Security',
  MULTIMEDIA: 'Multimedia',
  DOM: 'DOM',
  FILE: 'File APIs',
  OFFLINE: 'Offline / Storage',
  DEVICE: 'Device',
  COMMUNICATION: 'Realtime / Communication',
  JAVASCRIPT: 'JavaScript',
  NETWORKING: 'Network / Connectivity',
  INPUT: 'User input',
  PERFORMANCE: 'Performance',
  GRAPHICS: 'Graphics',
  }
# Chrome implementation status enum values.
NO_ACTIVE_DEV = 1
PROPOSED = 2
IN_DEVELOPMENT = 3
BEHIND_A_FLAG = 4
ENABLED_BY_DEFAULT = 5
DEPRECATED = 6
REMOVED = 7
EXPERIMENTAL_FRAMEWORK = 8
NO_LONGER_PURSUING = 1000 # insure bottom of list
# Ordered so templates render statuses in lifecycle order.
IMPLEMENTATION_STATUS = OrderedDict()
IMPLEMENTATION_STATUS[NO_ACTIVE_DEV] = 'No active development'
IMPLEMENTATION_STATUS[PROPOSED] = 'Proposed'
IMPLEMENTATION_STATUS[IN_DEVELOPMENT] = 'In development'
IMPLEMENTATION_STATUS[BEHIND_A_FLAG] = 'Behind a flag'
IMPLEMENTATION_STATUS[EXPERIMENTAL_FRAMEWORK] = 'In experimental framework'
IMPLEMENTATION_STATUS[ENABLED_BY_DEFAULT] = 'Enabled by default'
IMPLEMENTATION_STATUS[DEPRECATED] = 'Deprecated'
IMPLEMENTATION_STATUS[REMOVED] = 'Removed'
IMPLEMENTATION_STATUS[NO_LONGER_PURSUING] = 'No longer pursuing'
# API-surface footprint enum values and display text.
MAJOR_NEW_API = 1
MAJOR_MINOR_NEW_API = 2
SUBSTANTIVE_CHANGES = 3
MINOR_EXISTING_CHANGES = 4
EXTREMELY_SMALL_CHANGE = 5
FOOTPRINT_CHOICES = {
  MAJOR_NEW_API: ('A major new independent API (e.g. adding a large # '
                  'independent concepts with many methods/properties/objects)'),
  MAJOR_MINOR_NEW_API: ('Major changes to an existing API OR a minor new '
                        'independent API (e.g. adding a large # of new '
                        'methods/properties or introducing new concepts to '
                        'augment an existing API)'),
  SUBSTANTIVE_CHANGES: ('Substantive changes to an existing API (e.g. small '
                        'number of new methods/properties)'),
  MINOR_EXISTING_CHANGES: (
      'Minor changes to an existing API (e.g. adding a new keyword/allowed '
      'argument to a property/method)'),
  EXTREMELY_SMALL_CHANGE: ('Extremely small tweaks to an existing API (e.g. '
                           'how existing keywords/arguments are interpreted)'),
  }
# Developer-visibility enum values and display text.
MAINSTREAM_NEWS = 1
WARRANTS_ARTICLE = 2
IN_LARGER_ARTICLE = 3
SMALL_NUM_DEVS = 4
SUPER_SMALL = 5
VISIBILITY_CHOICES = {
  MAINSTREAM_NEWS: 'Likely in mainstream tech news',
  WARRANTS_ARTICLE: 'Will this feature generate articles on sites like html5rocks.com',
  IN_LARGER_ARTICLE: 'Covered as part of a larger article but not on its own',
  SMALL_NUM_DEVS: 'Only a very small number of web developers will care about',
  SUPER_SMALL: "So small it doesn't need to be covered in this dashboard.",
  }
# Other-vendor (Firefox/IE/Safari) stance enum values and display text.
SHIPPED = 1
IN_DEV = 2
PUBLIC_SUPPORT = 3
MIXED_SIGNALS = 4
NO_PUBLIC_SIGNALS = 5
PUBLIC_SKEPTICISM = 6
OPPOSED = 7
VENDOR_VIEWS = {
  SHIPPED: 'Shipped',
  IN_DEV: 'In development',
  PUBLIC_SUPPORT: 'Public support',
  MIXED_SIGNALS: 'Mixed public signals',
  NO_PUBLIC_SIGNALS: 'No public signals',
  PUBLIC_SKEPTICISM: 'Public skepticism',
  OPPOSED: 'Opposed',
  }
# Standardization-maturity enum values and display text.
DEFACTO_STD = 1
ESTABLISHED_STD = 2
WORKING_DRAFT = 3
EDITORS_DRAFT = 4
PUBLIC_DISCUSSION = 5
NO_STD_OR_DISCUSSION = 6
STANDARDIZATION = {
  DEFACTO_STD: 'De-facto standard',
  ESTABLISHED_STD: 'Established standard',
  WORKING_DRAFT: 'Working draft or equivalent',
  EDITORS_DRAFT: "Editor's draft",
  PUBLIC_DISCUSSION: 'Public discussion',
  NO_STD_OR_DISCUSSION: 'No public standards discussion',
  }
# Web-developer sentiment enum values and display text.
DEV_STRONG_POSITIVE = 1
DEV_POSITIVE = 2
DEV_MIXED_SIGNALS = 3
DEV_NO_SIGNALS = 4
DEV_NEGATIVE = 5
DEV_STRONG_NEGATIVE = 6
WEB_DEV_VIEWS = {
  DEV_STRONG_POSITIVE: 'Strongly positive',
  DEV_POSITIVE: 'Positive',
  DEV_MIXED_SIGNALS: 'Mixed signals',
  DEV_NO_SIGNALS: 'No signals',
  DEV_NEGATIVE: 'Negative',
  DEV_STRONG_NEGATIVE: 'Strongly negative',
  }
class DictModel(db.Model):
  """Base datastore model that can serialize its properties to a dict.

  Subclasses get to_dict() for JSON/template output and may override
  format_for_template() to post-process that dict.
  """

  def format_for_template(self):
    """Return the template-ready representation (default: to_dict())."""
    return self.to_dict()

  def to_dict(self):
    """Convert this entity's properties into JSON-friendly values.

    Returns:
      A dict mapping property names to plain Python values.
    Raises:
      ValueError: if a property value's type has no defined conversion.
    """
    output = {}

    for key, prop in self.properties().iteritems():
      value = getattr(self, key)

      if value is None or isinstance(value, SIMPLE_TYPES):
        output[key] = value
      elif isinstance(value, datetime.date):
        # Dates/datetimes are serialized as ISO-style strings rather than
        # ms-since-epoch numbers.
        output[key] = unicode(value)
      elif isinstance(value, db.GeoPt):
        output[key] = {'lat': value.lat, 'lon': value.lon}
      elif isinstance(value, db.Model):
        # Bug fix: this previously called a module-level to_dict(value),
        # which is not defined anywhere and raised NameError the first
        # time a reference property was serialized. Recurse through the
        # referenced entity's own to_dict() instead (assumes referenced
        # models are DictModel subclasses -- TODO confirm).
        output[key] = value.to_dict()
      elif isinstance(value, users.User):
        output[key] = value.email()
      else:
        raise ValueError('cannot encode ' + repr(prop))

    return output
# UMA metrics.
class StableInstance(DictModel):
  """One day's UMA usage datapoint for a single histogram bucket."""
  created = db.DateTimeProperty(auto_now_add=True)
  updated = db.DateTimeProperty(auto_now=True)
  # Name of the UMA histogram/property this datapoint belongs to.
  property_name = db.StringProperty(required=True)
  # Histogram bucket the datapoint was recorded for.
  bucket_id = db.IntegerProperty(required=True)
  date = db.DateProperty(verbose_name='When the data was fetched',
                         required=True)
  #hits = db.IntegerProperty(required=True)
  #total_pages = db.IntegerProperty()
  # Usage for this single day, and a rolling average, as percentages.
  day_percentage = db.FloatProperty()
  rolling_percentage = db.FloatProperty()
class AnimatedProperty(StableInstance):
  """Datapoints for CSS animated-property usage."""
  pass
class FeatureObserver(StableInstance):
  """Datapoints for FeatureObserver (use counter) metrics."""
  pass
# Feature dashboard.
class Feature(DictModel):
  """Container for a feature."""
  # Prefix shared by all memcache keys for cached Feature lists.
  DEFAULT_MEMCACHE_KEY = '%s|features' % (settings.MEMCACHE_KEY_PREFIX)
  def format_for_template(self):
    # Expand the integer enum fields into display strings/dicts so the
    # template layer never needs the enum tables.
    d = self.to_dict()
    d['id'] = self.key().id()
    d['category'] = FEATURE_CATEGORIES[self.category]
    d['visibility'] = VISIBILITY_CHOICES[self.visibility]
    d['impl_status_chrome'] = IMPLEMENTATION_STATUS[self.impl_status_chrome]
    d['meta'] = {
      'experimentalframework': self.impl_status_chrome == EXPERIMENTAL_FRAMEWORK,
      'needsflag': self.impl_status_chrome == BEHIND_A_FLAG,
      'milestone_str': self.shipped_milestone or d['impl_status_chrome']
      }
    d['ff_views'] = {'value': self.ff_views,
                     'text': VENDOR_VIEWS[self.ff_views]}
    d['ie_views'] = {'value': self.ie_views,
                     'text': VENDOR_VIEWS[self.ie_views]}
    d['safari_views'] = {'value': self.safari_views,
                         'text': VENDOR_VIEWS[self.safari_views]}
    d['standardization'] = {'value': self.standardization,
                            'text': STANDARDIZATION[self.standardization]}
    d['web_dev_views'] = {'value': self.web_dev_views,
                          'text': WEB_DEV_VIEWS[self.web_dev_views]}
    #d['owner'] = ', '.join(self.owner)
    return d
  def format_for_edit(self):
    # Flatten list properties into the comma/newline-separated strings
    # that the edit form expects.
    d = self.to_dict()
    #d['id'] = self.key().id
    d['owner'] = ', '.join(self.owner)
    d['doc_links'] = '\r\n'.join(self.doc_links)
    d['sample_links'] = '\r\n'.join(self.sample_links)
    d['search_tags'] = ', '.join(self.search_tags)
    return d
  @classmethod
  def get_all(self, limit=None, order='-updated', filterby=None,
              update_cache=False):
    # Memcache key encodes the query shape so different list views don't
    # collide in the cache.
    KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, order, limit)
    # TODO(ericbidelman): Support more than one filter.
    if filterby is not None:
      s = ('%s%s' % (filterby[0], filterby[1])).replace(' ', '')
      KEY += '|%s' % s
    feature_list = memcache.get(KEY)
    if feature_list is None or update_cache:
      query = Feature.all().order(order) #.order('name')
      # TODO(ericbidelman): Support more than one filter.
      if filterby:
        query.filter(filterby[0], filterby[1])
      features = query.fetch(limit)
      feature_list = [f.format_for_template() for f in features]
      memcache.set(KEY, feature_list)
    return feature_list
  @classmethod
  def get_all_with_statuses(self, statuses, update_cache=False):
    if not statuses:
      return []
    KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, sorted(statuses))
    feature_list = memcache.get(KEY)
    if feature_list is None or update_cache:
      # There's no way to do an OR in a single datastore query, and there's a
      # very good chance that the self.get_all() results will already be in
      # memcache, so use an array comprehension to grab the features we
      # want from the array of everything.
      feature_list = [feature for feature in self.get_all(update_cache=update_cache)
                      if feature['impl_status_chrome'] in statuses]
      memcache.set(KEY, feature_list)
    return feature_list
  @classmethod
  def get_feature(self, feature_id, update_cache=False):
    # Single-feature cache entry, keyed by datastore id.
    KEY = '%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, feature_id)
    feature = memcache.get(KEY)
    if feature is None or update_cache:
      unformatted_feature = Feature.get_by_id(feature_id)
      if unformatted_feature:
        feature = unformatted_feature.format_for_template()
        feature['updated_display'] = unformatted_feature.updated.strftime("%Y-%m-%d")
        memcache.set(KEY, feature)
    return feature
@classmethod
def get_chronological(self, limit=None, update_cache=False):
KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, 'cronorder', limit)
feature_list = memcache.get(KEY)
if feature_list is None or update_cache:
q = Feature.all()
q.order('-shipped_milestone')
q.order('name')
features = q.fetch(None)
features = [f for f in features if (IN_DEVELOPMENT < f.impl_status_chrome < NO_LONGER_PURSUING)]
# Append no active, in dev, proposed features.
q = Feature.all()
q.order('impl_status_chrome')
q.order('name')
q.filter('impl_status_chrome <=', IN_DEVELOPMENT)
pre_release = q.fetch(None)
pre_release.extend(features)
# Append no longer pursuing features.
q = Feature.all()
q.order('impl_status_chrome')
q.order('name')
q.filter('impl_status_chrome =', NO_LONGER_PURSUING)
no_longer_pursuing = q.fetch(None)
pre_release.extend(no_longer_pursuing)
feature_list = [f.format_for_template() for f in pre_release]
memcache.set(KEY, feature_list)
return feature_list
@classmethod
def get_shipping_samples(self, limit=None, update_cache=False):
KEY = '%s|%s|%s' % (Feature.DEFAULT_MEMCACHE_KEY, 'samples', limit)
feature_list = memcache.get(KEY)
if feature_list is None or update_cache:
# Get all shipping features. Ordered by shipping milestone (latest first).
q = Feature.all()
q.filter('impl_status_chrome IN', [ENABLED_BY_DEFAULT, EXPERIMENTAL_FRAMEWORK])
q.order('-impl_status_chrome')
q.order('-shipped_milestone')
q.order('name')
features = q.fetch(None)
# Get non-shipping features (sans removed or deprecated ones) and
# append to bottom of list.
q = Feature.all()
q.filter('impl_status_chrome <', ENABLED_BY_DEFAULT)
q.order('-impl_status_chrome')
q.order('-shipped_milestone')
q.order('name')
others = q.fetch(None)
features.extend(others)
# Filter out features without sample links.
feature_list = [f.format_for_template() for f in features
if len(f.sample_links)]
memcache.set(KEY, feature_list)
return feature_list
# Metadata.
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
updated_by = db.UserProperty(auto_current_user=True)
created_by = db.UserProperty(auto_current_user_add=True)
# General info.
category = db.IntegerProperty(required=True)
name = db.StringProperty(required=True)
summary = db.StringProperty(required=True, multiline=True)
# Chromium details.
bug_url = db.LinkProperty()
impl_status_chrome = db.IntegerProperty(required=True)
shipped_milestone = db.IntegerProperty()
shipped_android_milestone = db.IntegerProperty()
shipped_ios_milestone = db.IntegerProperty()
shipped_webview_milestone = db.IntegerProperty()
shipped_opera_milestone = db.IntegerProperty()
shipped_opera_android_milestone = db.IntegerProperty()
owner = db.ListProperty(db.Email)
footprint = db.IntegerProperty()
visibility = db.IntegerProperty(required=True)
#webbiness = db.IntegerProperty() # TODO: figure out what this is
# Standards details.
standardization = db.IntegerProperty(required=True)
spec_link = db.LinkProperty()
prefixed = db.BooleanProperty()
ff_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
ie_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
safari_views = db.IntegerProperty(required=True, default=NO_PUBLIC_SIGNALS)
ff_views_link = db.LinkProperty()
ie_views_link = db.LinkProperty()
safari_views_link = db.LinkProperty()
# Web dev details.
web_dev_views = db.IntegerProperty(required=True)
doc_links = db.StringListProperty()
sample_links = db.StringListProperty()
#tests = db.StringProperty()
search_tags = db.StringListProperty()
comments = db.StringProperty(multiline=True)
class PlaceholderCharField(forms.CharField):
def __init__(self, *args, **kwargs):
#super(forms.CharField, self).__init__(*args, **kwargs)
attrs = {}
if kwargs.get('placeholder'):
attrs['placeholder'] = kwargs.get('placeholder')
del kwargs['placeholder']
label = kwargs.get('label') or ''
if label:
del kwargs['label']
self.max_length = kwargs.get('max_length') or None
super(forms.CharField, self).__init__(label=label,
widget=forms.TextInput(attrs=attrs), *args, **kwargs)
# class PlaceholderForm(forms.Form):
# def __init__(self, *args, **kwargs):
# super(PlaceholderForm, self).__init__(*args, **kwargs)
# for field_name in self.fields:
# field = self.fields.get(field_name)
# if field:
# if type(field.widget) in (forms.TextInput, forms.DateInput):
# field.widget = forms.TextInput(attrs={'placeholder': field.label})
class FeatureForm(forms.Form):
SHIPPED_HELP_TXT = ('First milestone the feature shipped with this status '
'(either enabled by default, experimental, or deprecated)')
#name = PlaceholderCharField(required=True, placeholder='Feature name')
name = forms.CharField(required=True, label='Feature')
summary = forms.CharField(label='', required=True, max_length=500,
widget=forms.Textarea(attrs={'cols': 50, 'placeholder': 'Summary description', 'maxlength': 500}))
# owner = PlaceholderCharField(
# required=False, placeholder='Owner(s) email',
# help_text='Comma separated list of full email addresses (@chromium.org preferred).')
category = forms.ChoiceField(required=True,
choices=sorted(FEATURE_CATEGORIES.items(), key=lambda x: x[1]))
owner = forms.CharField(
required=False, label='Owner(s) email',
help_text='Comma separated list of full email addresses. Prefer @chromium.org.')
bug_url = forms.URLField(required=False, label='Bug URL',
help_text='OWP Launch Tracking, crbug, etc.')
impl_status_chrome = forms.ChoiceField(required=True,
label='Status in Chrome',
choices=IMPLEMENTATION_STATUS.items())
#shipped_milestone = PlaceholderCharField(required=False,
# placeholder='First milestone the feature shipped with this status (either enabled by default or experimental)')
shipped_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for desktop: ' + SHIPPED_HELP_TXT)
shipped_android_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for Android: ' + SHIPPED_HELP_TXT)
shipped_ios_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for iOS: ' + SHIPPED_HELP_TXT)
shipped_webview_milestone = forms.IntegerField(required=False, label='',
help_text='Chrome for Android web view: ' + SHIPPED_HELP_TXT)
shipped_opera_milestone = forms.IntegerField(required=False, label='',
help_text='Opera for desktop: ' + SHIPPED_HELP_TXT)
shipped_opera_android_milestone = forms.IntegerField(required=False, label='',
help_text='Opera for Android: ' + SHIPPED_HELP_TXT)
prefixed = forms.BooleanField(
required=False, initial=False, label='Prefixed?')
standardization = forms.ChoiceField(
label='Standardization', choices=STANDARDIZATION.items(),
initial=EDITORS_DRAFT,
help_text=("The standardization status of the API. In bodies that don't "
"use this nomenclature, use the closest equivalent."))
spec_link = forms.URLField(required=False, label='Spec link',
help_text="Prefer editor's draft.")
doc_links = forms.CharField(label='Doc links', required=False, max_length=500,
widget=forms.Textarea(attrs={'cols': 50, 'placeholder': 'Links to documentation (one per line)', 'maxlength': 500}),
help_text='One URL per line')
sample_links = forms.CharField(label='Samples links', required=False, max_length=500,
widget=forms.Textarea(attrs={'cols': 50, 'placeholder': 'Links to samples (one per line)', 'maxlength': 500}),
help_text='One URL per line')
footprint = forms.ChoiceField(label='Technical footprint',
choices=FOOTPRINT_CHOICES.items(),
initial=MAJOR_MINOR_NEW_API)
visibility = forms.ChoiceField(
label='Developer visibility',
choices=VISIBILITY_CHOICES.items(),
initial=WARRANTS_ARTICLE,
help_text=('How much press / media / web developer buzz will this '
'feature generate?'))
web_dev_views = forms.ChoiceField(
label='Web developer views',
choices=WEB_DEV_VIEWS.items(),
initial=DEV_NO_SIGNALS,
help_text=('How positive has the reaction from developers been? If '
'unsure, default to "No signals".'))
safari_views = forms.ChoiceField(label='Safari views',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
safari_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
ff_views = forms.ChoiceField(label='Firefox views',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
ff_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
ie_views = forms.ChoiceField(label='Edge',
choices=VENDOR_VIEWS.items(),
initial=NO_PUBLIC_SIGNALS)
ie_views_link = forms.URLField(required=False, label='',
help_text='Citation link.')
search_tags = forms.CharField(label='Search tags', required=False,
help_text='Comma separated keywords used only in search')
comments = forms.CharField(label='', required=False, max_length=500, widget=forms.Textarea(
attrs={'cols': 50, 'placeholder': 'Additional comments, caveats, info...', 'maxlength': 500}))
class Meta:
model = Feature
#exclude = ('shipped_webview_milestone',)
def __init__(self, *args, **keyargs):
super(FeatureForm, self).__init__(*args, **keyargs)
meta = getattr(self, 'Meta', None)
exclude = getattr(meta, 'exclude', [])
for field_name in exclude:
if field_name in self.fields:
del self.fields[field_name]
for field, val in self.fields.iteritems():
if val.required:
self.fields[field].widget.attrs['required'] = 'required'
class AppUser(DictModel):
"""Describes a user for whitelisting."""
#user = db.UserProperty(required=True, verbose_name='Google Account')
email = db.EmailProperty(required=True)
#is_admin = db.BooleanProperty(default=False)
created = db.DateTimeProperty(auto_now_add=True)
updated = db.DateTimeProperty(auto_now=True)
def format_for_template(self):
d = self.to_dict()
d['id'] = self.key().id()
return d
class HistogramModel(db.Model):
"""Container for a histogram."""
bucket_id = db.IntegerProperty(required=True)
property_name = db.StringProperty(required=True)
@classmethod
def get_all(self):
output = {}
buckets = self.all().fetch(None)
for bucket in buckets:
output[bucket.bucket_id] = bucket.property_name
return output
class CssPropertyHistogram(HistogramModel):
pass
class FeatureObserverHistogram(HistogramModel):
pass
| apache-2.0 | -64,401,349,025,948,410 | 32.680952 | 155 | 0.66186 | false |
pferreir/indico-backup | indico/MaKaC/registration.py | 1 | 207929 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
from flask import request
import random, time
from uuid import uuid4
from hashlib import md5
from datetime import datetime, timedelta
from pytz import timezone
from pytz import all_timezones
from MaKaC.common.timezoneUtils import nowutc
from persistent import Persistent
from persistent.mapping import PersistentMapping
from persistent.list import PersistentList
import MaKaC
from indico.core.db import eticket
from MaKaC.common.Counter import Counter
from MaKaC.errors import FormValuesError, MaKaCError
from MaKaC.common.Locators import Locator
from indico.core.config import Config
from MaKaC.common.TemplateExec import inlineContextHelp
import MaKaC.webinterface.urlHandlers as urlHandlers
from MaKaC.common.info import HelperMaKaCInfo
from MaKaC.webinterface.common.tools import strip_ml_tags
from MaKaC.trashCan import TrashCanManager
from MaKaC.webinterface.mail import GenericMailer, GenericNotification
from MaKaC.i18n import _
from indico.util.i18n import i18nformat
from indico.util.date_time import format_datetime, format_date
from indico.util.string import safe_upper
from MaKaC.webinterface.common.countries import CountryHolder
import re
import tempfile, os
import string
from MaKaC.webinterface.common.person_titles import TitlesRegistry
from indico.util.fossilize import Fossilizable, fossilizes
from indico.core.fossils.registration import IRegFormTextInputFieldFossil, IRegFormTelephoneInputFieldFossil, \
IRegFormTextareaInputFieldFossil, IRegFormNumberInputFieldFossil, IRegFormLabelInputFieldFossil, \
IRegFormCheckboxInputFieldFossil, IRegFormYesNoInputFieldFossil, IRegFormFileInputFieldFossil, \
IRegFormRadioItemFossil, IRegFormRadioGroupInputFieldFossil, IRegFormCountryInputFieldFossil, \
IRegFormDateInputFieldFossil, IRegFormGeneralFieldFossil, IRegFormGeneralSectionFossil, \
IRegFormFurtherInformationSectionFossil, IRegFormAccommodationTypeItemFossil, IRegFormAccommodationSectionFossil, \
IRegFormReasonParticipationSectionFossil, IRegFormRegistrationSessionItemFossil, IRegFormSessionSectionFossil, \
IRegFormSocialEventItemFossil, IRegFormSocialEventSectionFossil, IRegFormRegistrantFossil, \
IRegFormRegistrantBasicFossil, IRegFormRegistrantFullFossil, IRegFormSocialEventFossil, IRegFormMiscellaneousInfoGroupFossil
# Matches a plain decimal price such as "10", "10.50" or "10,50"
# (either "." or "," accepted as the decimal separator; no currency sign).
PRICE_PATTERN = re.compile(r'^(\d+(?:[\.,]\d+)?)$')
def stringToDate(str):
    """Parse a date written as ``"<day>-<MonthName>-<year>"``.

    Example: ``"12-March-2014"`` -> ``datetime(2014, 3, 12)``.
    Raises KeyError for an unknown month name and ValueError for a
    malformed day/year, exactly like the original implementation.
    """
    month_number = dict(zip(
        ("January", "February", "March", "April", "May", "June",
         "July", "August", "September", "October", "November", "December"),
        range(1, 13)))
    day_part, month_part, year_part = str.split("-")
    return datetime(int(year_part), month_number[month_part], int(day_part))
class RegistrationForm(Persistent):
    """Legacy, ZODB-persisted registration form attached to a conference.

    Aggregates the individual form sections (personal data, sessions,
    accommodation, reason for participation, further information, social
    events and any number of "general" sections), plus the registration
    period, user limit, currency, statuses and e-mail notification options.

    NOTE(review): many getters use ``try/except AttributeError`` to lazily
    backfill attributes that did not exist when older instances were
    pickled into the ZODB — keep that pattern when touching this class.
    """

    def __init__(self, conf, groupData=None, skipPersonalData=False):
        # ``groupData`` (optional dict) seeds the basic settings; otherwise
        # everything starts with defaults. ``skipPersonalData`` is used by
        # clone() so the personal-data section can be copied over afterwards.
        self._conf = conf
        if groupData is None:
            self.activated = False
            self.title = "Registration Form"
            self.announcement = ""
            self.usersLimit = 0
            self.contactInfo = ""
            self.setStartRegistrationDate(nowutc())
            self.setEndRegistrationDate(nowutc())
            self.setModificationEndDate(None)
            self.setCurrency("not selected")
        else:
            self.activated = groupData.get("activated", False)
            self.title = groupData.get("name", "")
            self.announcement = groupData.get("announcement", "")
            self.usersLimit = groupData.get("limit", "")
            self.startRegistrationDate = groupData.get("startRegistrationDate", None)
            if self.startRegistrationDate is None:
                self.setStartRegistrationDate(nowutc())
            self.endRegistrationDate = groupData.get("endRegistrationDate", None)
            if self.endRegistrationDate is None:
                self.setEndRegistrationDate(nowutc())
            # Extra grace period after the official end date (see
            # getAllowedEndRegistrationDate); the getters backfill these
            # attributes for instances created through the other branch.
            self._endExtraTimeAmount = 0
            self._endExtraTimeUnit = 'days'
            self.modificationEndDate = groupData.get("modificationEndDate", None)
            #if self.modificationEndDate is None:
            #    self.setModificationEndDate(nowutc())
            self.contactInfo = groupData.get("contactInfo", "")
            self.setCurrency(groupData.get("Currency", ""))
        self.notification = Notification()
        self._eTicket = eticket.ETicket()
        # Status definition
        self._statuses = {}
        self._statusesGenerator = Counter()
        #Multiple-Subforms
        if not skipPersonalData:
            self.personalData = PersonalDataForm(self)
        #Simple-SubForms
        self.sessionsForm = SessionsForm()
        self.accommodationForm = AccommodationForm(self)
        self.reasonParticipationForm = ReasonParticipationForm()
        self.furtherInformation = FurtherInformationForm()
        self.socialEventForm = SocialEventForm(self)
        #General-SubForms
        self._generalSectionGenerator = Counter()
        self.generalSectionForms = {}
        if not skipPersonalData:
            self.addGeneralSectionForm(self.personalData, True)
        #All SortedForms
        self._sortedForms = []
        if not skipPersonalData:
            self.addToSortedForms(self.personalData)
        self.addToSortedForms(self.reasonParticipationForm)
        self.addToSortedForms(self.sessionsForm)
        self.addToSortedForms(self.accommodationForm)
        self.addToSortedForms(self.socialEventForm)
        self.addToSortedForms(self.furtherInformation)
        self.setAllSessions()

    def clone(self, conference):
        """Deep-copy this form onto *conference*.

        Registration/modification dates are shifted so that they keep the
        same offset relative to the new conference's start date.
        """
        form = RegistrationForm(conference, skipPersonalData=True)
        form.setConference(conference)
        form.setAnnouncement(self.getAnnouncement())
        form.setContactInfo(self.getContactInfo())
        form.setCurrency(self.getCurrency())
        # Preserve the relative offsets between the event start and the
        # registration period boundaries.
        registrationPeriodEnd = self.getConference().getStartDate() - self.getEndRegistrationDate()
        registrationPeriodStart = self.getConference().getStartDate() - self.getStartRegistrationDate()
        form.setEndRegistrationDate(conference.getStartDate() - registrationPeriodEnd)
        form.setEndExtraTimeAmount(self.getEndExtraTimeAmount())
        form.setEndExtraTimeUnit(self.getEndExtraTimeUnit())
        form.setStartRegistrationDate(conference.getStartDate() - registrationPeriodStart)
        if self.getModificationEndDate():
            registrationPeriodModifEndDate = self.getConference().getStartDate() - self.getModificationEndDate()
            form.setModificationEndDate(conference.getStartDate() - registrationPeriodModifEndDate)
        form.setTitle(self.getTitle())
        form.setUsersLimit(self.getUsersLimit())
        form.setActivated(self.isActivated())
        form.setMandatoryAccount(self.isMandatoryAccount())
        form.setNotificationSender(self.getNotificationSender())
        form.setSendRegEmail(self.isSendRegEmail())
        form.setSendReceiptEmail(self.isSendReceiptEmail())
        form.setSendPaidEmail(self.isSendPaidEmail())
        form.setAllSessions()
        form.notification = self.getNotification().clone()
        form._eTicket = self.getETicket().clone()
        form.personalData = self.getPersonalData().clone(form)
        form.generalSectionForms[form.personalData.getId()] = form.personalData
        acf = self.getAccommodationForm()
        if acf is not None:
            form.accommodationForm = acf.clone(form)
        fif = self.getFurtherInformationForm()
        if fif is not None:
            form.furtherInformation = fif.clone()
        rpf = self.getReasonParticipationForm()
        if rpf is not None:
            form.reasonParticipationForm = rpf.clone()
        form.setAllSessions()
        ses = self.getSessionsForm()
        if ses is not None:
            form.sessionsForm = ses.clone(form.sessionsForm.getSessionList())
        sef = self.getSocialEventForm()
        if sef is not None:
            form.socialEventForm = sef.clone(form)
        # Rebuild the section ordering; general sections not cloned above
        # are cloned on the fly.
        form._sortedForms = []
        for item in self.getSortedForms():
            clonedItem = form.getSectionById(item.getId())
            if clonedItem is None:  # General Section, not cloned yet
                clonedItem = item.clone(form)
                form.generalSectionForms[clonedItem.getId()] = clonedItem
            form.addToSortedForms(clonedItem)
        return form

    def getCurrency(self):
        # Lazy migration: older pickled instances may lack ``_currency``.
        try:
            return self._currency
        except:
            self.setCurrency("not selected")
        return self._currency

    def setCurrency(self, currency):
        self._currency = currency

    def getConference(self):
        return self._conf

    # Alias kept for the generic owner/owned API used elsewhere in MaKaC.
    getOwner = getConference

    def getTimezone(self):
        return self.getConference().getTimezone()

    def setConference(self, conf):
        self._conf = conf

    setOwner = setConference

    def setAllSessions(self):
        """Register every conference session as a choosable RegistrationSession."""
        for ses in self._conf.getSessionList():
            rs = RegistrationSession(ses, self)
            self.sessionsForm.addSession(rs)

    def isActivated(self):
        return self.activated

    def activate(self):
        self.activated = True

    def deactivate(self):
        self.activated = False

    def setActivated(self, value):
        self.activated = value

    def isMandatoryAccount(self):
        # Defaults to True for instances pickled before this flag existed.
        try:
            if self._mandatoryAccount:
                pass
        except AttributeError, e:
            self._mandatoryAccount = True
        return self._mandatoryAccount

    def setMandatoryAccount(self, v=True):
        self._mandatoryAccount = v

    def setNotificationSender(self, sender):
        self._notificationSender = sender

    def getNotificationSender(self):
        # Fallback: first address of the conference support e-mail.
        sender = None
        try:
            if self._notificationSender:
                sender = self._notificationSender
        except AttributeError, e:
            pass
        if not sender:
            self._notificationSender = self._conf.getSupportInfo().getEmail(returnNoReply=True).split(',', 1)[0]
        return self._notificationSender

    def isSendRegEmail(self):
        # Whether a confirmation e-mail is sent on registration (default True).
        try:
            if self._sendRegEmail:
                pass
        except AttributeError, e:
            self._sendRegEmail = True
        return self._sendRegEmail

    def setSendRegEmail(self, v=True):
        self._sendRegEmail = v

    def isSendReceiptEmail(self):
        # Whether payment-receipt e-mails are sent (default False).
        try:
            if self._sendReceiptEmail:
                pass
        except AttributeError, e:
            self._sendReceiptEmail = False
        return self._sendReceiptEmail

    def setSendReceiptEmail(self, v=True):
        self._sendReceiptEmail = v

    def isSendPaidEmail(self):
        # Whether "payment completed" e-mails are sent (default False).
        try:
            if self._sendPaidEmail:
                pass
        except AttributeError, e:
            self._sendPaidEmail = False
        return self._sendPaidEmail

    def setSendPaidEmail(self, v=True):
        self._sendPaidEmail = v

    def setTitle(self, newName):
        self.title = newName.strip()

    def getTitle(self):
        return self.title

    def setAnnouncement(self, newDesc):
        self.announcement = newDesc.strip()

    def getAnnouncement(self):
        return self.announcement

    def setUsersLimit(self, newLimit):
        # Accepts an int or a (possibly blank) numeric string; 0 means
        # "no limit" and negative values are clamped to 0.
        if isinstance(newLimit, int):
            self.usersLimit = newLimit
        elif isinstance(newLimit, str):
            if newLimit.strip() == "":
                self.usersLimit = 0
            else:
                self.usersLimit = int(newLimit.strip())
        if self.usersLimit < 0:
            self.usersLimit = 0

    def getUsersLimit(self):
        return self.usersLimit

    def isFull(self):
        """True when a user limit is set and already reached."""
        if self.usersLimit != 0:
            return len(self.getConference().getRegistrants()) >= self.usersLimit
        return False

    def setStartRegistrationDate(self, sd):
        # Stored naive, normalised to the very start of the day.
        self.startRegistrationDate = datetime(sd.year, sd.month, sd.day, 0, 0, 0)

    def getStartRegistrationDate(self):
        return timezone(self.getTimezone()).localize(self.startRegistrationDate)

    def setEndRegistrationDate(self, ed):
        # Stored naive, normalised to the very end of the day.
        self.endRegistrationDate = datetime(ed.year, ed.month, ed.day, 23, 59, 59)

    def getEndRegistrationDate(self):
        return timezone(self.getTimezone()).localize(self.endRegistrationDate)

    def getAllowedEndRegistrationDate(self):
        """End date including the extra grace period (days or weeks)."""
        if self.getEndExtraTimeUnit() == 'days':
            delta = timedelta(days=self.getEndExtraTimeAmount())
        else:
            delta = timedelta(weeks=self.getEndExtraTimeAmount())
        return timezone(self.getTimezone()).localize(self.endRegistrationDate + delta)

    def setEndExtraTimeAmount(self, value):
        self._endExtraTimeAmount = value

    def getEndExtraTimeAmount(self):
        try:
            return self._endExtraTimeAmount
        except AttributeError:
            self._endExtraTimeAmount = 0
        return self._endExtraTimeAmount

    def setEndExtraTimeUnit(self, value):
        self._endExtraTimeUnit = value

    def getEndExtraTimeUnit(self):
        try:
            return self._endExtraTimeUnit
        except AttributeError:
            self._endExtraTimeUnit = 'days'
        return self._endExtraTimeUnit

    def setModificationEndDate(self, ed):
        # None disables post-registration modifications entirely.
        if ed:
            self.modificationEndDate = datetime(ed.year, ed.month, ed.day, 23, 59, 59)
        else:
            self.modificationEndDate = None

    def getModificationEndDate(self):
        try:
            if self.modificationEndDate:
                return timezone(self.getTimezone()).localize(self.modificationEndDate)
        except AttributeError, e:
            pass
        return None

    def inModificationPeriod(self):
        """True while registrants may still modify their registration."""
        if self.getModificationEndDate() is None:
            return False
        date = nowutc()
        sd = self.getStartRegistrationDate()
        ed = self.getModificationEndDate()
        return date <= ed and date >= sd

    def inRegistrationPeriod(self, date=None):
        """True when *date* (default: now) falls in the allowed period."""
        if date is None:
            date = nowutc()
        sd = self.getStartRegistrationDate()
        ed = self.getAllowedEndRegistrationDate()
        return date <= ed and date >= sd

    def setContactInfo(self, ci):
        self.contactInfo = ci

    def getContactInfo(self):
        return self.contactInfo

    def getStatuses(self):
        # Maps status id -> Status; lazily created on older instances.
        try:
            if self._statuses:
                pass
        except AttributeError, e:
            self._statuses = {}
        return self._statuses

    def _generateStatusId(self):
        try:
            if self._statusesGenerator:
                pass
        except AttributeError, e:
            self._statusesGenerator = Counter()
        return self._statusesGenerator

    def getStatusesList(self, sort=True):
        v = self.getStatuses().values()
        if sort:
            v.sort(Status._cmpCaption)
        return v

    def getStatusById(self, id):
        if self.getStatuses().has_key(id):
            return self.getStatuses()[id]
        return None

    def addStatus(self, st):
        st.setId(str(self._generateStatusId().newCount()))
        self.getStatuses()[st.getId()] = st
        self.notifyModification()

    def removeStatus(self, st):
        if self.getStatuses().has_key(st.getId()):
            del self.getStatuses()[st.getId()]
            self.notifyModification()

    def getNotification(self):
        try:
            if self.notification:
                pass
        except:
            self.notification = Notification()
        return self.notification

    def _convertPersonalData(self):
        """One-off migration of the old PersonalData section.

        Converts the legacy personal-data object into a PersonalDataForm
        general section and copies each registrant's stored personal
        attributes into a new miscellaneous-info group. No-op once the
        instance already holds a PersonalDataForm.
        """
        if isinstance(self.personalData, PersonalDataForm):
            return
        pd = PersonalDataForm(self)
        self.addGeneralSectionForm(pd, True, 0)
        for f in pd.getSortedFields():
            f.setDisabled(not self.personalData.getDataItem(f.getPDField()).isEnabled())
            f.setMandatory(self.personalData.getDataItem(f.getPDField()).isMandatory())
        for registrant in self.getConference().getRegistrants().itervalues():
            mg = MiscellaneousInfoGroup(registrant, pd)
            registrant.addMiscellaneousGroup(mg)
            for f in pd.getSortedFields():
                val = getattr(registrant, '_' + f.getPDField())
                # radiobuttons are numerically indexed
                if f.getCaption() == "Title":
                    try:
                        val = str(TitlesRegistry._items.index(val))
                    except ValueError:
                        # can happen for older events with obsolete titles
                        val = "0"
                fakeParams = {f.getInput().getHTMLName(): val}
                f.getInput().setResponseValue(mg.getResponseItemById(f.getId()), fakeParams, registrant, mg, override=True, validate=False)
        self.personalData = pd

    def getPersonalData(self):
        self._convertPersonalData()
        return self.personalData

    def getFurtherInformationForm(self):
        return self.furtherInformation

    def getSessionsForm(self):
        return self.sessionsForm

    def getAccommodationForm(self):
        return self.accommodationForm

    def getSocialEventForm(self):
        return self.socialEventForm

    def getReasonParticipationForm(self):
        return self.reasonParticipationForm

    def getSectionById(self, id):
        """Resolve a section by id; fixed ids map to the built-in sections,
        anything else is looked up among the general sections."""
        if id == "reasonParticipation":
            return self.getReasonParticipationForm()
        if id == "sessions":
            return self.getSessionsForm()
        if id == "accommodation":
            return self.getAccommodationForm()
        if id == "socialEvents":
            return self.getSocialEventForm()
        if id == "furtherInformation":
            return self.getFurtherInformationForm()
        return self.getGeneralSectionFormById(id)

    def _getGeneralSectionGenerator(self):
        try:
            if self._generalSectionGenerator:
                pass
        except AttributeError, e:
            self._generalSectionGenerator = Counter()
        return self._generalSectionGenerator

    def getGeneralSectionForms(self):
        try:
            if self.generalSectionForms:
                pass
        except AttributeError, e:
            self.generalSectionForms = {}
        return self.generalSectionForms

    def getGeneralSectionFormById(self, id):
        return self.getGeneralSectionForms().get(id, None)

    def getGeneralSectionFormsList(self):
        return self.getGeneralSectionForms().values()

    def addGeneralSectionForm(self, gsf, preserveTitle=False, pos=None):
        """Add a general section, assigning it a fresh unique id."""
        id = str(self._getGeneralSectionGenerator().newCount())
        while self.getGeneralSectionFormById(id) is not None:
            id = str(self._getGeneralSectionGenerator().newCount())
        gsf.setId(id)
        if not preserveTitle:
            gsf.setTitle("Miscellaneous information %s" % gsf.getId())
        self.generalSectionForms[gsf.getId()] = gsf
        self.addToSortedForms(gsf, pos)
        self.notifyModification()

    def removeGeneralSectionForm(self, gsf):
        if self.hasGeneralSectionForm(gsf):
            del self.generalSectionForms[gsf.getId()]
            self.removeFromSortedForms(gsf)
            self.notifyModification()

    def hasGeneralSectionForm(self, gsf):
        return self.getGeneralSectionForms().has_key(gsf.getId())

    def getSortedForms(self):
        # Display order of all sections; rebuilt with the default order for
        # instances pickled before ``_sortedForms`` existed.
        try:
            if self._sortedForms:
                pass
        except AttributeError, e:
            self._sortedForms = []
            self.addToSortedForms(self.reasonParticipationForm)
            self.addToSortedForms(self.sessionsForm)
            self.addToSortedForms(self.accommodationForm)
            self.addToSortedForms(self.socialEventForm)
            self.addToSortedForms(self.furtherInformation)
            for gs in self.getGeneralSectionFormsList():
                self.addToSortedForms(gs)
        return self._sortedForms

    def addToSortedForms(self, form, i=None):
        """Insert *form* at position *i* (append by default), moving it if
        it is already present."""
        if i is None:
            i = len(self.getSortedForms())
        try:
            self.getSortedForms().remove(form)
        except ValueError, e:
            pass
        self.getSortedForms().insert(i, form)
        self.notifyModification()
        return True

    def removeFromSortedForms(self, form):
        try:
            self.getSortedForms().remove(form)
        except ValueError, e:
            return False
        self.notifyModification()
        return True

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the RegistrationForm instance """
        if self.getConference() is None:
            return Locator()
        lconf = self.getConference().getLocator()
        return lconf

    def notifyRegistrantRemoval(self, reg):
        """Release everything registrant *reg* holds before it is removed:
        accommodation and social-event places, field hooks and uploads."""
        acco = reg.getAccommodation()
        if acco is not None:
            accoType = acco.getAccommodationType()
            if accoType is not None:
                accoType.decreaseNoPlaces()
        for se in reg.getSocialEvents():
            se.delete()  # It'll decrease the no of places
        for mg in reg.getMiscellaneousGroupList():
            for item in mg.getResponseItemList():
                item.getGeneralField().getInput()._beforeValueChange(item, False)
        for attachment in reg.getAttachments().keys():
            reg.deleteFile(attachment)

    def delete(self):
        # Soft delete: the form goes to the trash can and can be recovered.
        self.getSessionsForm().clearSessionList()
        TrashCanManager().add(self)

    def recover(self):
        TrashCanManager().remove(self)

    def notifyModification(self):
        # Mark the persistent object dirty and propagate to the conference.
        self._p_changed = 1
        self._conf.notifyModification()

    def getETicket(self):
        try:
            return self._eTicket
        except AttributeError:
            self._eTicket = eticket.ETicket()
        return self._eTicket
class Notification(Persistent):
    def __init__(self):
        # "To:" and "CC:" recipient addresses, kept in PersistentLists so
        # the ZODB tracks in-place mutations.
        self._toList = PersistentList()
        self._ccList = PersistentList()
def clone(self):
n = Notification()
for t in self.getToList():
n.addToList(t)
for c in self.getCCList():
n.addCCList(c)
return n
    def getToList(self):
        # Addresses notified in the "To:" header.
        return self._toList
    def setToList(self, tl):
        # Replaces the whole "To:" list (callers pass a PersistentList).
        self._toList = tl
    def addToList(self, to):
        # Append a single "To:" address.
        self._toList.append(to)
    def clearToList(self):
        # Drop all "To:" addresses (fresh PersistentList).
        self._toList = PersistentList()
    def getCCList(self):
        # Addresses notified in the "CC:" header.
        return self._ccList
    def setCCList(self, cl):
        # Replaces the whole "CC:" list (callers pass a PersistentList).
        self._ccList = cl
    def addCCList(self, cc):
        # Append a single "CC:" address.
        self._ccList.append(cc)
    def clearCCList(self):
        # Drop all "CC:" addresses (fresh PersistentList).
        self._ccList = PersistentList()
    def _printSessions(self, sessionForm, sessionList):
        """Plain-text block describing the registrant's session choices.

        Returns "" when the sessions section is disabled.
        """
        text = ""
        if sessionForm.isEnabled():
            if sessionForm.getType() == "2priorities":
                # Ranked mode: exactly two slots, "--not selected--" when empty.
                session1 = i18nformat("""--_("not selected")--""")
                if len(sessionList) > 0:
                    session1 = sessionList[0].getTitle()
                session2 = i18nformat("""--_("not selected")--""")
                if len(sessionList) > 1:
                    session2 = sessionList[1].getTitle()
                text = i18nformat("""%s
- _("First priority"): %s
- _("Other option"): %s
""") % (self._printTitle(sessionForm.getTitle()), session1, session2)
            else:
                # Otherwise: one line per selected session title.
                sessionListText = []
                for s in sessionList:
                    sessionListText.append("\n%s" % s.getTitle())
                text = """%s%s
""" % (self._printTitle(sessionForm.getTitle()), "".join(sessionListText))
        return text
    def _printAccommodation(self, accommodationForm, accommodation):
        """Plain-text block with arrival/departure dates and the chosen
        accommodation type. Returns "" when the section is disabled."""
        text = ""
        if accommodationForm.isEnabled():
            accoType = i18nformat("""--_("not selected")--""")
            if accommodation.getAccommodationType() is not None:
                accoType = accommodation.getAccommodationType().getCaption()
            text = i18nformat("""%s- _("Arrival date"): %s
- _("Departure date"): %s
- _("Accommodation type"): %s""") % (self._printTitle(accommodationForm.getTitle()), \
                                     accommodation.getArrivalDate().strftime("%d %B %Y"), \
                                     accommodation.getDepartureDate().strftime("%d %B %Y"), \
                                     accoType)
        return text
def _printSocialEvents(self, socialEventForm, socialEvents):
text = ""
if socialEventForm.isEnabled():
se = []
for item in socialEvents:
se.append(_("- %s [%s place(s) needed]") % (item.getCaption(), item.getNoPlaces()))
text = ""
if se != []:
text = """%s
%s
""" % (self._printTitle(socialEventForm.getTitle()), "\n".join(se) or i18nformat("""--_("No social events selected")--"""))
return text
def _printReasonParticipation(self, reasonParticipationForm, reasonParticipation):
text = ""
if reasonParticipationForm.isEnabled():
text = """%s%s
""" % (self._printTitle(reasonParticipationForm.getTitle()), reasonParticipation)
return text
def _printTitle(self, title):
sep = '-----------------------------------'
return "\n%s\n%s\n%s\n\n" % (sep, title, sep)
def _formatValue(self, fieldInput, value):
try:
value = str(fieldInput.getValueDisplay(value))
except:
value = str(value).strip()
if len(value) > 50:
value = '\n\n%s\n' % value
return value
def _printMiscellaneousInfo(self, gs, mig):
text = []
if gs.isEnabled():
if mig is not None:
noitems = True
text.append(self._printTitle(mig.getTitle()))
#Mods to support sorting fields
#for f in gs.getFields():
for f in gs.getSortedFields():
mii = mig.getResponseItemById(f.getId())
if mii is not None:
noitems = False
caption = mii.getCaption()
value = mii.getValue()
fieldInput = mii.getGeneralField().getInput()
isLabel = isinstance(fieldInput, LabelInput)
if isLabel and mii.isBillable():
value = "%s %s" % (mii.getPrice(), mii.getCurrency())
elif isLabel:
value = ""
if isLabel and not value:
text.append("""- %s\n""" % caption)
else:
text.append("""- %s: %s\n""" % (caption, self._formatValue(fieldInput, value)))
if noitems:
text.append("""-- no values --\n""")
text.append("\n")
return "".join(text)
def _printAllSections(self, regForm, rp):
sects = []
for formSection in regForm.getSortedForms():
if formSection.getId() == "reasonParticipation":
sects.append("""\n%s""" % self._printReasonParticipation(formSection, rp.getReasonParticipation()))
elif formSection.getId() == "sessions":
sects.append("""\n%s""" % self._printSessions(formSection, rp.getSessionList()))
elif formSection.getId() == "accommodation":
sects.append("""\n%s""" % self._printAccommodation(formSection, rp.getAccommodation()))
elif formSection.getId() == "socialEvents":
sects.append("""\n%s""" % self._printSocialEvents(formSection, rp.getSocialEvents()))
elif formSection.getId() == "furtherInformation":
pass
else:
sects.append("""%s""" % self._printMiscellaneousInfo(formSection, rp.getMiscellaneousGroupById(formSection.getId())))
return "".join(s.decode('utf-8') for s in sects).encode('utf-8')
def _cleanBody(self, body):
# format the line-breaks in unix-style
body = re.sub(r'\r\n', '\n', body)
# clean the extra lines and space
body = re.sub(r'\n(\s*\n){2,}', '\n\n', body)
return body
def createEmailNewRegistrant(self, regForm, rp):
"""
Creates an email to be sent to the user after registration
"""
fromAddr = regForm.getNotificationSender()
url = urlHandlers.UHConferenceDisplay.getURL(regForm.getConference())
# if rp.getConference().getModPay().isActivated():
if rp.getConference().getModPay().isActivated() and rp.doPay():
epaymentLink = "If you haven't paid for your registration yet, you can do it at %s" % urlHandlers.UHConfRegistrationFormCreationDone.getURL(rp)
paymentWarning = ", but please, do not forget to proceed with the payment if you haven't done it yet (see the link at the end of this email)."
else:
epaymentLink = ""
paymentWarning = "."
subject = _("""New registrant in '%s': %s""") % (strip_ml_tags(regForm.getConference().getTitle()), rp.getFullName())
body = i18nformat("""
_("Event"): %s
_("Registrant Id"): %s
%s
""") % (url, rp.getId(), \
self._printAllSections(regForm, rp))
# send mail to organisers
if self.getToList() != [] or self.getCCList() != []:
bodyOrg = _("""
There is a new registrant (%s) in '%s'. See information below:
%s
""") % (rp.getFullName(), strip_ml_tags(regForm.getConference().getTitle()), body)
bodyOrg = self._cleanBody(bodyOrg)
maildata = {"fromAddr": fromAddr, "toList": self.getToList(), "ccList": self.getCCList(),
"subject": subject, "body": bodyOrg}
GenericMailer.send(GenericNotification(maildata))
# send mail to participant
bodyReg = _("""
Congratulations, your registration to %s was successful%s See your information below:
%s
%s
""") % (strip_ml_tags(regForm.getConference().getTitle()), paymentWarning, body, epaymentLink)
return {
"fromAddr": fromAddr,
"toList": [rp.getEmail().strip()],
"subject": subject,
"body": self._cleanBody(bodyReg)
}
def sendEmailNewRegistrant(self, regForm, rp):
"""
Creates and sends an email to the user after registration.
Returns True if suceeded otherwise False.
"""
email = self.createEmailNewRegistrant(regForm, rp)
if email:
GenericMailer.send(GenericNotification(email))
return True
else:
return False
def sendEmailNewRegistrantDetailsPay(self, regForm, registrant):
if not registrant.getConference().getModPay().isEnableSendEmailPaymentDetails():
return
fromAddr = regForm.getNotificationSender()
date = registrant.getConference().getStartDate()
getTitle = strip_ml_tags(registrant.getConference().getTitle())
idRegistrant = registrant.getIdPay()
detailPayment = registrant.getConference().getModPay().getPaymentDetails()
subject = _("""Payment summary for '%s': %s""") % (strip_ml_tags(registrant.getConference().getTitle()), registrant.getFullName())
body = _("""
Please use this information for your payment (except for e-payment):\n
- date conference : %s
- name conference : %s
- registration id : %s
- detail of payment : \n%s
""") % (date, getTitle, idRegistrant, strip_ml_tags(detailPayment))
booking = []
total = 0
booking.append(_("""{0}{1}{2}{3}""".format("Quantity".ljust(20), "Item".ljust(50),
"Unit price".ljust(15), "Cost".ljust(20))))
#All billable general fields
for gsf in registrant.getMiscellaneousGroupList():
miscGroup = registrant.getMiscellaneousGroupById(gsf.getId())
if miscGroup is not None:
for miscItem in miscGroup.getResponseItemList():
price = 0.0
quantity = 0
caption = miscItem.getCaption()
currency = miscItem.getCurrency()
value = ""
if miscItem is not None:
v = miscItem.getValue()
if miscItem.isBillable():
value = miscItem.getValue()
price = string.atof(miscItem.getPrice())
quantity = miscItem.getQuantity()
total += price * quantity
if value != "":
value = ":%s" % value
if(quantity > 0):
booking.append("{0}{1}{2}{3}".format(str(quantity).ljust(20),
"{0} : {1}{2}".format(miscGroup.getTitle(), caption, value).ljust(50), str(price).ljust(15),
"{0} {1}".format(price * quantity, currency).ljust(20)))
#All billable standard fields (accommodation, sessions, social events)
for bf in registrant.getBilledForms():
for item in bf.getBilledItems():
caption = item.getCaption()
currency = item.getCurrency()
price = item.getPrice()
quantity = item.getQuantity()
total += price * quantity
if quantity > 0:
booking.append("\n{0}{1}{2}{3}".format(str(quantity).ljust(20), caption.ljust(50),
str(price).ljust(15),
"{0} {1}".format(price * quantity, currency).ljust(20)))
booking.append("{0}{1}".format("TOTAL".ljust(85), "{0}{1}".format(total, regForm.getCurrency()).ljust(20)))
# send email to organisers
#if self.getToList() != [] or self.getCCList() != []:
# bodyOrg = """
# There is a new registrant in '%s'. See information below:
#
# %s
# """%(strip_ml_tags(registrant.getConference().getTitle()), \
# body)
# maildata = { "fromAddr": fromAddr, "toList": self.getToList(), "ccList": self.getCCList(), "subject": subject, "body": bodyOrg }
# GenericMailer.send(GenericNotification(maildata))
# send email to participants
paymentMsg = _("If you haven't paid for your registration yet, you can do it at %s") % urlHandlers.UHConfRegistrationFormCreationDone.getURL(registrant)
if registrant.getEmail().strip() != "":
bodyReg = _("""%s\n\n%s\n\n%s\n\n%s""") % (
registrant.getConference().getModPay().getPaymentReceiptMsg(),
"\n".join(booking), body, paymentMsg)
to = registrant.getEmail().strip()
maildata = { "fromAddr": fromAddr, "toList": [to], "subject": subject, "body": bodyReg }
GenericMailer.send(GenericNotification(maildata))
def sendEmailNewRegistrantConfirmPay(self, regForm, registrant):
fromAddr = regForm.getNotificationSender()
date = registrant.getConference().getStartDate()
getTitle = strip_ml_tags(registrant.getConference().getTitle())
idRegistrant = registrant.getIdPay()
subject = _("""Payment successful for '%s': %s""") % (strip_ml_tags(registrant.getConference().getTitle()), registrant.getFullName())
body = _("""- detail of payment : \n%s
- date conference : %s
- name conference : %s
- registration id : %s""") % (registrant.getTransactionInfo().getTransactionTxt(), date, getTitle, idRegistrant)
booking = []
total = 0
booking.append("""Quantity\t\tItem\t\tunit.price\t\tCost""")
for gsf in registrant.getMiscellaneousGroupList():
miscGroup = registrant.getMiscellaneousGroupById(gsf.getId())
if miscGroup is not None:
for miscItem in miscGroup.getResponseItemList():
price = 0.0
quantity = 0
caption = miscItem.getCaption()
currency = miscItem.getCurrency()
v = ""
if miscItem is not None:
v = miscItem.getValue()
if miscItem.isBillable():
v = miscItem.getValue()
price = string.atof(miscItem.getPrice())
quantity = miscItem.getQuantity()
total += price * quantity
if v != "":
v = ":%s" % v
if(quantity > 0):
booking.append("""%i\t\t%s : %s%s\t\t%s\t\t%s %s""" % \
(quantity, gsf.getTitle(), caption, v, price, price * quantity, currency))
for bf in registrant.getBilledForms():
for item in bf.getBilledItems():
caption = item.getCaption()
currency = item.getCurrency()
price = item.getPrice()
quantity = item.getQuantity()
total += price * quantity
if quantity > 0:
booking.append("""%i\t\t%s\t\t%s\t\t%s %s""" % (quantity, caption, price, price * quantity, currency))
booking.append("""\nTOTAL\t\t\t\t\t\t\t%s %s""" % (total, regForm.getCurrency()))
# send email to organisers
if self.getToList() != [] or self.getCCList() != []:
bodyOrg = _("""
There is a new registrant (%s) in '%s'. See information below:
%s
""") % (registrant.getFullName(), strip_ml_tags(registrant.getConference().getTitle()), body)
maildata = { "fromAddr": fromAddr, "toList": self.getToList(), "ccList": self.getCCList(), "subject": subject, "body": bodyOrg }
GenericMailer.send(GenericNotification(maildata))
# send email to participant
if regForm.isSendPaidEmail() and registrant.getEmail().strip() != "":
bodyReg = _("""%s\n\n%s\n\n%s""") % (registrant.getConference().getModPay().getPaymentSuccessMsg(),
"\n".join(booking),
body)
to = registrant.getEmail().strip()
maildata = { "fromAddr": fromAddr, "toList": [to], "subject": subject, "body": bodyReg }
GenericMailer.send(GenericNotification(maildata))
def sendEmailModificationRegistrant(self, regForm, rp):
fromAddr = regForm.getNotificationSender()
subject = _("""Registration modified for '%s': %s""") % (strip_ml_tags(regForm.getConference().getTitle()), rp.getFullName())
body = i18nformat("""
_("Registrant Id"): %s
_("Title"): %s
_("Family Name"): %s
_("First Name"): %s
_("Position"): %s
_("Institution"): %s
_("Address"): %s
_("City"): %s
_("Country"): %s
_("Phone"): %s
_("Fax"): %s
_("Email"): %s
_("Personal Homepage"): %s
%s
""") % (rp.getId(), \
rp.getTitle(), \
rp.getFamilyName(), \
rp.getFirstName(), \
rp.getPosition(), \
rp.getInstitution(), \
rp.getAddress(), \
rp.getCity(), \
rp.getCountry(), \
rp.getPhone(), \
rp.getFax(), \
rp.getEmail(), \
rp.getPersonalHomepage(), \
self._printAllSections(regForm, rp))
if self.getToList() != [] or self.getCCList() != []:
bodyOrg = _("""
A registrant (%s) has modified his/her registration for '%s'. See information below:
%s
""") % (rp.getFullName(), strip_ml_tags(regForm.getConference().getTitle()), body)
bodyOrg = self._cleanBody(bodyOrg)
maildata = { "fromAddr": fromAddr, "toList": self.getToList(), "ccList": self.getCCList(), "subject": subject, "body": bodyOrg }
GenericMailer.send(GenericNotification(maildata))
def exportXml(self, xmlGen):
"""Write xml tags about this object in the given xml generator of type XMLGen."""
xmlGen.openTag("notification")
xmlGen.writeTag("toList", ", ".join(self.getToList()))
xmlGen.writeTag("ccList", ", ".join(self.getCCList()))
xmlGen.closeTag("notification")
class BaseForm(Persistent):
    """
    Common base of all registration-form sections.

    Provides the enabled/disabled flag shared by every section and, when a
    subclass sets the class attribute ``_iterableContainer`` to the name of
    its item-container attribute, generic iteration and indexed access over
    that container.  ``_iterableContainer`` is a workaround: the original
    design never unified the containers into one BaseForm attribute, and
    the DB schema cannot be changed now, so this attribute papers over it.
    """

    # Name of the attribute holding this form's items; subclasses that
    # want iteration/indexing must override it.
    _iterableContainer = None

    def __init__(self):
        # A disabled form is hidden from both the registration display
        # and the management area.
        self._enabled = True

    def setEnabled(self, v):
        self._enabled = v

    def isEnabled(self):
        # Objects persisted before the flag existed lack the attribute;
        # treat them as enabled (and persist the default).
        if not hasattr(self, '_enabled'):
            self._enabled = True
        return self._enabled

    def __iter__(self):
        return iter(getattr(self, self._iterableContainer))

    def __getitem__(self, key):
        return getattr(self, self._iterableContainer)[key]
class FieldInputType(Persistent):
    """
    Base class of the concrete registration-form field input types
    (text, number, checkbox, ...).  Subclasses render the HTML widget
    (_getModifHTML) and read the submitted value back into a
    MiscellaneousInfoSimpleItem (_setResponseValue).
    """
    _id = ""
    # Layout hints for the modification page: whether the caption goes in
    # the label column and whether the input spans the whole table row.
    _useLabelCol = True
    _wholeRow = False

    def __init__(self, field):
        # field: the GeneralField this input belongs to.
        self._parent = field

    def getValues(self):
        # Subclasses return their configurable options as a dict.
        return {}

    def setValues(self, data):
        # Subclasses read their configurable options from "data".
        pass

    def getParent(self):
        return self._parent

    def setId(cls, id):
        cls._id = id
    setId = classmethod(setId)

    def getId(cls):
        return cls._id
    getId = classmethod(getId)

    def getName(cls):
        return cls._id
    getName = classmethod(getName)

    def getHTMLName(self):
        """
        This method returns the indentifier of the field item in the web form.
        """
        return "*genfield*%s-%s" % (self.getParent().getParent().getId(), self.getParent().getId())

    def getModifLabelCol(self):
        # Caption shown in the label column (empty when _useLabelCol is off).
        if not self._useLabelCol:
            return ""
        return self._parent.getCaption()

    def useWholeRow(self):
        return self._wholeRow

    def getMandatoryCol(self, item):
        # Red asterisk marker when the item or the field is mandatory.
        mandatory = ""
        if (item is not None and item.isMandatory()) or self.getParent().isMandatory():
            mandatory = """<span class="regFormMandatoryField">*</span>"""
        return mandatory

    def getModifHTML(self, item, registrant, default=""):
        """
        Method that display the form web which represents this object.
        """
        return "<table><tr>%s</tr></table>" % (self._getModifHTML(item, registrant, default))

    def _getModifHTML(self, item, registrant, default=""):
        """
        Method that should be overwritten by the classes inheriting from this one in order to display
        the form web which represents this object.
        """
        return ""

    def setResponseValue(self, item, params, registrant, mg=None, override=False, validate=True):
        """
        This method shouldn't be called from the classes inheriting from this one (FieldInputType).
        This method fills the attribute "item" (MiscellaneousInfoSimpleItem) with the value the user wrote
        in the registration form.
        """
        if item is None:
            item = MiscellaneousInfoSimpleItem(mg, self.getParent())
            mg.addResponseItem(item)
            self._beforeValueChange(item, True)
        else:
            self._beforeValueChange(item, False)

        self._setResponseValue(item, params, registrant, override=override, validate=validate)
        self._afterValueChange(item)

    def _beforeValueChange(self, item, newItem):
        # if the item had a quantity, make the place available again
        if not newItem and item.getQuantity():
            self.getParent().decreaseNoPlaces()

    def _afterValueChange(self, item):
        # if the item has a quantity now, make the place unavailable
        if item.getQuantity():
            self.getParent().increaseNoPlaces()

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """
        Method that should be overwritten by the classes inheriting from this one in order to get the value written in the form.

        NOTE: the parameter order was fixed to (item, params, registrant) so
        the stub matches both the call in setResponseValue() and every
        subclass override; the old stub declared (item, registrant, params).
        """
        pass

    def _getSpecialOptionsHTML(self):
        # Billing options shared by most input types (billable flag + price).
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        checked = ""
        if billable:
            checked = "checked=\"checked\""
        html = i18nformat("""   <tr>
          <td class="titleCellTD"><span class="titleCellFormat">Is Billable</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="checkbox" name="billable" size="60" %s> _("(uncheck if it is not billable)")
          </td>
        </tr>
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat"> _("Price")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="price" size="60" value=%s>
          </td>
        </tr>
           """) % (checked, price)
        # i18nformat() already yields the final string; no join needed
        # (the old code called "".join() on it, a character-wise no-op).
        return html

    def _getDescriptionHTML(self, description):
        return """<span class="inputDescription">%s</span>""" % description

    def clone(self, gf):
        # Fresh input of the same registered type, attached to field gf.
        fi = FieldInputs().getAvailableInputKlassById(self.getId())(gf)
        return fi
class TextInput(FieldInputType, Fossilizable):
    """Single-line text field input; configurable width ("length")."""

    fossilizes(IRegFormTextInputFieldFossil)

    _id = "text"

    def getName(cls):
        return "Text"
    getName = classmethod(getName)

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        # Widget width in characters; '' means use the default (60).
        self._length = ''

    def _getModifHTML(self, item, registrant, default=""):
        # Render the <input type="text"> plus optional price/description.
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        htmlName = self.getHTMLName()
        v = default
        if item is not None:
            # Existing answer overrides the field defaults.
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            htmlName = item.getHTMLName()

        disable = ""
        # Paid billable items are frozen to keep the invoice consistent.
        if (registrant is not None and billable and registrant.getPayed()):
            disable = "disabled=\"true\""
            #pass
        if self._parent.getPDField() == 'email':
            # E-mail personal-data field gets client-side e-mail validation.
            param = """<script>addParam($E('%s'), 'email', %s);</script>""" % (htmlName, 'false' if self._parent.isMandatory() else 'true')
        elif self._parent.isMandatory():
            param = """<script>addParam($E('%s'), 'text', false);</script>""" % htmlName
        else:
            param = ''
        if self.getLength():
            length = 'size="%s"' % self.getLength()
        else:
            length = 'size="60"'
        tmp = """<input type="text" id="%s" name="%s" value="%s" %s %s >%s""" % (htmlName, htmlName, v , disable, length, param)
        tmp = """ <td>%s</td><td align="right" align="bottom">""" % tmp
        if billable:
            tmp = """%s&nbsp;&nbsp;%s&nbsp;&nbsp;%s</td> """ % (tmp, price, currency)
        else:
            tmp = """%s </td> """ % tmp
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        # Store the submitted text on "item"; rejects empty mandatory input.
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if ( item is not None and item.isBillable()):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        v = params.get(self.getHTMLName(), "")
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())

        item.setQuantity(0)
        item.setValue(v)
        #item.setBillable(self._parent.isBillable())
        #item.setPrice(self._parent.getPrice())
        #item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        # Management UI: only the "size in chars" option for this type.
        return i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Size in chars")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="length" value="%s" />
          </td>
        </tr>""" % self.getLength())

    def clone(self, gf):
        ti = FieldInputType.clone(self, gf)
        ti.setLength(self.getLength())
        return ti

    def getValues(self):
        d = {}
        d["length"] = self.getLength()
        return d

    def setValues(self, data):
        if data.has_key("length"):
            self.setLength(data.get("length"))

    def getLength(self):
        # Legacy objects may lack the attribute; default to ''.
        try:
            if self._length: pass
        except AttributeError:
            self._length = ''
        return self._length

    def setLength(self, value):
        self._length = value
class TelephoneInput(FieldInputType, Fossilizable):
    """Telephone-number field: a text input validated (client and server
    side) against a "(+) 999 99 99 99"-style pattern; the stored value is
    stripped of spaces and dashes."""

    fossilizes(IRegFormTelephoneInputFieldFossil)

    _id = "telephone"
    # Optional "+"/"(+NN)" prefix followed by digits separated by
    # spaces/dashes; shared by client-side JS and server-side check.
    _REGEX = r'^(\(\+\d*\)|\+)?\s*(\d(\s*|\-))+$'
    _PATTERN = re.compile(_REGEX)

    def getName(cls):
        return "Telephone"
    getName = classmethod(getName)

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        # Widget width in characters; '' means use the default (30).
        self._length = ''

    def _getModifHTML(self, item, registrant, default=""):
        # Render the phone <input> with a format hint and JS validation.
        description = self._parent.getDescription()
        htmlName = self.getHTMLName()
        v = default
        if item is not None:
            v = item.getValue()
            htmlName = item.getHTMLName()

        disable = ""
        if self._parent.isMandatory():
            param = """<script>
  addParam($E('%s'), 'text', false, function(value) {
    if (!/%s/.test(value)) {
      return "Invalid phone number format";
    }
  });
</script>""" % (htmlName, TelephoneInput._REGEX)
        else:
            param = ''
        if self.getLength():
            length = 'size="%s"' % self.getLength()
        else:
            length = 'size="30"'
        format = """&nbsp;<span class="inputDescription">(+) 999 99 99 99</span>"""
        tmp = """<input type="text" id="%s" name="%s" value="%s" %s %s>%s%s""" % (htmlName, htmlName, v , disable, length, format, param)
        tmp = """ <td>%s</td>""" % tmp
        if description:
            tmp = """%s</tr><tr><td>%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        # Validate and store the phone number (whitespace/dashes removed).
        v = params.get(self.getHTMLName(), "")
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        if validate and v.strip() != '' and not TelephoneInput._PATTERN.match(v):
            raise FormValuesError(_("The field \"%s\" is in wrong format. Please fill it in the correct format: (+) 999 99 99 99") % self.getParent().getCaption())
        v = re.sub(r'\s+|\-+', '', v)

        item.setQuantity(0)
        item.setValue(v)
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        # Management UI: only the "size in chars" option for this type.
        return i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Size in chars")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="length" value="%s" />
          </td>
        </tr>""" % self.getLength())

    def clone(self, gf):
        ti = FieldInputType.clone(self, gf)
        ti.setLength(self.getLength())
        return ti

    def getValues(self):
        d = {}
        d["length"] = self.getLength()
        return d

    def setValues(self, data):
        if data.has_key("length"):
            self.setLength(data.get("length"))

    def getLength(self):
        # Legacy objects may lack the attribute; default to ''.
        try:
            if self._length:
                pass
        except AttributeError:
            self._length = ''
        return self._length

    def setLength(self, value):
        self._length = value
class TextareaInput(FieldInputType, Fossilizable):
    """Multi-line text field; configurable rows and columns."""

    fossilizes(IRegFormTextareaInputFieldFossil)

    _id = "textarea"

    def getName(cls):
        return "Textarea"
    getName = classmethod(getName)

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        # Widget size; '' means use the defaults (4 rows x 60 cols).
        self._numberOfRows = ''
        self._numberOfColumns = ''

    def _getModifHTML(self, item, registrant, default=""):
        # Render the <textarea> (description shown above the widget).
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        htmlName = self.getHTMLName()
        v = default
        if item is not None:
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            htmlName = item.getHTMLName()

        disable = ""
        # Paid billable items are frozen to keep the invoice consistent.
        if (registrant is not None and billable and registrant.getPayed()):
            disable = "disabled=\"true\""
            #pass
        if description:
            desc = """%s<br/>""" % self._getDescriptionHTML(description)
        else:
            desc = ''
        if self._parent.isMandatory():
            param = """<script>addParam($E('%s'), 'text', false);</script>""" % htmlName
        else:
            param = ''
        cols = self.getNumberOfColumns()
        if not cols:
            cols = 60
        rows = self.getNumberOfRows()
        if not rows:
            rows = 4

        tmp = """%s<textarea id="%s" name="%s" cols="%s" rows="%s" %s >%s</textarea>%s""" % (desc, htmlName, htmlName, cols, rows, disable, v, param)
        tmp = """ <td>%s</td><td align="right" align="bottom">""" % tmp
        tmp = """%s </td> """ % tmp
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        # Store the submitted text on "item"; rejects empty mandatory input.
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if ( item is not None and item.isBillable()):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        v = params.get(self.getHTMLName(), "")
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())

        item.setQuantity(0)
        item.setValue(v)
        #item.setBillable(self._parent.isBillable())
        #item.setPrice(self._parent.getPrice())
        #item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        # Management UI: rows and columns options for this type.
        html = [i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Number of rows")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="numberOfRows" value="%s" />
          </td>
        </tr>""") % self.getNumberOfRows()]
        html.append(i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Row length")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="numberOfColumns" value="%s" />
          </td>
        </tr>""") % self.getNumberOfColumns())
        return "".join(html)

    def clone(self, gf):
        ti = FieldInputType.clone(self, gf)
        ti.setNumberOfRows(self.getNumberOfRows())
        ti.setNumberOfColumns(self.getNumberOfColumns())
        return ti

    def getValues(self):
        d = {}
        d["numberOfRows"] = self.getNumberOfRows()
        d["numberOfColumns"] = self.getNumberOfColumns()
        return d

    def setValues(self, data):
        if data.has_key("numberOfRows"):
            self.setNumberOfRows(data.get("numberOfRows"))
        if data.has_key("numberOfColumns"):
            self.setNumberOfColumns(data.get("numberOfColumns"))

    def getNumberOfRows(self):
        # Legacy objects may lack the attribute; default to ''.
        try:
            if self._numberOfRows: pass
        except AttributeError:
            self._numberOfRows = ''
        return self._numberOfRows

    def setNumberOfRows(self, value):
        self._numberOfRows = value

    def getNumberOfColumns(self):
        # Legacy objects may lack the attribute; default to ''.
        try:
            if self._numberOfColumns: pass
        except AttributeError:
            self._numberOfColumns = ''
        return self._numberOfColumns

    def setNumberOfColumns(self, value):
        self._numberOfColumns = value
class NumberInput(FieldInputType, Fossilizable):
    """Non-negative integer field with an optional minimum value; when
    billable, the entered number acts as a quantity multiplied by the
    unit price (subtotal updated live in the browser)."""

    fossilizes(IRegFormNumberInputFieldFossil)

    _id = "number"
    _useLabelCol = False

    def getName(cls):
        return "Number"
    getName = classmethod(getName)

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        # Widget width in characters ('' = default 6) and minimum value.
        self._length = ''
        self._minValue = 0

    def _getModifHTML(self, item, registrant, default=""):
        # Render the number <input> with JS validation and, when billable,
        # a live subtotal (unit price * entered quantity).
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        htmlName = self.getHTMLName()
        v = default or self.getMinValue()
        if item is not None:
            # Existing answer overrides the field defaults.
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            htmlName = item.getHTMLName()

        mandat = "false" if self._parent.isMandatory() else "true"
        if self.getMinValue() != 0:
            extra_check = "IndicoUtil.validate_number({minimum:%s})" % self.getMinValue()
        else:
            extra_check = "function(){}"
        param = """<script>addParam($E('%s'), 'non_negative_int', %s, %s);</script>""" % (htmlName, mandat, extra_check)

        disable = ""
        # Paid billable items are frozen to keep the invoice consistent.
        if (registrant is not None and billable and registrant.getPayed()):
            disable = "disabled=\"true\""
        if self.getLength():
            length = 'size="%s"' % self.getLength()
        else:
            length = 'size="6"'
        onkeyup = ""
        if billable:
            # Recompute the subtotal client-side on every keystroke.
            onkeyup = """onkeyup="
        var value = ((isNaN(parseInt(this.value, 10)) || parseInt(this.value, 10) < 0) ? 0 : parseInt(this.value, 10)) * %s;
        $E('subtotal-%s').dom.innerHTML = parseInt(value) === parseFloat(value) ? value : value.toFixed(2);"
        """ % (price, htmlName)
        tmp = """<input type="text" id="%s" name="%s" value="%s" %s %s %s /> %s""" % (htmlName, htmlName, v, onkeyup, disable, length, param)
        tmp = """ <td>%s</td>""" % tmp
        if billable:
            # NOTE(review): assumes v is numeric here (stored quantities
            # are ints; the default falls back to the minimum value).
            subTotal = (float(price) * int(v) or 0)
            tmp = """%s<td align="right" align="bottom">&nbsp;&nbsp;<span>%s&nbsp;%s</span><span class="regFormSubtotal">Total:&nbsp;<span id="subtotal-%s">%s</span>&nbsp;%s</span></td> """ % (tmp, price, currency, htmlName, subTotal, currency)
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        # Validate and store the submitted quantity.
        v = params.get(self.getHTMLName(), "")
        quantity = 0
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        # BUGFIX: use isdigit() instead of isalnum(). "abc".isalnum() is
        # True, so non-numeric input used to fall through to int(v) and
        # crash with an unhandled ValueError instead of a form error.
        if not override and self.getParent().isMandatory() and (not v.isdigit() or int(v) < 0):
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it with a number.") % self.getParent().getCaption())

        if not v.isdigit() or int(v) < 1:
            quantity = 0
        else:
            quantity = int(v)
        if v.strip() != '' and quantity < self.getMinValue():
            raise FormValuesError(_("The field \"%s\" needs to be filled with a number greater than or equal to %d.") % (self.getParent().getCaption(), self.getMinValue()))
        item.setQuantity(quantity)
        item.setValue(quantity)
        item.setBillable(self._parent.isBillable())
        item.setPrice(self._parent.getPrice())
        item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        # Management UI: minimum value, size, billable flag and unit price.
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        checked = ""
        if billable:
            checked = "checked=\"checked\""
        return i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Min. value")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="minValue" value="%s" />
          </td>
        </tr>
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Size in chars")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="length" value="%s" />
          </td>
        </tr>
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">Is Billable</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="checkbox" name="billable" size="60" %s> _("(uncheck if it is not billable)")
          </td>
        </tr>
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat"> _("Price (multiplied with entered number)")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="price" size="60" value=%s>
          </td>
        </tr>""" % (self.getMinValue(), self.getLength(), checked, price))

    def clone(self, gf):
        ni = FieldInputType.clone(self, gf)
        ni.setLength(self.getLength())
        ni.setMinValue(self.getMinValue())
        return ni

    def getValues(self):
        d = {}
        d["length"] = self.getLength()
        d["minValue"] = self.getMinValue()
        return d

    def setValues(self, data):
        if data.has_key("length"):
            self.setLength(data.get("length"))
        if data.has_key("minValue"):
            self.setMinValue(int(data.get("minValue") or 0))

    def getLength(self):
        # Legacy objects may lack the attribute; default to ''.
        try:
            if self._length: pass
        except AttributeError:
            self._length = ''
        return self._length

    def setLength(self, value):
        self._length = value

    def getMinValue(self):
        # Legacy objects may lack the attribute; default to 0.
        try:
            if self._minValue: pass
        except AttributeError:
            self._minValue = 0
        return self._minValue

    def setMinValue(self, value):
        self._minValue = value

    def getModifLabelCol(self):
        return self._parent.getCaption()
class LabelInput(FieldInputType, Fossilizable):
    """Display-only field: no user input widget.  A billable label acts
    as a fixed-price item that is always "selected" (quantity 1)."""

    fossilizes(IRegFormLabelInputFieldFossil)

    _id = "label"
    _wholeRow = True

    def getName(cls):
        return "Label"
    getName = classmethod(getName)

    def _getModifHTML(self, item, registrant, default=""):
        # Render only the (optional) price and description; no input.
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        v = default
        if item is not None:
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            #pass
        tmp = """ <td align="right" valign="bottom">"""
        if billable:
            tmp = """%s&nbsp;&nbsp;%s&nbsp;&nbsp;%s</td> """ % (tmp, price, currency)
        else:
            tmp = """%s </td> """ % tmp
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        # A label has no submitted value: record quantity 1 and billing
        # info so billable labels are charged as fixed items.
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if ( item is not None and item.isBillable()):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
            #item.setQuantity(0)
        #else:
        item.setQuantity(1)
        item.setValue("")
        item.setBillable(self._parent.isBillable())
        item.setPrice(self._parent.getPrice())
        item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())
        #v=params.get(self.getHTMLName(),"")
        #if self.getParent().isMandatory() and v.strip()=="":
        #    raise FormValuesError("The field \"%s\" is mandatory. Please fill it."%self.getParent().getCaption())
        #item.setValue(v)
class CheckboxInput(FieldInputType, Fossilizable):
    """Registration-form field rendered as a single checkbox.

    Ticking the box may add the configured price to the registrant's
    bill, and the field may be limited to a number of places.
    """
    fossilizes(IRegFormCheckboxInputFieldFossil)

    _id = "checkbox"
    _useLabelCol = False

    def getName(cls):
        return "Multiple choices/checkbox"
    getName = classmethod(getName)

    def _getModifHTML(self, item, registrant, default=""):
        """Render the checkbox row; *item* is the registrant's stored
        answer (None when rendering an empty form)."""
        disable = ""
        checked = ""
        mandatory = ""
        caption = self._parent.getCaption()
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        htmlName = self.getHTMLName()
        v = default
        quantity = 0
        if item is not None:
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            htmlName = item.getHTMLName()
            quantity = item.getQuantity()
        mandatory = """<span class="regFormMandatoryField">*</span>""" if self._parent.isMandatory() else ""
        # Lock the box when a billable answer was already paid for, or when
        # the field is full and this registrant does not hold a place.
        if (registrant is not None and billable and registrant.getPayed()) or (not self.getParent().hasAvailablePlaces() and not quantity):
            disable = "disabled=\"disabled\""
        if v == "yes":
            checked = "checked=\"checked\""
        pm = ''
        if self._parent.isMandatory():
            # Client-side validation hook for mandatory checkboxes.
            pm = """<script>addParam($E('%s'), 'checkBox', false);</script>""" % htmlName
        tmp = """<input type="checkbox" id="%s" name="%s" %s %s> %s %s%s""" % (htmlName, htmlName, checked, disable, caption, mandatory, pm)
        tmp = """ <td>%s</td><td align="right" align="bottom">""" % tmp
        if billable:
            tmp = """%s %s %s """ % (tmp, price, currency)
        if self.getParent().getPlacesLimit():
            tmp += """ <span class='placesLeft'>[%s place(s) left]</span>""" % (self.getParent().getNoPlacesLeft())
        tmp += """</td>"""
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Store the submitted value ("yes"/"no") into *item*."""
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if ( item is not None and item.isBillable()):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        # An unchecked checkbox is simply absent from the request params.
        if params.has_key(self.getHTMLName()):
            item.setValue("yes")
            item.setQuantity(1)
        elif not override and self.getParent().isMandatory():
            raise FormValuesError(_('The checkbox "%s" is mandatory. Please enable it.') % self.getParent().getCaption())
        else:
            item.setValue("no")
            item.setQuantity(0)
        item.setBillable(self._parent.isBillable())
        item.setPrice(self._parent.getPrice())
        item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        """Extend the generic options form with the places-limit input."""
        html = FieldInputType._getSpecialOptionsHTML(self)
        html += i18nformat("""<tr>
          <td class="titleCellTD"><span class="titleCellFormat"> _("Places (0 for unlimited)")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="placesLimit" size="60" value=%s>
          </td>
        </tr>""") % (self._parent.getPlacesLimit())
        return html
class YesNoInput(FieldInputType, Fossilizable):
    """Registration-form field rendered as a yes/no drop-down.

    Answering "yes" may be billable and may consume one of a limited
    number of places.
    """
    fossilizes(IRegFormYesNoInputFieldFossil)

    _id = "yes/no"

    def getName(cls):
        return "Yes/No"
    getName = classmethod(getName)

    def _getModifHTML(self, item, registrant, default=""):
        """Render the yes/no select; *item* is the registrant's stored
        answer (None when rendering an empty form)."""
        description = self._parent.getDescription()
        price = self._parent.getPrice()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        htmlName = self.getHTMLName()
        v = default
        if item is not None:
            v = item.getValue()
            price = item.getPrice()
            billable = item.isBillable()
            currency = item.getCurrency()
            htmlName = item.getHTMLName()
        disable = ""
        if self._parent.isMandatory():
            param = """<script>addParam($E('%s'), 'text', false);</script>""" % htmlName
        else:
            param = ''
        checkedYes = ""
        checkedNo = ""
        # Freeze the answer once a billable choice has been paid for.
        if (registrant is not None and billable and registrant.getPayed()):
            disable = "disabled=\"true\""
            #pass
        if v == "yes":
            checkedYes = "selected"
        elif v == "no":
            checkedNo = "selected"
        placesInfo = ""
        if self.getParent().getPlacesLimit():
            placesInfo = """ [%s place(s) left]""" % (self.getParent().getNoPlacesLeft())
            # NOTE(review): the extra 'disabled' rides along inside the
            # <option> tag via checkedYes — confirm intended markup.
            if v != "yes" and not self.getParent().hasAvailablePlaces():
                checkedYes += " disabled"
        tmp = """<select id="%s" name="%s" %s><option value="">-- Choose a value --</option><option value="yes" %s>yes%s</option><option value="no" %s>no</option></select>%s""" % (htmlName, htmlName, disable, checkedYes, placesInfo, checkedNo, param)
        tmp = """ <td>%s</td><td align="right" align="bottom">""" % tmp
        if billable:
            tmp = """%s %s %s</td> """ % (tmp, price, currency)
        else:
            tmp = """%s </td> """ % tmp
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Store the submitted value; quantity is 1 only for "yes"."""
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if ( item is not None and item.isBillable()):
            #    return
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        v = params.get(self.getHTMLName())
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        if v == "yes":
            item.setQuantity(1)
        else:
            item.setQuantity(0)
        item.setValue(v)
        item.setBillable(self._parent.isBillable())
        item.setPrice(self._parent.getPrice())
        item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        """Extend the generic options form with the places-limit input."""
        html = FieldInputType._getSpecialOptionsHTML(self)
        html += i18nformat("""<tr>
          <td class="titleCellTD"><span class="titleCellFormat"> _("Places (0 for unlimited)")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <input type="text" name="placesLimit" size="60" value=%s>
          </td>
        </tr>""") % (self._parent.getPlacesLimit())
        return html
class FileInput(FieldInputType, Fossilizable):
    """Registration-form field that lets the registrant upload a file."""
    fossilizes(IRegFormFileInputFieldFossil)

    _id = "file"

    def getName(cls):
        return "File"
    getName = classmethod(getName)

    def getValueDisplay(self, value):
        """Return an HTML link to the stored file.

        The management area uses the registrant-attachment URL handler;
        the public area uses the generic file-access one.
        """
        uh = (urlHandlers.UHRegistrantAttachmentFileAccess if request.blueprint == 'event_mgmt' else
              urlHandlers.UHFileAccess)
        return """<a href="%s">%s</a>""" % (uh.getURL(value), value.getFileName())

    def _getModifHTML(self, item, registrant, default=None):
        from MaKaC.webinterface.pages.registrationForm import WFileInputField
        wc = WFileInputField(self, item, default)
        return wc.getHTML()

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Save, replace or delete the registrant's uploaded file.

        *v* is either a str (name of the file already stored -- leave it
        alone) or an upload object exposing a ``filename`` attribute.
        """
        v = params.get(self.getHTMLName(), "")
        newValueEmpty = v.strip() == "" if isinstance(v, str) else v.filename == ""
        if not override and self.getParent().isMandatory() and newValueEmpty:
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())

        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())
        # There was no file saved on DB
        if item.getValue() is None:
            if not newValueEmpty:  # user submits a new file
                f = registrant.saveFile(v)
                item.setValue(f)
        # There was already a file on DB
        # if 'str': it means that we are receiving the name of the already existing file. Do not modify.
        # if file descriptor: replace previous file with new one
        # if 'empty' value: just remove
        elif not isinstance(v, str):
            # delete
            registrant.deleteFile(item.getValue().getId())
            item.setValue(None)
            # new file
            if not newValueEmpty:
                f = registrant.saveFile(v)
                item.setValue(f)

    def _getSpecialOptionsHTML(self):
        # File fields have no extra configuration options.
        return ""

    def clone(self, gf):
        ti = FieldInputType.clone(self, gf)
        return ti
class RadioItem(Persistent, Fossilizable):
    """One selectable option of a RadioGroupInput.

    Each item can be individually billable, enabled/disabled and limited
    to a number of places; the current occupancy is tracked in
    ``_currentNoPlaces``.  The try/except blocks in the getters lazily
    initialise attributes missing from instances persisted by older
    versions of the code.
    """
    fossilizes(IRegFormRadioItemFossil)

    def __init__(self, parent):
        self._parent = parent
        self._id = ""
        self._caption = ""
        self._billable = False
        self._price = ""
        self._enabled = True
        self._placesLimit = 0
        self._currentNoPlaces = 0

    def setValues(self, data):
        """Update this item from a dict; missing keys are left untouched."""
        if data.has_key("caption"):
            self.setCaption(data["caption"])
        if data.has_key("isBillable"):
            self.setBillable(data["isBillable"])
        if data.has_key("price"):
            self.setPrice(data["price"])
        if data.has_key("isEnabled"):
            self.setEnabled(data["isEnabled"])
        if data.has_key("placesLimit"):
            self.setPlacesLimit(data["placesLimit"])

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getCaption(self):
        return self._caption

    def setCaption(self, cap):
        # Registrant answers store the caption, so renaming the item must
        # also rename it in every stored answer.
        if self._caption != cap:
            self.updateRegistrantSelection(cap)
        self._caption = cap

    def setEnabled(self, en=True):
        self._enabled = en

    def isEnabled(self):
        try:
            return self._enabled
        except:
            self.setEnabled()
            return self._enabled

    def isBillable(self):
        try:
            return self._billable
        except:
            self._billable = False
        return self._billable

    def setBillable(self, v):
        self._billable = v

    def getPrice(self):
        try:
            return self._price
        except:
            self.setPrice(False)
        return self._price

    def setPrice(self, price):
        # Validate/normalise the price string; falsy values are stored as-is.
        if price:
            match = PRICE_PATTERN.match(price)
            if match:
                price = match.group(1)
            else:
                raise MaKaCError(_('The price is in incorrect format!'))
        self._price = price

    def getPlacesLimit(self):
        try:
            if self._placesLimit:
                pass
        except AttributeError, e:
            self._placesLimit = 0
        return self._placesLimit

    def setPlacesLimit(self, limit):
        """Set the places limit ("" or 0 means unlimited) and recount occupancy."""
        if limit == "":
            limit = "0"
        try:
            l = int(limit)
        except ValueError:
            raise FormValuesError(_("Please enter a number for the limit of places"))
        self._placesLimit = l
        self.updateCurrentNoPlaces()

    def getCurrentNoPlaces(self):
        try:
            if self._currentNoPlaces:
                pass
        except AttributeError:
            self._currentNoPlaces = 0
        return self._currentNoPlaces

    def hasAvailablePlaces(self):
        # No limit means always available.
        if not self.getPlacesLimit():
            return True
        return (self.getCurrentNoPlaces() < self.getPlacesLimit())

    def getNoPlacesLeft(self):
        return self.getPlacesLimit() - self.getCurrentNoPlaces()

    def increaseNoPlaces(self):
        if self.getPlacesLimit() > 0 :
            if self.getCurrentNoPlaces() >= self.getPlacesLimit():
                raise FormValuesError(_("""The place limit has been exceeded."""))
            self._currentNoPlaces += 1

    def decreaseNoPlaces(self):
        if self.getPlacesLimit() > 0 and self.getCurrentNoPlaces() > 0:
            self._currentNoPlaces -= 1

    def updateCurrentNoPlaces(self):
        """Recount occupancy from scratch by scanning every registrant's answer."""
        # self -> RadioGroupInput -> GeneralField -> GeneralSectionForm
        gf = self._parent._parent
        self._currentNoPlaces = 0
        gsf = gf._parent
        regform = gsf.getRegistrationForm()
        for reg in regform.getConference().getRegistrantsList():
            mg = reg.getMiscellaneousGroupById(gsf.getId())
            if not mg:
                continue
            gf.getId()  # for some reason it's empty when calling it for the first time
            item = mg.getResponseItemById(gf.getId())
            if item is not None and item.getQuantity() and item.getValue() == self.getCaption():
                self.increaseNoPlaces()

    def updateRegistrantSelection(self, caption):
        """Rename this option in every registrant's stored answer and
        rebuild the occupancy counter at the same time."""
        gf = self._parent._parent
        self._currentNoPlaces = 0
        gsf = gf._parent
        regform = gsf.getRegistrationForm()
        for reg in regform.getConference().getRegistrantsList():
            mg = reg.getMiscellaneousGroupById(gsf.getId())
            if not mg:
                continue
            item = mg.getResponseItemById(gf.getId())
            if item is not None and item.getQuantity() and item.getValue() == self.getCaption():
                item.setValue(caption)
                self.increaseNoPlaces()

    def clone(self, parent):
        """Return a copy of this item attached to *parent*."""
        ri = RadioItem(parent)
        ri.setCaption(self.getCaption())
        ri.setBillable(self.isBillable())
        ri.setPrice(self.getPrice())
        ri.setEnabled(self.isEnabled())
        ri.setPlacesLimit(self.getPlacesLimit())
        return ri

    def _cmpCaption(r1, r2):
        # Comparator for sorting items alphabetically by caption.
        return cmp(r1.getCaption(), r2.getCaption())
    _cmpCaption = staticmethod(_cmpCaption)
class RadioGroupInput(FieldInputType, Fossilizable):
    """Registration-form field offering one choice among several options.

    Rendered either as a group of radio buttons or as a drop-down menu
    (``_inputType``).  The options are RadioItem instances, each with
    its own billing/places settings.
    """
    fossilizes(IRegFormRadioGroupInputFieldFossil)

    _id = "radio"

    def getName(cls):
        return "Multiple options/One choice"
    getName = classmethod(getName)

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        self._items = []
        self._radioItemGenerator = Counter()
        self._defaultItem = None
        self._inputType = "radiogroup"
        self._emptyCaption = '-- Choose a value --'

    def getValues(self):
        """Return the whole configuration, items included, as a dict."""
        d = {}
        d["radioitems"] = []
        for i in self.getItemsList():
            tmp = {}
            tmp["caption"] = i.getCaption()
            tmp["billable"] = i.isBillable()
            tmp["price"] = i.getPrice()
            tmp["isEnabled"] = i.isEnabled()
            tmp["placesLimit"] = i.getPlacesLimit()
            tmp["id"] = i.getId()
            d["radioitems"].append(tmp)
        d["defaultItem"] = self.getDefaultItem()
        d["inputType"] = self.getInputType()
        d["emptyCaption"] = self.getEmptyCaption()
        return d

    def setValues(self, data):
        """Create/update/remove/reorder items and set the group options."""
        if "radioitems" in data:
            for i, itemValues in enumerate(data.get("radioitems", [])):
                item = self.getItemById(itemValues.get('id'))
                if item is None:
                    self.createItem(itemValues, i)
                else:
                    # remove else set and move
                    if 'remove' in itemValues:
                        self.removeItem(item)
                    else:
                        item.setValues(itemValues)
                        self.addItem(item, i)
        if "defaultItem" in data:
            self.setDefaultItem(data.get("defaultItem", None))
        if "inputType" in data:
            self._inputType = data.get("inputType")
        if "emptyCaption" in data:
            self._emptyCaption = data["emptyCaption"]

    def _beforeValueChange(self, item, newItem):
        # if the item had a quantity, make the place available again
        selected = self.getSelectedItem(item)
        if not newItem and selected:
            selected.decreaseNoPlaces()

    def _afterValueChange(self, item):
        # if the item has a quantity now, make the place unavailable
        selected = self.getSelectedItem(item)
        if selected:
            selected.increaseNoPlaces()

    def getSelectedItem(self, item):
        """Return the RadioItem whose caption matches *item*'s stored value."""
        for val in self.getItemsList():
            if val.getCaption() == item.getValue():
                return val
        return None

    def getDefaultItem(self):
        # Lazy default for instances persisted before the attribute existed.
        try:
            if self._defaultItem:
                pass
        except AttributeError, e:
            self._defaultItem = None
        return self._defaultItem

    def setDefaultItem(self, caption):
        if caption == "":
            self._defaultItem = None
        else:
            self._defaultItem = caption

    def setDefaultItemById(self, id):
        item = self.getItemById(id)
        if item in self.getItemsList():
            self.setDefaultItem(item.getCaption())

    def changeItemById(self, id, caption=None, billable=None, price=None, places=None):
        item = self.getItemById(id)
        if item in self.getItemsList():
            if caption:
                item.setCaption(caption)
            if billable and price:
                item.setBillable(billable)
                item.setPrice(price)
            if places or places == 0:  # empty string doesn't change it, 0 does
                item.setPlacesLimit(places)

    def removePriceById(self, id):
        item = self.getItemById(id)
        if item in self.getItemsList():
            item.setBillable(False)
            item.setPrice("")

    def setInputType(self, inputType):
        self._inputType = inputType

    def getInputType(self):
        try:
            if self._inputType:
                pass
        except AttributeError:
            self._inputType = "radiogroup"
        return self._inputType

    def getItemsList(self):
        # Legacy instances stored the items in a dict; normalise to a list.
        if type(self._items) == dict:
            self._items = self._items.values()
        return self._items

    def addItem(self, item, i=None):
        """Insert *item* at position *i* (append by default); an item already
        present is moved rather than duplicated."""
        if i is None:
            i = len(self.getItemsList())
        if item in self.getItemsList():
            self.removeItem(item)
        else:
            item.setId(str(self._getRadioItemGenerator().newCount()))

        self.getItemsList().insert(i, item)
        self.notifyModification()
        return True

    def createItem(self, itemValues, i=None):
        item = RadioItem(self)
        item.setValues(itemValues)
        self.addItem(item, i)

    def removeItem(self, item):
        if item in self.getItemsList():
            self.getItemsList().remove(item)
            self.notifyModification()

    def removeItemById(self, id):
        return self.removeItem(self.getItemById(id))

    def disableItemById(self, id):
        # Despite the name, this toggles the enabled state.
        item = self.getItemById(id)
        if item in self.getItemsList():
            item.setEnabled(not item.isEnabled())
            self.notifyModification()

    def getItemById(self, id):
        for f in self.getItemsList():
            if f.getId() == id:
                return f
        return None

    def notifyModification(self):
        # Flag the persistent object as dirty so ZODB saves the list mutation.
        self._p_changed = 1

    def clone(self, gf):
        """Return a deep copy of this input (items cloned) for field *gf*."""
        rgi = FieldInputType.clone(self, gf)
        for item in self.getItemsList():
            rgi.addItem(item.clone(rgi))
        rgi.setDefaultItem(self.getDefaultItem())
        rgi.setInputType(self.getInputType())
        return rgi

    def _getRadioItemGenerator(self):
        return self._radioItemGenerator

    def getEmptyCaption(self):
        try:
            return self._emptyCaption
        except:
            self._emptyCaption = '-- Choose a value --'
        return self._emptyCaption

    def _getRadioGroupModifHTML(self, item, registrant, default=""):
        """Render the field as a group of radio buttons."""
        description = self._parent.getDescription()
        caption = self._parent.getCaption()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        value = default
        if item is not None:
            billable = item.isBillable()
            currency = item.getCurrency()
            value = item.getValue()
        tmp = ["""<td align="right" align="bottom" colspan="2"></td>"""]
        counter = 0
        for val in self.getItemsList():
            counter += 1
            itemId = "%s_%s" % (self.getHTMLName(), counter)
            disable = ""
            if not val.isEnabled():
                disable = "disabled=\"disabled\""
            # Lock the option when a paid billable answer exists, or when it
            # is full and not the registrant's current selection.
            if (registrant is not None and (val.isBillable() or billable) and registrant.getPayed()):
                disable = "disabled=\"disabled\""
            elif (not val.hasAvailablePlaces() and val.getCaption() != value):
                disable = "disabled=\"disabled\""
            checked = ""
            if val.getCaption() == value:
                checked = "checked"
            elif not value and val.getCaption() == self.getDefaultItem():
                checked = "checked"
            tmp.append("""<tr><td></td><td><input type="radio" id="%s" name="%s" value="%s" %s %s> %s</td><td align="right" style="vertical-align: bottom;" >""" % (itemId, self.getHTMLName(), val.getId(), checked, disable, val.getCaption()))
            if val.isBillable():
                tmp.append(""" %s %s""" % (val.getPrice(), currency))
            tmp.append("""</td><td align="right" style="vertical-align: bottom;" >""")
            if val.getPlacesLimit():
                tmp.append(""" <span class='placesLeft'>[%s place(s) left]</span>""" % (val.getNoPlacesLeft()))
            tmp.append(""" </td></tr> """)
        if description:
            tmp.append("""<tr><td></td><td colspan="2">%s</td></tr>""" % (self._getDescriptionHTML(description)))
        if self._parent.isMandatory():
            # Client-side check that at least one radio button is selected.
            validator = """
                for (var i=1; i<=%s; i++) {
                    var item = $E('%s_' + i);
                    if (item.dom.checked) {
                        return true;
                    }
                }
                new AlertPopup($T("Warning"), $T('You must select option for "%s"!')).open();
                return false;
            """ % (counter, self.getHTMLName(), caption)
            script = """<script>addValidator(function() {%s});</script>""" % validator
            tmp.append(script)
        return "".join(tmp)

    def _getDropDownModifHTML(self, item, registrant, default=""):
        """Render the field as a drop-down menu."""
        description = self._parent.getDescription()
        billable = self._parent.isBillable()
        currency = self._parent.getParent().getRegistrationForm().getCurrency()
        value = default
        if item is not None:
            billable = item.isBillable()
            currency = item.getCurrency()
            value = item.getValue()
        if not value:
            value = self.getDefaultItem()
        if self._parent.isMandatory():
            param = """<script>addParam($E('%s'), 'text', false);</script>""" % self.getHTMLName()
        else:
            param = ''
        tmp = []
        tmp.append("""<td><select id="%s" name="%s">""" % (self.getHTMLName(), self.getHTMLName()))
        tmp.append("""<option value="">%s</option>""" % self.getEmptyCaption())
        for radioItem in self.getItemsList():
            # Paid billable options are omitted entirely rather than disabled.
            if radioItem.isEnabled() and not (registrant is not None and (radioItem.isBillable() or billable) and registrant.getPayed()):
                placesInfo = ""
                if radioItem.getPlacesLimit():
                    placesInfo = """ [%s place(s) left]""" % (radioItem.getNoPlacesLeft())
                disabled = ""
                if (not radioItem.hasAvailablePlaces() and radioItem.getCaption() != value):
                    disabled = " disabled='disabled'"
                selected = ""
                if radioItem.getCaption() == value:
                    selected = " selected='selected'"
                else:
                    selected = ''
                if radioItem.isBillable():
                    price = """ %s %s """ % (radioItem.getPrice(), currency)
                else:
                    price = ''
                tmp.append("""<option value="%s"%s%s>%s%s%s</option>""" % (radioItem.getId(), selected, disabled, radioItem.getCaption(), price, placesInfo))
        tmp.append("""</select>%s</td>""" % param)
        if description:
            tmp.append("""<tr><td colspan="2">%s</td></tr>""" % (self._getDescriptionHTML(description)))
        return "".join(tmp)

    def _getModifHTML(self, item, registrant, default=""):
        # Dispatch to the renderer matching the configured input type.
        if self.getInputType() == 'radiogroup':
            return self._getRadioGroupModifHTML(item, registrant, default)
        else:
            return self._getDropDownModifHTML(item, registrant, default)

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Store the selected option (submitted as the item's id) into *item*."""
        radioitemid = params.get(self.getHTMLName(), "")
        billable = False
        for val in self.getItemsList():
            if val.isBillable():
                billable = True
        if (registrant is not None and self._parent.isBillable() and registrant.getPayed()):
            #if (item is not None and billable):
            #######################
            # if the registrant has already payed, Indico blocks all the modifications about new/removed items
            return
        if not override and self.getParent().isMandatory() and radioitemid.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        price = 0
        quantity = 0
        caption = ""
        if radioitemid.strip() != "":
            radioitem = self.getItemById(radioitemid)
            if radioitem is not None:
                caption = radioitem.getCaption()
                billable = radioitem.isBillable()
                price = radioitem.getPrice()
                quantity = 1
        item.setCurrency(self._parent.getParent().getRegistrationForm().getCurrency())
        item.setMandatory(self.getParent().isMandatory())
        item.setValue(caption)
        item.setBillable(billable)
        item.setPrice(price)
        item.setQuantity(quantity)
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        """Render the management form for the group's type and its items."""
        if self.getInputType() == 'radiogroup':
            radioSelected = ' selected="selected"'
            dropdownSelected = ''
        else:
            radioSelected = ''
            dropdownSelected = ' selected="selected"'
        if self.getParent().isLocked('input'):
            typeDisabled = ' disabled="disabled"'
        else:
            typeDisabled = ''
        html = [i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Type of input")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <select name="inputtype"%(typeDisabled)s>
              <option value="radiogroup"%(radioSelected)s>Radio group</option>
              <option value="dropdown"%(dropdownSelected)s>Drop-down menu</option>
            </select>
          </td>
        </tr>
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">Items</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <table>""") % dict(radioSelected=radioSelected, dropdownSelected=dropdownSelected, typeDisabled=typeDisabled)]
        html.append(i18nformat("""<tr>
                    <td valign="top" align="left">
                      <table>
                        <tr>
                          <td class="blacktext"><span class="titleCellFormat"> _("Caption")</span></td>
                          <td bgcolor="white" class="blacktext" width="100%%">
                            <input type="text" name="newradioitem">
                          </td>
                        </tr>
                        <tr>
                          <td class="blacktext"><span class="titleCellFormat"> _("Billable")</span></td>
                          <td bgcolor="white" class="blacktext" width="100%%">
                            <input type="checkbox" name="newbillable" >
                          </td>
                        </tr>
                        <tr>
                          <td class="blacktext"><span class="titleCellFormat"> _("Price")</span></td>
                          <td bgcolor="white" class="blacktext" width="100%%">
                            <input type="text" name="newprice">
                          </td>
                        </tr>
                        <tr>
                          <td class="blacktext"><span class="titleCellFormat"> _("Places")</span></td>
                          <td bgcolor="white" class="blacktext" width="100%%">
                            <input type="text" name="newplaces">%s
                          </td>
                        </tr>
                      </table>
                    </td>
                    <td rowspan="2" valign="top" align="left">
                      <input type="submit" class="btn" name="addradioitem" value="_("add")" onfocus="addIsFocused = true;" onblur="addIsFocused = false;"><br>
                      <input type="submit" class="btn" name="removeradioitem" value="_("remove")"><br>
                      <input type="submit" class="btn" name="disableradioitem" value="_("enable/disable")"><br>
                      <input type="submit" class="btn" name="defaultradioitem" value="_("set as default")"><br>
                      <input type="submit" class="btn" name="changeradioitem" value="_("change")"><br>
                      <input type="submit" class="btn" name="removeradioitemprice" value="_("remove price")"><br>
                    </td>
                  </tr>
                """) % inlineContextHelp(_('Use 0 for unlimited places')))
        html.append("""<tr><td valign="top" align="left"><table>""")
        billable = False
        for v in self.getItemsList():
            placesInfo = ""
            if v.getPlacesLimit():
                placesInfo = " (%s places)" % (v.getPlacesLimit())
            html.append("""
                        <tr>
                          <td bgcolor="white" class="blacktext" ><input type="checkbox" name="radioitems" value="%s">%s%s</td>
                          <td bgcolor="white" class="blacktext" >
                        """ % (v.getId(), v.getCaption(), placesInfo))
            if v.isBillable():
                billable = True
                html.append(i18nformat("""<span class="titleCellFormat"> _("Price"):%s</span>""") % (v.getPrice()))
            if not v.isEnabled():
                html.append("""<span><font color="red"> (""" + _("disabled") + """)</font></span>""")
            if v.getCaption() == self.getDefaultItem():
                html.append("""<span><font color="green"> (""" + _("default") + """)</font></span>""")
            html.append("""
                          </td>
                        </tr>
                        """)
        html.append("""</table></td></tr>""")
        if billable:
            html.append("""<input type="hidden" name="billable" value="">""")
        html.append("""</table></td></tr>""")
        return "".join(html)
class CountryInput(FieldInputType, Fossilizable):
    """Registration-form field rendered as a country drop-down.

    The stored value is the country key; display helpers translate it to
    the human-readable country name.
    """
    fossilizes(IRegFormCountryInputFieldFossil)

    _id = "country"

    def getName(cls):
        return "Country"
    getName = classmethod(getName)

    def getValueDisplay(self, value):
        return CountryHolder().getCountryById(value)

    def getCountriesList(self):
        """Return [{countryKey, caption}, ...] sorted by country name."""
        countryList = []
        for countryKey in CountryHolder().getCountrySortedKeys():
            country = {}
            country["countryKey"] = countryKey
            country["caption"] = CountryHolder().getCountryById(countryKey)
            countryList.append(country)
        return countryList

    def _getModifHTML(self, item, registrant, default=""):
        """Render the country select; *item* is the registrant's stored
        answer (None when rendering an empty form)."""
        description = self._parent.getDescription()
        htmlName = self.getHTMLName()
        value = default
        if item is not None:
            value = item.getValue()
            htmlName = item.getHTMLName()
        disable = ""
        if self._parent.isMandatory():
            param = """<script>addParam($E('%s'), 'text', false);</script>""" % htmlName
        else:
            param = ''
        inputHTML = i18nformat("""<option value="">-- _("Select a country") --</option>""")
        for countryKey in CountryHolder().getCountrySortedKeys():
            selected = ""
            if value == countryKey:
                selected = "selected"
            inputHTML += """<option value="%s" %s>%s</option>""" % (countryKey, selected, CountryHolder().getCountryById(countryKey))
        inputHTML = """<select id="%s" name="%s" %s>%s</select>%s""" % (htmlName, htmlName, disable, inputHTML, param)
        tmp = """ <td>%s</td>""" % inputHTML
        if description:
            tmp = """%s</tr><tr><td colspan="2">%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Store the submitted country key into *item*."""
        v = params.get(self.getHTMLName(), "")
        if not override and self.getParent().isMandatory() and v.strip() == "":
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        item.setQuantity(0)
        item.setValue(v)
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        # Country fields have no extra configuration options.
        return ""
class DateInput(FieldInputType, Fossilizable):
    """Registration-form field for entering a date (optionally with time).

    The display/storage format is configurable per field via
    setDateFormat(), chosen from getDisplayFormats().
    """
    fossilizes(IRegFormDateInputFieldFossil)

    _id = "date"

    def __init__(self, field):
        FieldInputType.__init__(self, field)
        self.dateFormat = ''

    def getName(cls):
        return "Date"
    getName = classmethod(getName)

    def getValues(self):
        d = {}
        d["dateFormat"] = self.getDateFormat()
        return d

    def setValues(self, data):
        if data.has_key("dateFormat"):
            self.setDateFormat(data.get("dateFormat"))

    def clone(self, gf):
        di = FieldInputType.clone(self, gf)
        di.dateFormat = self.getDateFormat()
        return di

    def getDateFormat(self):
        # Fall back to the first known display format when unset.
        if self.dateFormat == '':
            self.dateFormat = self.getDisplayFormats()[0][0]
        return self.dateFormat

    def setDateFormat(self, dateFormat):
        self.dateFormat = dateFormat

    def getDisplayFormats(self):
        """Return the supported (strftime pattern, human-readable label) pairs."""
        return [('%d/%m/%Y %H:%M', 'DD/MM/YYYY hh:mm'),
                ('%d.%m.%Y %H:%M', 'DD.MM.YYYY hh:mm'),
                ('%m/%d/%Y %H:%M', 'MM/DD/YYYY hh:mm'),
                ('%m.%d.%Y %H:%M', 'MM.DD.YYYY hh:mm'),
                ('%Y/%m/%d %H:%M', 'YYYY/MM/DD hh:mm'),
                ('%Y.%m.%d %H:%M', 'YYYY.MM.DD hh:mm'),
                ('%d/%m/%Y', 'DD/MM/YYYY'),
                ('%d.%m.%Y', 'DD.MM.YYYY'),
                ('%m/%d/%Y', 'MM/DD/YYYY'),
                ('%m.%d.%Y', 'MM.DD.YYYY'),
                ('%Y/%m/%d', 'YYYY/MM/DD'),
                ('%Y.%m.%d', 'YYYY.MM.DD'),
                ('%m/%Y', 'MM/YYYY'),
                ('%m.%Y', 'MM.YYYY'),
                ('%Y', 'YYYY')]

    def getValueDisplay(self, value):
        # Stored values may be datetime objects or plain strings.
        if type(value) == datetime:
            return value.strftime(self.getDateFormat())
        else:
            return value

    def getHTMLName(self):
        return "_genfield_%s_%s_" % (self.getParent().getParent().getId(), self.getParent().getId())

    def _getModifHTML(self, item, registrant, default=""):
        """Render the date widget plus a hint with the expected format."""
        description = self._parent.getDescription()
        if item is not None:
            date = item.getValue()
            htmlName = item.getHTMLName()
        else:
            date = default or None
            htmlName = self.getHTMLName()
        from MaKaC.webinterface.wcomponents import WDateField
        inputHTML = WDateField(htmlName, date, self.getDateFormat(), True, self._parent.isMandatory()).getHTML()
        # Turn the strftime pattern into a user-readable hint (DD/MM/YYYY...).
        dateFormat = self.getDateFormat()
        dateFormat = re.sub('%d', 'DD', dateFormat)
        dateFormat = re.sub('%m', 'MM', dateFormat)
        dateFormat = re.sub('%Y', 'YYYY', dateFormat)
        dateFormat = re.sub('%H', 'hh', dateFormat)
        dateFormat = re.sub('%M', 'mm', dateFormat)
        dformat = """ <span class="inputDescription">%s</span>""" % dateFormat
        tmp = "%s %s" % (inputHTML, dformat)
        tmp = """ <td>%s</td><td align="right" align="bottom">""" % tmp
        tmp = """%s </td> """ % tmp
        if description:
            tmp = """%s</tr><tr><td>%s</td>""" % (tmp, self._getDescriptionHTML(description))
        return tmp

    def _setResponseValue(self, item, params, registrant, override=False, validate=True):
        """Assemble the submitted day/month/year/hour/minute into a datetime."""
        day = params.get('%sDay' % self.getHTMLName(), 1) or 1
        month = params.get('%sMonth' % self.getHTMLName(), 1) or 1
        year = params.get('%sYear' % self.getHTMLName())
        hour = params.get('%sHour' % self.getHTMLName(), 0) or 0
        minute = params.get('%sMin' % self.getHTMLName(), 0) or 0
        # The year field decides whether a date was entered at all.
        if year:
            date = datetime(int(year), int(month), int(day), int(hour), int(minute))
            item.setValue(date)
        elif not self._parent.isMandatory():
            item.setValue(None)
        elif not override:
            raise FormValuesError(_("The field \"%s\" is mandatory. Please fill it.") % self.getParent().getCaption())
        item.setMandatory(self.getParent().isMandatory())
        item.setHTMLName(self.getHTMLName())

    def _getSpecialOptionsHTML(self):
        """Render the date-format selector for the management form."""
        formats = self.getDisplayFormats()
        html = [i18nformat("""
        <tr>
          <td class="titleCellTD"><span class="titleCellFormat">_("Date format")</span></td>
          <td bgcolor="white" class="blacktext" width="100%%">
            <select name="dateFormat">""")]
        for format, display in formats:
            if self.getDateFormat() == format:
                selected = ' selected="selected"'
            else:
                selected = ''
            html.append("""<option value="%s"%s>%s</option>""" % (format, selected, display))
        html.append(_("""</select>
          </td>
        </tr>"""))
        return "".join(html)

    def _getFormatDisplayText(self):
        """Return the human-readable label of the current date format."""
        formats = self.getDisplayFormats()
        value = ""
        for dateFormat, display in formats:
            if self.getDateFormat() == dateFormat:
                value = display
                break
        return value
class FieldInputs:
    """Registry of all available registration-form input types.

    Maps each input's string id (e.g. "text", "radio", "date") to the
    class implementing it.
    """

    _availableInputs = {
        TextInput.getId(): TextInput,
        TextareaInput.getId(): TextareaInput,
        LabelInput.getId(): LabelInput,
        NumberInput.getId(): NumberInput,
        RadioGroupInput.getId(): RadioGroupInput,
        CheckboxInput.getId(): CheckboxInput,
        YesNoInput.getId(): YesNoInput,
        CountryInput.getId(): CountryInput,
        DateInput.getId(): DateInput,
        TelephoneInput.getId(): TelephoneInput,
        FileInput.getId(): FileInput,
    }

    @classmethod
    def getAvailableInputs(cls):
        """Return the full id -> input-class mapping."""
        return cls._availableInputs

    @classmethod
    def getAvailableInputKlassById(cls, id):
        """Return the input class registered under *id*, or None."""
        return cls._availableInputs.get(id, None)

    @classmethod
    def getAvailableInputKeys(cls):
        """Return the ids of all registered input types."""
        return cls._availableInputs.keys()
class GeneralField(Persistent, Fossilizable):
    """One field of a registration form section.

    Pairs an input widget (one of the classes registered in FieldInputs)
    with its metadata: caption, description, mandatory/disabled flags,
    billing information (price), an optional limit on the number of
    places, and - for fields of the personal-data section - the name of
    the personal-data slot they correspond to (``_pdField``).

    Several getters use ``try/except`` around an attribute access: these
    lazily add attributes that did not exist when old instances were
    persisted in the ZODB.
    """
    fossilizes(IRegFormGeneralFieldFossil)

    def __init__(self, parent, data=None):
        self._parent = parent
        self._id = ""
        if data is None:
            # Fresh field: a disabled text input with default metadata.
            self._caption = "General Field"
            self._input = FieldInputs.getAvailableInputKlassById("text")(self)
            self._input.setValues(data)
            self._mandatory = False
            self._locked = ()
            self._description = ""
            self._billable = False
            self._price = "0"
            self._placesLimit = 0
            self._currentNoPlaces = 0
            self._disabled = True
            self._pdField = None
        else:
            self._mandatory = False
            self.setValues(data, True)

    def clone(self, newsection):
        """Return a copy of this field attached to *newsection*."""
        field = GeneralField(newsection, self.getValues())
        return field

    def setValues(self, data, firstTime=False):
        """Update the field from the *data* dict.

        *firstTime* is True when called from the constructor; only then
        are the input type, the lock tuple and the personal-data link
        taken from *data*.
        """
        caption = data.get("caption", "")
        if caption == "":
            caption = _("General Field")
        self.setCaption(caption)
        ## The following commented lines were removed, but it is unclear if they are not needed anymore.
        if firstTime: # or not self.isLocked('input'):
            self.setInput(FieldInputs.getAvailableInputKlassById(data.get("input", "text"))(self))
        #else:
        #    self.setInput(FieldInputs.getAvailableInputKlassById(self.getInput().getId())(self))
        # Input values may arrive as another input object, as a plain
        # values dict, or inline in *data* itself.
        if data.has_key("inputObj"):
            self._input.setValues(data["inputObj"].getValues())
        elif data.has_key('inputValues'):
            self._input.setValues(data["inputValues"])
        else:
            self._input.setValues(data)
        if firstTime:
            self.setLocked(data.get("lock", ()))
        # A field whose 'mandatory' flag is locked can never be made
        # optional again; otherwise the flag follows *data*.
        if self.isMandatory() and self.isLocked('mandatory'):
            self.setMandatory(True)
        else:
            self.setMandatory(data['mandatory'] if 'mandatory' in data else False)
        if self.isLocked('disable'):
            self.setDisabled(False)
        elif 'disabled' in data:
            self.setDisabled(data.get("disabled", False))
        self.setBillable(data.get("billable", False))
        self.setPrice(str(data.get("price", "")))
        self.setPlacesLimit(data.get("placesLimit", "0"))
        self.setDescription(data.get("description", ""))
        if firstTime:
            self.setPDField(data.get("pd"))

    def getValues(self):
        """Return all field settings as a plain dict (see setValues)."""
        values = {}
        values["caption"] = self.getCaption()
        values["input"] = self.getInput().getId()
        values["inputObj"] = self.getInput()
        values["lock"] = self.getLocked()
        values["mandatory"] = self.isMandatory()
        values["disabled"] = self.isDisabled()
        values["billable"] = self.isBillable()
        values["price"] = self.getPrice()
        values["placesLimit"] = self.getPlacesLimit()
        values["description"] = self.getDescription()
        values["pd"] = self.getPDField()
        return values

    def isTemporary(self):
        return False

    def setPDField(self, v):
        self._pdField = v

    def getPDField(self):
        # Broad except kept: old persisted instances may lack the attribute.
        try:
            return self._pdField
        except:
            self._pdField = None
            return self._pdField

    def isBillable(self):
        try:
            return self._billable
        except:
            self._billable = False
            return self._billable

    def setBillable(self, v):
        self._billable = v

    def getPrice(self):
        try:
            return self._price
        except:
            self._price = 0
            return self._price

    def setPrice(self, price):
        # Validate/normalize the price with the shared PRICE_PATTERN regex;
        # only the matched group is stored.
        if price:
            match = PRICE_PATTERN.match(price)
            if match:
                price = match.group(1)
            else:
                raise MaKaCError(_('The price is in incorrect format!'))
        self._price = price

    def getPlacesLimit(self):
        try:
            if self._placesLimit:
                pass
        except AttributeError, e:
            self._placesLimit = 0
        return self._placesLimit

    def setPlacesLimit(self, limit):
        # Empty string means "no limit" (stored as 0).
        if limit == "":
            limit = "0"
        try:
            l = int(limit)
        except ValueError:
            raise FormValuesError(_("Please enter a number for the limit of places"))
        self._placesLimit = l
        self.updateCurrentNoPlaces()

    def getCurrentNoPlaces(self):
        try:
            if self._currentNoPlaces:
                pass
        except AttributeError:
            self._currentNoPlaces = 0
        return self._currentNoPlaces

    def hasAvailablePlaces(self):
        # A limit of 0 means unlimited.
        if not self.getPlacesLimit():
            return True
        return (self.getCurrentNoPlaces() < self.getPlacesLimit())

    def getNoPlacesLeft(self):
        return self.getPlacesLimit() - self.getCurrentNoPlaces()

    def increaseNoPlaces(self):
        if self.getPlacesLimit() > 0:
            if self.getCurrentNoPlaces() >= self.getPlacesLimit():
                raise FormValuesError(_("""The limit for the number of places is smaller than the current amount registered for this item."""))
            self._currentNoPlaces += 1

    def decreaseNoPlaces(self):
        if self.getPlacesLimit() > 0 and self.getCurrentNoPlaces() > 0:
            self._currentNoPlaces -= 1

    def updateCurrentNoPlaces(self):
        """Recount occupied places from all registrants' answers."""
        self._currentNoPlaces = 0
        if self._parent.getId() == '':
            # parent is not yet in the form
            return
        for reg in self._parent.getRegistrationForm().getConference().getRegistrantsList():
            mg = reg.getMiscellaneousGroupById(self._parent.getId())
            if mg:
                item = mg.getResponseItemById(self.getId())
                if item is not None and item.getQuantity():
                    self.increaseNoPlaces()

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getCaption(self):
        return self._caption

    def setCaption(self, caption):
        self._caption = caption

    def getDescription(self):
        try:
            if self._description:
                pass
        except AttributeError:
            self._description = ''
        return self._description

    def setDescription(self, description):
        self._description = description

    def getInput(self):
        return self._input

    def setInput(self, input):
        self._input = input

    def isMandatory(self):
        return self._mandatory

    def setMandatory(self, v):
        self._mandatory = v

    def getLocked(self):
        # Tuple of aspect names ('input', 'delete', 'mandatory', 'disable')
        # that cannot be changed through the management interface.
        try:
            return self._locked
        except:
            self._locked = ()
            return self._locked

    def isLocked(self, what):
        return what in self.getLocked()

    def setLocked(self, v):
        self._locked = v

    def isDisabled(self):
        try:
            return self._disabled
        except:
            self._disabled = False
            return self._disabled

    def setDisabled(self, v):
        self._disabled = v

    def getParent(self):
        return self._parent

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the GeneralField instance """
        if self.getParent() == None:
            return Locator()
        lconf = self.getParent().getLocator()
        lconf["sectionFieldId"] = self.getId()
        return lconf
class GeneralSectionForm(BaseForm, Fossilizable):
    """A generic section of the registration form holding ordered fields.

    Fields are GeneralField instances kept in ``_sortedFields`` (a plain
    list, introduced to support user-defined ordering; the older
    ``_fields`` attribute is migrated lazily in getSortedFields).  Field
    ids are produced by the per-section ``_generalFieldGenerator`` counter.
    """
    fossilizes(IRegFormGeneralSectionFossil)

    def __init__(self, regForm, data=None, required=False):
        BaseForm.__init__(self)
        self._regForm = regForm
        self._id = ""
        self._title = _("Miscellaneous information")
        self._description = ""
        self._required = required
        #####
        #Mods to support sorting fields
        #self._fields=[]
        self._sortedFields = []
        if data is not None:
            self._title = data.get("title", self._title)
            self._description = data.get("description", self._description)
        self._generalFieldGenerator = Counter()

    def setValues(self, data):
        """Update title, description and 'required' flag from *data*."""
        title = data.get("title", "").strip()
        if title == "":
            title = _("Miscellaneous information %s") % self.getId()
        self.setTitle(title)
        self.setDescription(data.get("description", ""))
        if 'required' in data:
            self.setRequired(data['required'])

    def getValues(self):
        values = {}
        values["title"] = self.getTitle()
        values["description"] = self.getDescription()
        values["enabled"] = self.isEnabled()
        values["required"] = self.isRequired()
        return values

    def clone(self, regForm):
        """Return a deep copy of the section (including fields) for *regForm*."""
        gsf = GeneralSectionForm(regForm)
        gsf.setId(self.getId())
        gsf.setValues(self.getValues())
        gsf.setEnabled(self.isEnabled())
        gsf.setRequired(self.isRequired())
        #Mods to support sorting fields
        #for field in self.getFields():
        for field in self.getSortedFields():
            gsf.addToSortedFields(field.clone(gsf))
        return gsf

    def getRegistrationForm(self):
        return self._regForm

    def getConference(self):
        return self._regForm.getConference()

    def _getGeneralFieldGenerator(self):
        return self._generalFieldGenerator

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getTitle(self):
        return self._title

    def setTitle(self, title):
        self._title = title

    def getDescription(self):
        return self._description

    def setDescription(self, description):
        self._description = description

    def isRequired(self):
        # Broad except kept: old persisted instances may lack _required.
        try:
            return self._required
        except:
            self._required = False
            return False

    def setRequired(self, required):
        self._required = required

    def getSortedFields(self):
        # Lazy migration: pre-sorting instances stored their fields in
        # _fields; move them over to _sortedFields on first access.
        try:
            returnFields = self._sortedFields
        except AttributeError:
            self._sortedFields = self._fields
            returnFields = self._sortedFields
        return returnFields

    def addToSortedFields(self, f, i=None):
        """Insert field *f* at position *i* (append when *i* is None).

        If *f* is already in the section it is moved; otherwise it gets a
        fresh id from the section counter.
        """
        if i is None:
            i = len(self.getSortedFields())
        try:
            self.getSortedFields().remove(f)
        except ValueError, e:
            f.setId(str(self._getGeneralFieldGenerator().newCount()))
        self.getSortedFields().insert(i, f)
        self.notifyModification()
        return True

    def removeField(self, f):
        if f in self.getSortedFields():
            self.getSortedFields().remove(f)
            self.notifyModification()

    def getFieldById(self, id):
        for f in self.getSortedFields():
            if f.getId() == id:
                return f
        return None

    def getFieldPosById(self, id):
        for ind, f in enumerate(self.getSortedFields()):
            if f.getId() == id:
                return ind
        return None
    #
    #end mods
    ##########

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the GeneralSectionForm instance """
        if self.getRegistrationForm().getConference() == None:
            return Locator()
        lconf = self.getRegistrationForm().getLocator()
        lconf["sectionFormId"] = self.getId()
        return lconf

    def notifyModification(self):
        # Plain lists do not notify the ZODB; mark the object dirty by hand.
        self._p_changed = 1
class PersonalDataForm(GeneralSectionForm):
    """The mandatory 'Personal Data' section of the registration form.

    A GeneralSectionForm pre-populated with the standard personal fields
    (title, name, email, ...).  ``_pdMap`` maps each personal-data slot
    name (the 'pd' key of the field spec) to its GeneralField, so values
    can be read/written by slot name.
    """

    def __init__(self, regForm, createFields=True):
        GeneralSectionForm.__init__(self, regForm, {'title': 'Personal Data'}, True)
        # Standard field specs; 'lock' lists the aspects that must not be
        # changed through the management interface.
        fields = (
            { 'pd': 'title',
              'caption': 'Title',
              'input': 'radio',
              'inputValues': {
                  'inputType':'dropdown',
                  'emptyCaption': '',
                  'radioitems': [{'caption':title} for title in TitlesRegistry.getList()]
              },
              'lock': ('input', 'delete')
            },
            { 'pd':'firstName', 'caption':'First Name', 'mandatory':True, 'lock':('mandatory', 'input', 'delete', 'disable') },
            { 'pd':'surname', 'caption':'Surname', 'mandatory':True, 'lock':('mandatory', 'input', 'delete', 'disable') },
            { 'pd':'position', 'caption':'Position', 'lock':('input', 'delete') },
            { 'pd':'institution', 'caption':'Institution', 'mandatory':True, 'lock':('input', 'delete') },
            { 'pd':'address', 'caption':'Address', 'lock':('input', 'delete') },
            { 'pd':'city', 'caption':'City', 'mandatory':True, 'lock':('input', 'delete') },
            { 'pd':'country', 'caption':'Country', 'input':'country', 'mandatory':True, 'lock':('input', 'delete') },
            { 'pd':'phone', 'caption':'Phone', 'input':'telephone', 'lock':('input', 'delete') },
            { 'pd':'fax', 'caption':'Fax', 'input':'telephone', 'lock':('input', 'delete') },
            { 'pd':'email', 'caption':'Email', 'mandatory':True, 'lock':('mandatory', 'input', 'delete', 'disable') },
            { 'pd':'personalHomepage', 'caption':'Personal homepage', 'lock':('input', 'delete') },
        )
        self._pdMap = {}
        # createFields=False is used by clone(), which copies fields itself.
        if createFields:
            for fieldInfo in fields:
                field = GeneralField(self, fieldInfo)
                self._pdMap[fieldInfo['pd']] = field
                self.addToSortedFields(field)

    def clone(self, regForm):
        """Return a deep copy of the section (fields and pd map) for *regForm*."""
        pf = PersonalDataForm(regForm, False)
        pf.setId(self.getId())
        pf.setValues(self.getValues())
        pf.setEnabled(self.isEnabled())
        pf.setRequired(self.isRequired())
        for field in self.getSortedFields():
            f = field.clone(pf)
            pf.addToSortedFields(f)
            if f.getPDField():
                pf._pdMap[f.getPDField()] = f
        return pf

    def getValueFromParams(self, params, field):
        """Return the submitted form value for the personal-data slot *field*."""
        return params.get(self._pdMap[field].getInput().getHTMLName())

    def getField(self, field):
        """Return the GeneralField for the personal-data slot *field*."""
        return self._pdMap[field]

    def getRegistrantValues(self, registrant):
        """Return {slot name: answered value} for all enabled fields."""
        mg = registrant.getMiscellaneousGroupById(self.getId())
        return dict((name, mg.getResponseItemById(field.getId()).getValue()) for name, field in self._pdMap.iteritems() if not field.isDisabled())

    def getValuesFromAvatar(self, av):
        """Return {slot name: value} pre-filled from user account *av*
        (empty strings when *av* is None)."""
        r = dict((k, '') for k in ['title', 'firstName', 'surname', 'institution',
                                   'email', 'address', 'phone', 'fax'])
        if av is not None:
            r['title'] = av.getTitle()
            r['firstName'] = av.getFirstName()
            r['surname'] = av.getFamilyName()
            r['institution'] = av.getOrganisation()
            r['email'] = av.getEmail()
            r['address'] = av.getAddress()
            r['phone'] = av.getTelephone()
            # Avatars can hold several fax numbers; only the first is used.
            faxes = av.getFaxes()
            fax = ''
            if len(faxes) > 0:
                fax = faxes[0]
            r['fax'] = fax
        return r

    def getFormValuesFromAvatar(self, av):
        """Like getValuesFromAvatar, but keyed by the fields' HTML names so
        the result can pre-populate the registration form directly."""
        r = {}
        if av is not None:
            r[self._pdMap['title'].getInput().getHTMLName()] = av.getTitle()
            r[self._pdMap['firstName'].getInput().getHTMLName()] = av.getFirstName()
            r[self._pdMap['surname'].getInput().getHTMLName()] = av.getFamilyName()
            r[self._pdMap['institution'].getInput().getHTMLName()] = av.getOrganisation()
            r[self._pdMap['email'].getInput().getHTMLName()] = av.getEmail()
            r[self._pdMap['address'].getInput().getHTMLName()] = av.getAddress()
            r[self._pdMap['phone'].getInput().getHTMLName()] = av.getTelephone()
            faxes = av.getFaxes()
            fax = ''
            if len(faxes) > 0:
                fax = faxes[0]
            r[self._pdMap['fax'].getInput().getHTMLName()] = fax
        return r

    def getValuesFromRegistrant(self, reg):
        """Return {slot name: value} taken from registrant *reg*."""
        r = {}
        r['title'] = reg.getTitle()
        r['firstName'] = reg.getFirstName()
        r['surname'] = reg.getFamilyName()
        r['position'] = reg.getPosition()
        r['institution'] = reg.getInstitution()
        r['address'] = reg.getAddress()
        r['city'] = reg.getCity()
        r['country'] = reg.getCountry()
        r['phone'] = reg.getPhone()
        r['fax'] = reg.getFax()
        r['email'] = reg.getEmail()
        r['personalHomepage'] = reg.getPersonalHomepage()
        return r
class PersonalDataFormItem(Persistent): # old
    """Legacy descriptor of one personal-data field (id, name, input type,
    mandatory/enabled flags).  Kept for events persisted before the
    PersonalDataForm-based implementation."""

    def __init__(self, data=None):
        if data is None:
            data = {}
        self._id = data.get("id", "")
        self._name = data.get("name", "")
        self._input = data.get("input", "")
        self._mandatory = data.get("mandatory", False)
        self._enabled = data.get("enabled", True)

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getName(self):
        return self._name

    def setName(self, name):
        self._name = name

    def isEnabled(self):
        # Instances persisted before _enabled existed get the default here.
        if not hasattr(self, '_enabled'):
            self.setEnabled()
        return self._enabled

    def setEnabled(self, enabled=True):
        self._enabled = enabled
        self._p_changed = 1

    def getInput(self):
        return self._input

    def setInput(self, input):
        self._input = input

    def isMandatory(self):
        return self._mandatory

    def setMandatory(self, v):
        self._mandatory = v
        self._p_changed = 1
class PersonalData(Persistent):
    """Legacy container of the standard personal-data field descriptors.

    Keeps one PersonalDataFormItem per standard field in ``_data`` (a
    PersistentMapping keyed by field id) plus the display order in
    ``_sortedKeys``.
    """

    # (id, display name, input widget, mandatory) for each standard field,
    # in display order.  Data-driven replacement for the previous twelve
    # copy-pasted construction blocks; produces identical items.
    _STANDARD_FIELDS = (
        ('title', "Title", 'list', False),
        ('firstName', "First Name", 'text', True),
        ('surname', "Surname", 'text', True),
        ('position', "Position", 'text', False),
        ('institution', "Institution", 'text', True),
        ('address', "Address", 'text', False),
        ('city', "City", 'text', True),
        ('country', "Country/Region", 'list', True),
        ('phone', "Phone", 'text', False),
        ('fax', "Fax", 'text', False),
        ('email', "Email", 'hidden', True),
        ('personalHomepage', "Personal homepage", 'text', False),
    )

    def __init__(self):
        self._initStandardPersonalData()

    def _initStandardPersonalData(self):
        """(Re)create the standard field descriptors in display order."""
        self._data = PersistentMapping()
        self._sortedKeys = PersistentList()
        for fid, name, input_, mandatory in self._STANDARD_FIELDS:
            p = PersonalDataFormItem({'id': fid, 'name': name,
                                      'input': input_,
                                      'mandatory': mandatory})
            self._data[p.getId()] = p
            self._sortedKeys.append(p.getId())

    def clone(self):
        """Return a fresh PersonalData with the same enabled/mandatory flags."""
        form = PersonalData()
        for key, item in self._data.iteritems():
            newItem = form.getDataItem(key)
            newItem.setEnabled(item.isEnabled())
            newItem.setMandatory(item.isMandatory())
        return form

    def getValuesFromAvatar(self, av):
        """Return {field id: value} pre-filled from user account *av*
        (empty strings when *av* is None)."""
        r = {"title": "", "firstName": "", "surname": "", "institution": "",
             "email": "", "address": "", "phone": "", "fax": ""}
        if av is not None:
            r["title"] = av.getTitle()
            r["firstName"] = av.getFirstName()
            r["surname"] = av.getFamilyName()
            r["institution"] = av.getOrganisation()
            r["email"] = av.getEmail()
            r["address"] = av.getAddress()
            r["phone"] = av.getTelephone()
            # Avatars can hold several fax numbers; only the first is used.
            faxes = av.getFaxes()
            fax = ""
            if len(faxes) > 0:
                fax = faxes[0]
            r["fax"] = fax
        return r

    def getValuesFromRegistrant(self, reg):
        """Return {field id: value} taken from registrant *reg*."""
        r = {}
        r["title"] = reg.getTitle()
        r["firstName"] = reg.getFirstName()
        r["surname"] = reg.getFamilyName()
        r["position"] = reg.getPosition()
        r["institution"] = reg.getInstitution()
        r["address"] = reg.getAddress()
        r["city"] = reg.getCity()
        r["country"] = reg.getCountry()
        r["phone"] = reg.getPhone()
        r["fax"] = reg.getFax()
        r["email"] = reg.getEmail()
        r["personalHomepage"] = reg.getPersonalHomepage()
        return r

    def getData(self):
        return self._data

    def getSortedKeys(self):
        return self._sortedKeys

    def getMandatoryItems(self):
        """Return the ids of fields that are both mandatory and enabled,
        in display order."""
        r = []
        for i in self.getSortedKeys():
            if self.getData()[i].isMandatory() and self.getData()[i].isEnabled():
                r.append(i)
        return r

    def getDataItem(self, key):
        return self._data.get(key, None)
class FurtherInformationForm(BaseForm, Fossilizable):
    """Free-text section of the registration form ('Further information')."""
    fossilizes(IRegFormFurtherInformationSectionFossil)

    def __init__(self, data=None):
        BaseForm.__init__(self)
        if data is None:
            data = {}
        self._title = data.get("title", "Further information")
        self._content = data.get("content", "")
        self._id = "furtherInformation"

    def getId(self):
        # Instances persisted before _id existed get it on first access.
        if not hasattr(self, '_id'):
            self._id = "furtherInformation"
        return self._id

    def setValues(self, data):
        self.setTitle(data.get("title", "Further Information"))
        self.setContent(data.get("content", ""))

    def getValues(self):
        return {"title": self.getTitle(),
                "content": self.getContent(),
                "enabled": self.isEnabled()}

    def clone(self):
        copy = FurtherInformationForm()
        copy.setValues(self.getValues())
        copy.setEnabled(self.isEnabled())
        return copy

    def getTitle(self):
        return self._title

    def setTitle(self, title):
        self._title = title

    def getContent(self):
        return self._content

    def setContent(self, content):
        self._content = content

    # Fallback for setDescription
    setDescription = setContent

    def getItems(self):
        # This section has no item list of its own.
        return ""
class AccommodationType(Persistent, Fossilizable):
    """One selectable accommodation option of a registration form.

    Holds a caption plus billing (billable flag, price) and availability
    (places limit / current occupancy) information.  Several getters use
    try/except to lazily add attributes missing on old persisted
    instances.
    """
    fossilizes(IRegFormAccommodationTypeItemFossil)

    def __init__(self, rf, data=None):
        # NOTE(review): the *data* parameter is accepted but not used here;
        # callers populate the object via setValues().
        self._id = ""
        self._caption = ""
        self._regForm = rf
        self._cancelled = False
        self._placesLimit = 0
        self._currentNoPlaces = 0
        self._billable = False
        self._price = 0

    def setValues(self, data):
        """Update caption, cancelled flag, places limit and billing from *data*."""
        self.setCaption(data.get("caption", "--no caption--"))
        self.setCancelled(data.has_key("cancelled") and data["cancelled"])
        self.setPlacesLimit(data.get("placesLimit", "0"))
        self.setBillable(data.has_key("billable") and data["billable"])
        self.setPrice(data.get("price"))
        self._regForm.notifyModification()

    def getValues(self):
        # 'cancelled' and 'billable'/'price' are only included when set.
        values = {}
        values["caption"] = self.getCaption()
        if self.isCancelled():
            values["cancelled"] = self.isCancelled()
        values["placesLimit"] = self.getPlacesLimit()
        if self.isBillable():
            values["billable"] = True
            values["price"] = self.getPrice()
        return values

    def clone(self, registrationForm):
        """Return a copy of this type attached to *registrationForm*."""
        act = AccommodationType(registrationForm)
        act.setValues(self.getValues())
        return act

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getCaption(self):
        return self._caption

    def setCaption(self, c):
        self._caption = c

    def getPlacesLimit(self):
        try:
            if self._placesLimit:
                pass
        except AttributeError, e:
            self._placesLimit = 0
        return self._placesLimit

    def setPlacesLimit(self, limit):
        # Empty string means "no limit" (stored as 0).
        if limit == "":
            limit = "0"
        try:
            l = int(limit)
        except ValueError, e:
            raise FormValuesError(_("Please introduce a number for the limit of places"))
        self._placesLimit = l
        self.updateCurrentNoPlaces()

    def getCurrentNoPlaces(self):
        try:
            if self._currentNoPlaces:
                pass
        except AttributeError, e:
            self._currentNoPlaces = 0
        return self._currentNoPlaces

    def hasAvailablePlaces(self):
        if self.getPlacesLimit() == 0: #zero means no limit
            return True
        if self.getCurrentNoPlaces() >= self.getPlacesLimit():
            return False
        return True

    def getNoPlacesLeft(self):
        return self.getPlacesLimit() - self.getCurrentNoPlaces()

    def increaseNoPlaces(self):
        if self.getPlacesLimit() > 0 :
            if self.getCurrentNoPlaces() >= self.getPlacesLimit():
                raise FormValuesError(_("""The limit for the number of places is smaller than the current amount registered for this accommodation. Please, set a higher limit."""))
            self._currentNoPlaces += 1

    def decreaseNoPlaces(self):
        if self.getPlacesLimit() > 0 and self.getCurrentNoPlaces() > 0:
            self._currentNoPlaces -= 1

    def updateCurrentNoPlaces(self):
        """Recount occupancy from all registrants of the conference."""
        self._currentNoPlaces = 0
        for reg in self._regForm.getConference().getRegistrantsList():
            acco = reg.getAccommodation()
            if acco is not None:
                accoType = acco.getAccommodationType()
                if accoType is not None and accoType == self:
                    self.increaseNoPlaces()

    def getRegistrationForm(self):
        return self._regForm

    def setRegistrationForm(self, rf):
        self._regForm = rf

    def isCancelled(self):
        try:
            if self._cancelled:
                pass
        except AttributeError, e:
            self._cancelled = False
        return self._cancelled

    def setCancelled(self, v):
        self._cancelled = v

    def isBillable(self):
        # Broad except kept: old persisted instances may lack the attribute.
        try:
            return self._billable
        except:
            self._billable = False
            return self._billable

    def setBillable(self, v):
        self._billable = v

    def getPrice(self):
        try:
            return self._price
        except:
            self.setPrice(0)
            return self._price

    def setPrice(self, price):
        # Validate/normalize with the shared PRICE_PATTERN regex.
        if price:
            match = PRICE_PATTERN.match(price)
            if match:
                price = match.group(1)
            else:
                raise MaKaCError(_('The price is in incorrect format!'))
        self._price = price

    def getCurrency(self):
        return self._regForm.getCurrency()

    def remove(self):
        # Removal marks the type cancelled and moves it to the trash can,
        # so existing registrations can still reference it.
        self.setCancelled(True)
        self.delete()

    def delete(self):
        self.setRegistrationForm(None)
        TrashCanManager().add(self)

    def recover(self, rf):
        self.setRegistrationForm(rf)
        TrashCanManager().remove(self)

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the AccommodationType instance """
        if self.getRegistrationForm().getConference() is None:
            return Locator()
        lconf = self.getRegistrationForm().getLocator()
        lconf["accoTypeId"] = self.getId()
        return lconf
class AccommodationForm(BaseForm, Fossilizable):
    """The 'Accommodation' section of the registration form.

    Holds the available AccommodationType options plus the allowed
    arrival/departure date windows, expressed as day offsets relative to
    the conference start/end dates.
    """
    fossilizes(IRegFormAccommodationSectionFossil)

    _iterableContainer = '_accommodationTypes'

    def __init__(self, regForm, data=None):
        BaseForm.__init__(self)
        self._accoTypeGenerator = Counter()
        self._regForm = regForm
        self._title = "Accommodation"
        self._description = ""
        self._accommodationTypes = PersistentMapping()
        if data is not None:
            self._title = data.get("title", self._title)
            self._description = data.get("description", self._description)
        self._setDefaultAccommodationTypes()
        self._id = "accommodation"
        # Default windows: arrival from 2 days before the conference start
        # until its end; departure from 1 day after start until 3 days
        # after the end.
        self._arrivalOffsetDates = [-2, 0]
        self._departureOffsetDates = [1, 3]

    def getId(self):
        # Lazy migration for instances persisted before _id existed.
        try:
            if self._id:
                pass
        except AttributeError, e:
            self._id = "accommodation"
        return self._id

    def getConference(self):
        return self._regForm.getConference()

    def getArrivalOffsetDates(self):
        # Broad except kept: old persisted instances may lack the attribute.
        try:
            return self._arrivalOffsetDates
        except:
            self.setDefaultArrivalOffsetDates()
            return self._arrivalOffsetDates

    def setDefaultArrivalOffsetDates(self):
        self._arrivalOffsetDates = [-2, 0]

    def getArrivalDates(self):
        """Return the list of selectable arrival dates (one per day),
        derived from the conference dates and the arrival offsets."""
        offsets = self.getArrivalOffsetDates()
        conf = self.getConference()
        dates = []
        curDate = startDate = conf.getStartDate() + timedelta(days=offsets[0])
        endDate = conf.getEndDate() + timedelta(days=offsets[1])
        if startDate > endDate:
            # Degenerate window: offer at least the start date itself.
            endDate = startDate
        while curDate <= endDate:
            dates.append(curDate)
            curDate += timedelta(days=1)
        return dates

    def setArrivalOffsetDates(self, dates):
        self._arrivalOffsetDates = dates

    def getDepartureOffsetDates(self):
        try:
            return self._departureOffsetDates
        except:
            self.setDefaultDepartureOffsetDates()
            return self._departureOffsetDates

    def setDefaultDepartureOffsetDates(self):
        self._departureOffsetDates = [1, 3]

    def getDepartureDates(self):
        """Return the list of selectable departure dates (one per day),
        derived from the conference dates and the departure offsets."""
        offsets = self.getDepartureOffsetDates()
        conf = self.getConference()
        dates = []
        curDate = startDate = conf.getStartDate() + timedelta(days=offsets[0])
        endDate = conf.getEndDate() + timedelta(days=offsets[1])
        if startDate > endDate:
            endDate = startDate
        while curDate <= endDate:
            dates.append(curDate)
            curDate += timedelta(days=1)
        return dates

    def setDepartureOffsetDates(self, dates):
        self._departureOffsetDates = dates

    def _setDefaultAccommodationTypes(self):
        # Historical CERN-specific defaults for new forms.
        a = AccommodationType(self._regForm)
        a.setId("cern")
        a.setCaption("CERN Hostel")
        self._accommodationTypes[a.getId()] = a
        a = AccommodationType(self._regForm)
        a.setId("own-accommodation")
        a.setCaption("I will arrange my own accommodation")
        self._accommodationTypes[a.getId()] = a
        a = AccommodationType(self._regForm)
        a.setId("geneva-hotel")
        a.setCaption("I prefer to book a room in a Geneva hotel")
        self._accommodationTypes[a.getId()] = a

    def setValues(self, data):
        """Update title, description and date-offset windows from *data*."""
        self.setTitle(data.get("title", "Accommodation"))
        self.setDescription(data.get("description", ""))
        self.setArrivalOffsetDates([int(data.get("aoffset1", -2)), int(data.get("aoffset2", 0))])
        self.setDepartureOffsetDates([int(data.get("doffset1", 1)), int(data.get("doffset2", 3))])

    def getValues(self):
        values = {}
        values["title"] = self.getTitle()
        values["description"] = self.getDescription()
        values["enabled"] = self.isEnabled()
        values["aoffset1"] = self.getArrivalOffsetDates()[0]
        values["aoffset2"] = self.getArrivalOffsetDates()[1]
        values["doffset1"] = self.getDepartureOffsetDates()[0]
        values["doffset2"] = self.getDepartureOffsetDates()[1]
        return values

    def clone(self, registrationForm):
        """Return a deep copy (settings and accommodation types) attached
        to *registrationForm*."""
        acf = AccommodationForm(registrationForm)
        acf.setValues(self.getValues())
        acf.setEnabled(self.isEnabled())
        # Discard the default types created by the constructor and copy
        # the real ones instead.
        acf._accommodationTypes = PersistentMapping()
        for at in self.getAccommodationTypesList() :
            acf.addAccommodationType(at.clone(registrationForm))
        return acf

    def getTitle(self):
        return self._title

    def setTitle(self, title):
        self._title = title

    def getDescription(self):
        return self._description

    def setDescription(self, description):
        self._description = description

    def getRegistrationForm(self):
        return self._regForm

    def _generateNewAccoTypeId(self):
        """Returns a new unique identifier for the current registration form
        """
        # Broad except kept: old persisted instances may lack the counter.
        try:
            return str(self._accoTypeGenerator.newCount())
        except:
            self._accoTypeGenerator = Counter()
            return str(self._accoTypeGenerator.newCount())

    def addAccommodationType(self, accom):
        # Assign a fresh id only when the type does not already have one
        # (clones keep their original id).
        id = accom.getId()
        if id == "":
            id = self._generateNewAccoTypeId()
            accom.setId(id)
        self._accommodationTypes[id] = accom

    def removeAccommodationType(self, accom):
        accom.remove()
        if self._accommodationTypes.has_key(accom.getId().strip()):
            del(self._accommodationTypes[accom.getId().strip()])

    def recoverAccommodationType(self, accom):
        self.addAccommodationType(accom)
        accom.recover(self.getRegistrationForm())

    def getAccommodationTypeById(self, id):
        if self._accommodationTypes.has_key(id.strip()):
            return self._accommodationTypes[id]
        return None

    def getAccommodationTypesList(self):
        return self._accommodationTypes.values()

    def clearAccommodationTypesList(self):
        for at in self.getAccommodationTypesList():
            self.removeAccommodationType(at)
class ReasonParticipationForm(BaseForm, Fossilizable):
    """Registration-form section asking the registrant's reason for
    participating (title + description only, no items)."""
    fossilizes(IRegFormReasonParticipationSectionFossil)

    def __init__(self, data=None):
        BaseForm.__init__(self)
        if data is None:
            data = {}
        self._title = data.get("title", "Reason for participation")
        self._description = data.get(
            "description",
            "Please, let us know why you are interested to participate in our event:")
        self._id = "reasonParticipation"

    def getId(self):
        # Instances persisted before _id existed get it on first access.
        if not hasattr(self, '_id'):
            self._id = "reasonParticipation"
        return self._id

    def setValues(self, data):
        self.setTitle(data.get("title", "Reason for participation"))
        self.setDescription(data.get("description", ""))

    def getValues(self):
        return {"title": self.getTitle(),
                "description": self.getDescription()}

    def clone(self):
        copy = ReasonParticipationForm()
        copy.setValues(self.getValues())
        copy.setEnabled(self.isEnabled())
        return copy

    def getTitle(self):
        return self._title

    def setTitle(self, title):
        self._title = title

    def getDescription(self):
        return self._description

    def setDescription(self, description):
        self._description = description

    def getItems(self):
        #No items for this form
        return ""
class RegistrationSession(Persistent, Fossilizable):
    """Registration-side wrapper around a conference Session.

    Adds billing information (billable flag, price, currency) to a
    session so it can be offered as a paid option in the sessions section
    of the registration form.
    """
    fossilizes(IRegFormRegistrationSessionItemFossil)

    def __init__(self, ses, regForm=None):
        self._session = ses
        # Back-link so the session knows its registration counterpart.
        self._session.setRegistrationSession(self)
        self._regForm = regForm
        self._price = 0
        self._billable = False
        self._currency = regForm.getCurrency()

    def setValues(self, data):
        """Update billable flag and price from *data*."""
        self.setBillable(data.has_key("billable") and data["billable"])
        self.setPrice(data.get("price"))

    def getValues(self):
        # 'billable'/'price' are only included when billing is enabled.
        data = {}
        if self.isBillable():
            data["billable"] = True
            data["price"] = self.getPrice()
        return data

    def getSession(self):
        return self._session

    def setSession(self, ses):
        self._session = ses
        self._billable = ses.isBillable()
        self._price = ses.getPrice()

    def getRegistrationForm(self):
        return self._regForm

    def setRegistrationForm(self, rf):
        self._regForm = rf

    def getParent(self):
        # The parent of registration session is "session form"
        if self._regForm is not None:
            return self._regForm.getSessionsForm()
        return None

    def getConference(self):
        if self._regForm is not None:
            return self._regForm.getConference()
        return None

    def remove(self):
        # Only the link to the form is cut; the underlying session keeps
        # its reference to this object.
        #self._session.setRegistrationSession(None)
        self.setRegistrationForm(None)
        pass

    def isCancelled(self):
        # A registration session is considered cancelled once it has been
        # detached from its registration form (see remove()).
##        return self._session is None or not self.getParent().hasSession(self.getId())
##        return not self.getParent().hasSession(self.getId())
        return not self.getRegistrationForm()

    def getId(self):
        # Id, title, start date and code are delegated to the wrapped session.
        return self._session.getId()

    def getTitle(self):
        return self._session.getTitle()

    # for compatibility with other fields
    getCaption = getTitle

    def getStartDate(self):
        return self._session.getStartDate()

    def getCode(self):
        return self._session.getCode()

    def getPrice(self):
        # Broad except kept: old persisted instances may lack the attribute.
        try:
            return self._price
        except:
            self.setPrice(0)
            return self._price

    def setPrice(self, price):
        # Validate/normalize with the shared PRICE_PATTERN regex.
        if price:
            match = PRICE_PATTERN.match(price)
            if match:
                price = match.group(1)
            else:
                raise MaKaCError(_('The price is in incorrect format!'))
        self._price = price

    def isBillable(self):
        try:
            return self._billable
        except:
            self._billable = False
            return self._billable

    def setBillable(self, v):
        self._billable = v

    def getCurrency(self):
        if not hasattr(self, "_currency") or not self._currency:
            # it may happen that _regForm doesn't exist (session was removed from it)
            if self._regForm:
                self._currency = self._regForm.getCurrency()
            else:
                self._currency = None
        return self._currency

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
            a Locator object for the RegistrationSession instance """
        if self.getRegistrationForm().getConference() == None:
            return Locator()
        lconf = self.getRegistrationForm().getLocator()
        lconf["sessionId"] = self.getId()
        return lconf

    @staticmethod
    def _cmpTitle(s1, s2):
        # cmp-style comparison by title; None sorts before any session.
        if s1 is None and s2 is not None:
            return -1
        elif s1 is not None and s2 is None:
            return 1
        elif s1 is None and s2 is None:
            return 0
        return cmp(s1.getTitle(), s2.getTitle())
class SessionsForm(BaseForm, Fossilizable):
fossilizes(IRegFormSessionSectionFossil)
_iterableContainer = '_sessions'
    def __init__(self, data=None):
        """Create the sessions section; *data* may pre-set title,
        description and the sessions mapping."""
        BaseForm.__init__(self)
        self._title = "Sessions"
        # Selection mode: "2priorities" (choose up to two, ranked) or
        # "all" (tick any number) -- see getSessionsFromParams().
        self._type = "2priorities"
        self._description = ""
        self._sessions = PersistentMapping()
        if data is not None:
            self._title = data.get("title", self._title)
            self._description = data.get("description", self._description)
            self._sessions = data.get("sessions", self._sessions)
        self._id = "sessions"
def getId(self):
try:
if self._id:
pass
except AttributeError, e:
self._id = "sessions"
return self._id
def clone(self, newSessions):
sesf = SessionsForm()
sesf.setTitle(self.getTitle())
sesf.setType(self.getType())
sesf.setDescription(self.getDescription())
sesf.setEnabled(self.isEnabled())
for s in newSessions:
ses = self.getSessionById(s.getId())
if ses:
s.setValues(ses.getValues())
sesf.addSession(s)
return sesf
def getValues(self):
data = {}
data["title"] = self.getTitle()
data["description"] = self.getDescription()
data["enabled"] = self.isEnabled()
data["type"] = self.getType()
return data
def setValues(self, data):
self.setTitle(data.get("title", "Sessions"))
self.setDescription(data.get("description", ""))
self.setType(data.get("sessionFormType", "2priorities"))
def getTitle(self):
return self._title
def setTitle(self, title):
self._title = title
def getDescription(self):
return self._description
def setDescription(self, description):
self._description = description
def getType(self):
try:
if self._type:
pass
except AttributeError, e:
self._type = "2priorities"
return self._type
def setType(self, type):
self._type = type
def getSessionsFromParams(self, params):
sessions = []
if self.isEnabled():
if self.getType() == "2priorities":
if params.get("session1", "nosession") == "nosession":
raise FormValuesError(_("Please, choose at least one session in order to register"))
if params.get("session1", "") == params.get("session2", "nosession"):
raise FormValuesError(_("You cannot choose the same session twice"))
sessions.append(self.getSessionById(params.get("session1")))
ses2 = self.getSessionById(params.get("session2", "nosession"))
if ses2 is not None:
sessions.append(ses2)
elif self.getType() == "all":
sess = params.get("sessions", [])
if type(sess) != list:
sess = [sess]
for ses in sess:
if self.hasSession(ses):
sessions.append(self.getSessionById(ses))
return [RegistrantSession(ses) for ses in sessions]
def getSessionList(self, doSort=False):
lv = self._sessions.values()
lv.sort(sortByStartDate)
if doSort:
lv.sort(RegistrationSession._cmpTitle)
return lv
def getSessions(self):
return self._sessions
def addSession(self, ses):
if not self._sessions.has_key(ses.getId()):
self._sessions[ses.getId()] = ses
def removeSession(self, sesId):
if self._sessions.has_key(sesId):
self._sessions[sesId].remove()
del self._sessions[sesId]
def clearSessionList(self):
for s in self.getSessionList():
self.removeSession(s)
def hasSession(self, key):
return self._sessions.has_key(key)
def getSessionById(self, id):
return self._sessions.get(id, None)
def sortByStartDate(x, y):
    """Comparator for list.sort(): orders two registrant-session wrappers by
    the start date of the conference session each one wraps."""
    startOf = lambda item: item.getSession().getStartDate()
    return cmp(startOf(x), startOf(y))
class SocialEventItem(Persistent, Fossilizable):
    """A social event (dinner, excursion, ...) offered on a registration form.

    Keeps a caption, optional price/billing settings, a global places limit
    with a live counter of booked places, a per-registrant places cap, and a
    cancellation flag plus reason.
    """
    fossilizes(IRegFormSocialEventItemFossil)

    def __init__(self, rf, data=None):
        # *data* is accepted for interface symmetry with other form items but
        # is currently ignored; use setValues() to populate the object.
        self._id = ""
        self._caption = "--no caption--"
        self._regForm = rf
        self._cancelled = False
        self._cancelledReason = ""
        self._maxPlacePerRegistrant = 10
        self._placesLimit = 0
        self._currentNoPlaces = 0
        self._billable = False
        self._price = 0
        self._pricePerPlace = False

    def setValues(self, data):
        """Update the item from a dict of submitted values; keys that are
        absent leave the corresponding attribute untouched."""
        if "caption" in data:
            self.setCaption(data["caption"])
        if "cancelled" in data:
            self.setCancelled(data["cancelled"])
        if "cancelledReason" in data:
            self.setCancelledReason(data["cancelledReason"])
        if "maxPlace" in data:
            # Non-numeric or negative input degrades to 0 (no cap).
            try:
                maxPlace = int(data["maxPlace"])
            except ValueError:
                maxPlace = 0
            if maxPlace < 0:
                maxPlace = 0
            self.setMaxPlacePerRegistrant(maxPlace)
        if "placesLimit" in data:
            self.setPlacesLimit(data["placesLimit"])
        if "billable" in data:
            self.setBillable(data["billable"])
        # Bug fix: the guard used to test for "billable" again, so supplying
        # "billable" without "pricePerPlace" raised KeyError here.
        if "pricePerPlace" in data:
            self.setPricePerPlace(data["pricePerPlace"])
        if "price" in data:
            self.setPrice(data["price"])

    def getValues(self):
        """Inverse of setValues(); optional flags are only emitted when set."""
        data = {}
        data["caption"] = self.getCaption()
        if self.isCancelled():
            data["cancelled"] = self.isCancelled()
            data["cancelledReason"] = self.getCancelledReason()
        data["maxPlace"] = self.getMaxPlacePerRegistrant()
        data["placesLimit"] = self.getPlacesLimit()
        if self.isBillable():
            data["billable"] = True
        if self.isPricePerPlace():
            data["pricePerPlace"] = True
        data["price"] = self.getPrice()
        return data

    def clone(self, regForm):
        newSEI = SocialEventItem(regForm)
        newSEI.setValues(self.getValues())
        return newSEI

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getCaption(self):
        return self._caption

    def setCaption(self, c):
        self._caption = c

    def getPlacesLimit(self):
        # Lazy default for instances persisted before _placesLimit existed.
        try:
            if self._placesLimit:
                pass
        except AttributeError:
            self._placesLimit = 0
        return self._placesLimit

    def setPlacesLimit(self, limit):
        # "" means "no limit" (0); anything non-numeric is a user error.
        if limit == "":
            limit = "0"
        try:
            l = int(limit)
        except ValueError:
            raise FormValuesError(_("Please introduce a number for the limit of places"))
        self._placesLimit = l
        self.updateCurrentNoPlaces()

    def getCurrentNoPlaces(self):
        # Lazy default for instances persisted before the counter existed.
        try:
            if self._currentNoPlaces:
                pass
        except AttributeError:
            self._currentNoPlaces = 0
        return self._currentNoPlaces

    def hasAvailablePlaces(self):
        if self.getCurrentNoPlaces() >= self.getPlacesLimit():
            return False
        return True

    def getNoPlacesLeft(self):
        return self.getPlacesLimit() - self.getCurrentNoPlaces()

    def increaseNoPlaces(self, n):
        # A limit of 0 means "unlimited": the counter is not tracked then.
        if self.getPlacesLimit() > 0:
            if (self.getCurrentNoPlaces() + n) > self.getPlacesLimit():
                raise FormValuesError(_("We are sorry but there are not enough places for the social event \"%s\". \
") % (self.getCaption()))
            self._currentNoPlaces += n

    def decreaseNoPlaces(self, n):
        if self.getPlacesLimit() > 0 and self.getCurrentNoPlaces() > 0:
            if (self._currentNoPlaces - n) < 0:
                raise FormValuesError(_("Impossible to decrease %s places for \"%s\" because the current number of \
places would be less than zero") % (n, self.getCaption()))
            self._currentNoPlaces -= n

    def updateCurrentNoPlaces(self):
        """Recount booked places from scratch across all registrants."""
        self._currentNoPlaces = 0
        for reg in self._regForm.getConference().getRegistrantsList():
            for se in reg.getSocialEvents():
                if se.getSocialEventItem() == self:
                    self.increaseNoPlaces(se.getNoPlaces())

    def getRegistrationForm(self):
        return self._regForm

    def setRegistrationForm(self, rf):
        self._regForm = rf

    def isCancelled(self):
        try:
            if self._cancelled:
                pass
        except AttributeError:
            self._cancelled = False
        return self._cancelled

    def setCancelled(self, v):
        self._cancelled = v

    def getCancelledReason(self):
        try:
            if self._cancelledReason:
                pass
        except AttributeError:
            self._cancelledReason = ""
        return self._cancelledReason

    def setCancelledReason(self, cr):
        self._cancelledReason = cr

    def getMaxPlacePerRegistrant(self):
        try:
            return self._maxPlacePerRegistrant
        except AttributeError:
            self._maxPlacePerRegistrant = 9
        return self._maxPlacePerRegistrant

    def setMaxPlacePerRegistrant(self, numPlace):
        self._maxPlacePerRegistrant = numPlace

    def isBillable(self):
        try:
            return self._billable
        except AttributeError:
            self._billable = False
        return self._billable

    def setBillable(self, v):
        self._billable = v

    def isPricePerPlace(self):
        try:
            return self._pricePerPlace
        except AttributeError:
            self._pricePerPlace = False
        return self._pricePerPlace

    def setPricePerPlace(self, v):
        self._pricePerPlace = v

    def getPrice(self):
        try:
            return self._price
        except AttributeError:
            self.setPrice(0)
        return self._price

    def setPrice(self, price):
        # PRICE_PATTERN keeps only the numeric part of the submitted string.
        if price:
            match = PRICE_PATTERN.match(price)
            if match:
                price = match.group(1)
            else:
                raise MaKaCError(_('The price is in incorrect format!'))
        self._price = price

    def getCurrency(self):
        return self._regForm.getCurrency()

    def remove(self):
        self.setCancelled(True)
        self.delete()

    def delete(self):
        # Detach from the form and park the object in the trash can so it can
        # still be recovered later.
        self.setRegistrationForm(None)
        TrashCanManager().add(self)

    def recover(self, rf):
        self.setRegistrationForm(rf)
        TrashCanManager().remove(self)

    def getLocator(self):
        """Return a globally unique Locator for this SocialEventItem; empty
        Locator when the form is not attached to a conference."""
        if self.getRegistrationForm().getConference() == None:
            return Locator()
        lconf = self.getRegistrationForm().getLocator()
        lconf["socialEventId"] = self.getId()
        return lconf

    @staticmethod
    def _cmpCaption(se1, se2):
        # Case-insensitive caption comparator for sorting.
        return cmp(se1.getCaption().lower(), se2.getCaption().lower())
class SocialEventForm(BaseForm, Fossilizable):
    """Registration-form section through which registrants book social events.

    Holds SocialEventItem objects keyed by id, an intro sentence, an optional
    "mandatory" flag and a selection mode ("multiple" or "unique").
    """
    fossilizes(IRegFormSocialEventSectionFossil)
    _iterableContainer = '_socialEvents'
    def __init__(self, regForm, data=None):
        BaseForm.__init__(self)
        # Counter producing unique ids for SocialEventItem children.
        self._socialEventItemGenerator = Counter()
        self._regForm = regForm
        self._title = "Social Events"
        self._description = ""
        self._introSentence = self._getDefaultIntroValue()
        self._mandatory = False
        self._selectionType = "multiple"
        self._socialEvents = PersistentMapping()
        if data is not None:
            self._title = data.get("title", self._title)
            self._description = data.get("description", self._description)
            self._mandatory = data.get('mandatory', False)
        self._id = "socialEvents"
    def getId(self):
        # Lazy default for instances persisted before _id existed.
        try:
            if self._id:
                pass
        except AttributeError, e:
            self._id = "socialEvents"
        return self._id
    def setValues(self, data):
        # NOTE(review): the default title "Sessions" looks copy-pasted from
        # SessionsForm.setValues; callers always supply "title", so the
        # default is effectively dead — confirm before changing.
        self.setTitle(data.get("title", "Sessions"))
        self.setDescription(data.get("description", ""))
        self.setIntroSentence(data.get("intro", ""))
        self.setSelectionType(data.get("selectionType", "multiple"))
        self.setMandatory(data.get('mandatory', False))
    def getValues(self):
        values = {}
        values["title"] = self.getTitle()
        values["description"] = self.getDescription()
        values["intro"] = self.getIntroSentence()
        values["selectionType"] = self.getSelectionTypeId()
        values["mandatory"] = self.getMandatory()
        return values
    def clone(self, registrationForm):
        """Deep-copy this section (settings plus cloned event items) for a
        cloned registration form."""
        sef = SocialEventForm(registrationForm)
        sef.setValues(self.getValues())
        sef.setEnabled(self.isEnabled())
        for se in self.getSocialEventList():
            sef.addSocialEvent(se.clone(registrationForm))
        return sef
    def getTitle(self):
        return self._title
    def setTitle(self, title):
        self._title = title
    def getDescription(self):
        return self._description
    def setDescription(self, description):
        self._description = description
    def getMandatory(self):
        # Lazy default for instances persisted before the flag existed.
        try:
            return self._mandatory
        except AttributeError:
            self._mandatory = False
            return False
    def setMandatory(self, value):
        self._mandatory = value
    def getRegistrationForm(self):
        try:
            if self._regForm:
                pass
        except AttributeError, e:
            self._regForm = None
        return self._regForm
    def getConference(self):
        if self.getRegistrationForm() is not None:
            return self.getRegistrationForm().getConference()
        return None
    def _getDefaultIntroValue(self):
        return "Select the social events you would like to attend and how many places you will need"
    def getIntroSentence(self):
        try:
            if self._introSentence:
                pass
        except AttributeError, e:
            self._introSentence = self._getDefaultIntroValue()
        return self._introSentence
    def setIntroSentence(self, intro):
        self._introSentence = intro
    def getSelectionTypeList(self):
        # Mapping of selection-mode id -> user-facing caption.
        try:
            if self._selectionTypeList:
                pass
        except AttributeError, e:
            self._selectionTypeList = {"multiple": "Multiple choice",
                                       "unique": "Unique choice"}
        return self._selectionTypeList
    def _getSelectionType(self):
        try:
            if self._selectionType:
                pass
        except AttributeError, e:
            self._selectionType = "multiple"
        return self._selectionType
    def getSelectionTypeId(self):
        return self._getSelectionType()
    def getSelectionTypeCaption(self):
        return self.getSelectionTypeList()[self._getSelectionType()]
    def setSelectionType(self, id):
        self._selectionType = id
    def _generateNewSocialEventItemId(self):
        """Returns a new unique identifier for the current registration form
        """
        # NOTE(review): bare except re-creates the counter when missing; it
        # also swallows any other error from newCount().
        try:
            return str(self._socialEventItemGenerator.newCount())
        except:
            self._socialEventItemGenerator = Counter()
            return str(self._socialEventItemGenerator.newCount())
    def addSocialEvent(self, se):
        # Items arriving without an id (new, not recovered) get a fresh one.
        id = se.getId()
        if id == "":
            id = self._generateNewSocialEventItemId()
            se.setId(id)
        self._socialEvents[id] = se
    def removeSocialEvent(self, se):
        # The item is trashed (recoverable) before being unlinked here.
        se.remove()
        if self._socialEvents.has_key(se.getId().strip()):
            del(self._socialEvents[se.getId().strip()])
    def recoverSocialEvent(self, se):
        self.addSocialEvent(se)
        se.recover(self.getRegistrationForm())
    def getSocialEventById(self, id):
        if self._socialEvents.has_key(id.strip()):
            return self._socialEvents[id]
        return None
    def getSocialEventList(self, sort=False):
        v = self._socialEvents.values()
        if sort:
            v.sort(SocialEventItem._cmpCaption)
        return v
    def clearSocialEventList(self):
        for se in self.getSocialEventList():
            self.removeSocialEvent(se)
    def getLocator(self):
        """Return a globally unique Locator for this section; empty Locator
        when not attached to a conference."""
        if self.getConference() == None:
            return Locator()
        lconf = self.getConference().getLocator()
        lconf["sectionFieldId"] = self.getId()
        return lconf
class StatusValue(Persistent):
    """One selectable value of a registrant Status (e.g. "Yes" / "No")."""

    def __init__(self, st, data=None):
        self._status = st
        self._id = ""
        self._caption = ""
        if data is not None:
            self.setValues(data)

    def getValues(self):
        # Only the caption is externalised.
        return {"caption": self.getCaption()}

    def setValues(self, d):
        self.setCaption(d.get("caption", "-- no caption --"))

    def getId(self):
        return self._id

    def setId(self, id):
        self._id = id

    def getCaption(self):
        return self._caption

    def setCaption(self, cp):
        self._caption = cp

    def clone(self, st):
        # The clone belongs to *st* and gets a fresh (empty) id.
        duplicate = StatusValue(st)
        duplicate.setCaption(self.getCaption())
        return duplicate

    @staticmethod
    def _cmpCaption(sv1, sv2):
        # Whitespace- and case-insensitive caption comparator.
        return cmp(sv1.getCaption().strip().lower(), sv2.getCaption().strip().lower())
class Status(Persistent):
    """A custom status attached to a registration form.

    Owns a set of StatusValue options (a fresh Status starts with "Yes" and
    "No") keyed by a counter-generated id, plus an optional default value.
    """
    def __init__(self, regForm, data=None):
        self._regForm = regForm
        self._statusValues = {}
        # Counter producing unique ids for StatusValue children.
        self._valuesGenerator = Counter()
        self._id = ""
        self._caption = ""
        self._defaultValue = None
        if data is not None:
            self.setValues(data)
        self.addStatusValue(StatusValue(self, {"caption":"Yes"}))
        self.addStatusValue(StatusValue(self, {"caption":"No"}))
    def setValues(self, d):
        """Synchronise this status with dict *d*: update/add the listed
        values, pick the default, and drop values no longer listed."""
        self.setCaption(d.get("caption", ""))
        ids = []
        defaultValueSet = False
        if d.has_key("values") and type(d.get("values", [])) == list:
            for vd in d.get("values", []):
                id = vd.get("id", "")
                if self.getStatusValueById(id) is not None:
                    v = self.getStatusValueById(id)
                    v.setValues(vd)
                else:
                    v = StatusValue(self, vd)
                    self.addStatusValue(v)
                if d.get("defaultvalue", "").strip() == id:
                    defaultValueSet = True
                    self.setDefaultValue(v)
                ids.append(v.getId())
        if not defaultValueSet:
            self.setDefaultValue(None)
        # Iterate over a copy: removeStatusValue mutates the mapping.
        for v in self.getStatusValuesList()[:]:
            if v.getId() not in ids:
                self.removeStatusValue(v)
    def getValues(self):
        d = {}
        d["caption"] = self.getCaption()
        return d
    def getConference(self):
        return self._regForm.getConference()
    def getId(self):
        return self._id
    def setId(self, i):
        self._id = i
    def getCaption(self):
        return self._caption
    def setCaption(self, c):
        self._caption = c
    def setDefaultValue(self, stval):
        self._defaultValue = stval
    def getDefaultValue(self):
        return self._defaultValue
    def _generateValueId(self):
        """Returns a new unique identifier for the current registration form
        """
        # NOTE(review): bare except re-creates the counter when missing; it
        # also swallows any other error from newCount().
        try:
            return str(self._valuesGenerator.newCount())
        except:
            self._valuesGenerator = Counter()
            return str(self._valuesGenerator.newCount())
    def getStatusValues(self):
        return self._statusValues
    def getStatusValuesList(self, sort=False):
        r = self._statusValues.values()
        if sort:
            r.sort(StatusValue._cmpCaption)
        return r
    def hasStatusValue(self, v):
        if v is not None and self.getStatusValues().has_key(v.getId()):
            return True
        return False
    def getStatusValueById(self, id):
        if self.getStatusValues().has_key(id):
            return self.getStatusValues()[id]
        return None
    def addStatusValue(self, v):
        v.setId(self._generateValueId())
        self.getStatusValues()[v.getId()] = v
        self.notifyModification()
    def removeStatusValue(self, v):
        if self.getStatusValues().has_key(v.getId()):
            del self.getStatusValues()[v.getId()]
            self.notifyModification()
    def _cmpCaption(s1, s2):
        # Whitespace- and case-insensitive caption comparator.
        return cmp(s1.getCaption().lower().strip(), s2.getCaption().lower().strip())
    _cmpCaption = staticmethod(_cmpCaption)
    def getLocator(self):
        """Return a globally unique Locator for this Status; empty Locator
        when not attached to a conference."""
        if self.getConference() == None:
            return Locator()
        lconf = self.getConference().getLocator()
        lconf["statusId"] = self.getId()
        return lconf
    def notifyModification(self):
        """Method called to notify that the registration form has been modified.
        """
        # Plain dicts are mutated in place, so mark the persistent object
        # dirty explicitly for ZODB.
        self._p_changed = 1
# Users --------- FINAL INFORMATION STORED FROM THE REGISTRATION FORM
class Registrant(Persistent, Fossilizable):
    """A person registered for a conference.

    Aggregates the personal data, session/accommodation/social-event choices,
    per-section miscellaneous answers, statuses, uploaded attachments and the
    payment state (total, payed flag, transaction info) produced by filling in
    the conference's RegistrationForm.
    """
    fossilizes(IRegFormRegistrantFossil, IRegFormRegistrantBasicFossil, IRegFormRegistrantFullFossil)
    def __init__(self):
        self._conf = None
        self._avatar = None
        self._id = ""
        self._complete = False
        self._registrationDate = nowutc()
        # Check-in support: flag, timestamp and a UUID used for e-tickets.
        self._checkedIn = False
        self._checkInDate = None
        self._checkInUUID = str(uuid4())
        # Personal data fields.
        self._title = ""
        self._firstName = ""
        self._surname = ""
        self._position = ""
        self._institution = ""
        self._address = ""
        self._city = ""
        self._country = ""
        self._phone = ""
        self._fax = ""
        self._email = ""
        self._personalHomepage = ""
        # Form choices.
        self._sessions = []
        self._socialEvents = []
        self._accommodation = Accommodation(self)
        self._reasonParticipation = ""
        # Per-section answers, keyed by general-section id.
        self._miscellaneous = {}
        self._parmasReturn = {}
        self._statuses = {}
        # Payment state.
        self._total = 0
        self._hasPay = False
        self._transactionInfo = None
        self._randomId = self._generateRandomId()
        self._attachmentsCounter = Counter()
    def __cmp__(self, other):
        if type(self) is not type(other):
            # This is actually dangerous and the ZODB manual says not to do this
            # because it relies on memory order. However, this branch should never
            # be taken anyway since we do not store different types in the same set
            # or use them as keys.
            return cmp(hash(self), hash(other))
        if self.getConference() == other.getConference():
            return cmp(self.getId(), other.getId())
        return cmp(self.getConference(), other.getConference())
    def isPayedText(self):
        # Display helper: "-" means there is nothing to pay.
        if self.getPayed():
            return "Yes"
        elif not self.doPay():
            return "-"
        return "No"
    def getIdPay(self):
        # Identifier used by payment plugins: conference id + registrant id.
        return "c%sr%s" % (self._conf.getId(), self.getId())
    def setTotal(self, total):
        self._total = total
    def getTotal(self):
        # Lazy default for instances persisted before totals existed.
        try:
            return self._total
        except:
            self.setTotal(0)
        return self._total
    def updateTotal(self):
        """Recompute the total price from billable miscellaneous answers plus
        all billed forms (accommodation, social events, sessions)."""
        total = 0
        for gs in self.getRegistrationForm().getGeneralSectionFormsList():
            if gs.isEnabled():
                mg = self.getMiscellaneousGroupById(gs.getId())
                if mg != None:
                    for miscItem in mg.getResponseItemList():
                        if miscItem.isBillable():
                            price = float(miscItem.getPrice() or 0)
                        else:
                            price = 0
                        quantity = miscItem.getQuantity()
                        total += price * quantity
        for bf in self.getBilledForms():
            for item in bf.getBilledItems():
                total += item.getPrice() * item.getQuantity()
        self.setTotal(total)
    def doPay(self):
        # True while there is an outstanding amount.
        return self.getTotal() > 0 and not self.getPayed()
    def setPersonalData(self, data):
        # Keep the conference's email index in sync before changing the email.
        self.getConference().updateRegistrantIndexByEmail(self, data.get("email", ""))
        self.setTitle(data.get("title", ""))
        self.setFirstName(data.get("firstName", ""))
        self.setSurName(data.get("surname", ""))
        self.setPosition(data.get("position", ""))
        self.setInstitution(data.get("institution", ""))
        self.setAddress(data.get("address", ""))
        self.setCity(data.get("city", ""))
        self.setCountry(data.get("country", ""))
        self.setPhone(data.get("phone", ""))
        self.setFax(data.get("fax", ""))
        self.setEmail(data.get("email", ""))
        self.setPersonalHomepage(data.get("personalHomepage", ""))
    def setValues(self, data, av):
        """Populate the registrant from submitted form *data* for avatar *av*.

        Once the registrant has paid, billable selections (sessions,
        accommodation, social events) are preserved and only non-billable
        choices may change.  Raises FormValuesError on invalid input.
        """
        self._avatar = av
        if self.getRegistrationForm().getReasonParticipationForm().isEnabled():
            self.setReasonParticipation(data.get("reason", ""))
        if self.getRegistrationForm().getSessionsForm().isEnabled():
            sessions = data.get("sessions", [])
            if not isinstance(sessions, list):
                sessions = [sessions]
            if not self.getPayed():
                self.setSessions(sessions)
            else:
                # First keep all sessions which are billable (they are not submitted anymore)
                newSessions = [session for session in self.getSessionList() if session.isBillable()]
                # Then take all chosen sessions which are not billable
                newSessions += [session for session in sessions if not session.isBillable()]
                self.setSessions(newSessions)
        else:
            self.setSessions([])
        self.setSessionBillingEnabled(self.getRegistrationForm().getSessionsForm().getType() != "2priorities")
        if self.getRegistrationForm().getAccommodationForm().isEnabled():
            ad = data.get("arrivalDate", None)
            dd = data.get("departureDate", None)
            if ad == "nodate":
                raise FormValuesError(_("Arrival date cannot be empty."))
            elif dd == "nodate":
                raise FormValuesError(_("Departure date cannot be empty."))
            if ad is not None and dd is not None:
                # Dates are submitted as "DD-MM-YYYY".
                ad = map(lambda x: int(x), ad.split("-"))
                ad = datetime(ad[2], ad[1], ad[0])
                dd = map(lambda x: int(x), dd.split("-"))
                dd = datetime(dd[2], dd[1], dd[0])
                if ad > dd:
                    raise FormValuesError(_("Arrival date has to be earlier than departure date"))
            # Allow changing of the dates only if the current accomodation is not billable or the user hasn't paid yet
            currentAccoType = self._accommodation.getAccommodationType()
            if not self.getPayed() or currentAccoType is None or not currentAccoType.isBillable():
                self._accommodation.setArrivalDate(ad)
                self._accommodation.setDepartureDate(dd)
            accoType = data.get("accommodationType", None)
            if accoType is not None and accoType.isCancelled():
                accoType = None
            if self.getRegistrationForm().getAccommodationForm().getAccommodationTypesList() != []:
                # Only change the accommodation type if:
                # - the registrant hasn't paid yet OR
                # - neither the current nor the new accommodation is billable
                if not self.getPayed() or \
                    ((currentAccoType is None or not currentAccoType.isBillable()) and \
                     (accoType is None or not accoType.isBillable())):
                    if self.getRegistrationForm().getAccommodationForm().getAccommodationTypesList() != [] and data.get("accommodation_type", None) is None:
                        raise FormValuesError(_("It is mandatory to choose an accommodation in order to register"))
                    self._accommodation.setAccommodationType(accoType)
        else: # AccommodationForm disabled
            self._accommodation.setAccommodationType(None)
        if self.getRegistrationForm().getSocialEventForm().isEnabled():
            for seItem in self.getSocialEvents()[:]:
                # Remove all items which can be added back (i.e. if paid only non-billable ones)
                if not (self.getPayed() and seItem.isBillable()):
                    self.removeSocialEventById(seItem.getId())
            for seItem in data.get("socialEvents", []):
                # Only add item if the registrant hasn't paid yet or the item is not billable
                if seItem and (not self.getPayed() or not seItem.isBillable()):
                    newSE = SocialEvent(seItem, int(data.get("places-%s" % seItem.getId(), "1")))
                    self.addSocialEvent(newSE)
            if self.getRegistrationForm().getSocialEventForm().getMandatory() and not self.getSocialEvents():
                raise FormValuesError(_('You have to select at least one social event'))
        else:
            for seItem in self.getSocialEvents()[:]:
                self.removeSocialEventById(seItem.getId())
        #if not self.getPayed():
        #    self._miscellaneous = {}
        total = 0
        for gs in self.getRegistrationForm().getGeneralSectionFormsList():
            if gs.isEnabled():
                mg = self.getMiscellaneousGroupById(gs.getId())
                if mg == None:
                    mg = MiscellaneousInfoGroup(self, gs)
                    self.addMiscellaneousGroup(mg)
                #Mods to support sorting fields
                #for f in gs.getFields():
                for f in gs.getSortedFields():
                    if not f.isDisabled():
                        f.getInput().setResponseValue(mg.getResponseItemById(f.getId()), data, self, mg)
                for miscItem in mg.getResponseItemList():
                    if miscItem.isBillable():
                        price = float(miscItem.getPrice() or 0)
                    else:
                        price = 0
                    quantity = miscItem.getQuantity()
                    total += price * quantity
        for bf in self.getBilledForms():
            for item in bf.getBilledItems():
                total += item.getPrice() * item.getQuantity()
        # The total is frozen once the registrant has paid.
        if not self.getPayed():
            self.setTotal(total)
        self.setPersonalData(self.getRegistrationForm().getPersonalData().getRegistrantValues(self))
        self._complete = True
    def isComplete(self):
        # Lazy default for instances persisted before the flag existed.
        try:
            if self._complete:
                pass
        except AttributeError, e:
            self._complete = False
        return self._complete
    def isCheckedIn(self):
        try:
            if self._checkedIn:
                pass
        except AttributeError:
            self._checkedIn = False
        return self._checkedIn
    def setCheckedIn(self, checkedIn):
        # Checking in stamps the current time; checking out clears it.
        if checkedIn:
            self._checkInDate = nowutc()
        else:
            self._checkInDate = None
        self._checkedIn = checkedIn
    def getCheckInUUID(self):
        try:
            if self._checkInUUID:
                pass
        except AttributeError:
            self._checkInUUID = str(uuid4())
        return self._checkInUUID
    def getCheckInDate(self):
        try:
            if self._checkInDate:
                pass
        except AttributeError:
            self._checkInDate = None
        return self._checkInDate
    def getAdjustedCheckInDate(self,tz=None):
        """Check-in date converted to *tz* (defaults to the conference
        timezone; unknown names fall back to UTC); None if not checked in."""
        if not tz:
            tz = self.getConference().getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        checkInDate = self.getCheckInDate()
        if checkInDate:
            return checkInDate.astimezone(timezone(tz))
    def getPayed(self):
        try:
            return self._hasPay
        except:
            self.setPayed(False)
        return self._hasPay
    def setPayed(self, hasPay):
        self._hasPay = hasPay
    def getTransactionInfo(self):
        try:
            return self._transactionInfo
        except:
            self.setTransactionInfo(False)
        return self._transactionInfo
    def setTransactionInfo(self, transactionInfo):
        self._transactionInfo = transactionInfo
    def _generateRandomId(self):
        # NOTE(review): md5 over random()+time is not cryptographically
        # strong — do not rely on this id for security-sensitive purposes.
        n = datetime.now()
        return md5(str(random.random() + time.mktime(n.timetuple()))).hexdigest()
    def getRandomId(self):
        try:
            if self._randomId:
                pass
        except AttributeError, e:
            self._randomId = self._generateRandomId()
        return self._randomId
    def getId(self):
        return self._id
    def setId(self, id):
        self._id = str(id).strip()
    def getConference(self):
        return self._conf
    def setConference(self, c):
        self._conf = c
    def getOwner(self):
        return self.getConference()
    def setOwner(self, o):
        self.setConference(o)
    def getAvatar(self):
        return self._avatar
    def setAvatar(self, a):
        # Re-point the avatar<->registrant link before storing the new avatar.
        if isinstance(self._avatar, MaKaC.user.Avatar):
            self._avatar.unlinkTo(self, "registrant")
        self._avatar = a
        a.linkTo(self, "registrant")
    def getRegistrationForm(self):
        return self.getConference().getRegistrationForm()
    def getRegistrationDate(self):
        try:
            if self._registrationDate:
                pass
        except AttributeError, e:
            self._registrationDate = None
        return self._registrationDate
    def getAdjustedRegistrationDate(self, tz=None):
        """Registration date converted to *tz* (defaults to the conference
        timezone; unknown names fall back to UTC)."""
        if not tz:
            tz = self.getConference().getTimezone()
        if tz not in all_timezones:
            tz = 'UTC'
        return self.getRegistrationDate().astimezone(timezone(tz))
    def getTitle(self):
        return self._title
    def setTitle(self, v):
        self._title = v
    def getFirstName(self):
        return self._firstName
    def setFirstName(self, v):
        self._firstName = v
    def getSurName(self):
        return self._surname
    getFamilyName = getSurName
    def setSurName(self, v):
        self._surname = v
    setFamilyName = setSurName
    def getFullName(self, title=True, firstNameFirst=False):
        """Formatted name: either "First Last" or "LAST, First" (optionally
        prefixed with the title in the latter form)."""
        if firstNameFirst:
            res = "%s %s" % (self.getFirstName(), self.getFamilyName())
            res = res.strip()
        else:
            res = safe_upper(self.getFamilyName())
            if self.getFirstName():
                res = "%s, %s" % (res, self.getFirstName())
            if title and self.getTitle():
                res = "%s %s" % (self.getTitle(), res)
        return res
    def getPosition(self):
        return self._position
    def setPosition(self, v):
        self._position = v
    def getInstitution(self):
        return self._institution
    def setInstitution(self, v):
        self._institution = v
    def getAddress(self):
        return self._address
    def setAddress(self, v):
        self._address = v
    def getCity(self):
        return self._city
    def setCity(self, v):
        self._city = v
    def getCountry(self):
        return self._country
    def setCountry(self, v):
        self._country = v
    def getPhone(self):
        return self._phone
    def setPhone(self, v):
        self._phone = v
    def getFax(self):
        return self._fax
    def setFax(self, v):
        self._fax = v
    def getEmail(self):
        return self._email
    def setEmail(self, v):
        self._email = v
    def getPersonalHomepage(self):
        return self._personalHomepage
    def setPersonalHomepage(self, v):
        self._personalHomepage = v
    def getSessionList(self):
        return self._sessions
    def addSession(self, ses):
        self._sessions.append(ses)
        self.notifyModification()
    def removeSession(self, ses):
        self._sessions.remove(ses)
        self.notifyModification()
    def setSessions(self, sesList):
        self._sessions = sesList
        for ses in self._sessions:
            ses.setRegistrant(self)
        self.notifyModification()
    def setAccommodation(self, a):
        self._accommodation = a
    def getAccommodation(self):
        return self._accommodation
    def setReasonParticipation(self, a):
        self._reasonParticipation = a
    def getReasonParticipation(self):
        return self._reasonParticipation
    def getSocialEvents(self):
        # Lazy default for instances persisted before social events existed.
        try:
            if self._socialEvents:
                pass
        except AttributeError, e:
            self._socialEvents = []
        return self._socialEvents
    def getSocialEventById(self, id):
        for se in self.getSocialEvents():
            if id == se.getId():
                return se
        return None
    def setSocialEvents(self, se):
        self._socialEvents = se
        self.notifyModification()
    def addSocialEvent(self, se):
        se.setRegistrant(self)
        self.getSocialEvents().append(se)
        self.notifyModification()
    def removeSocialEventById(self, id):
        se = self.getSocialEventById(id)
        se.delete()
        self.getSocialEvents().remove(se)
        self.notifyModification()
    def getLocator(self):
        """Return a globally unique Locator for this registrant; empty
        Locator when not attached to a conference."""
        if self.getConference() == None:
            return Locator()
        lconf = self.getConference().getLocator()
        lconf["registrantId"] = self.getId()
        return lconf
    def notifyModification(self):
        """Method called to notify the current registered participant has been modified.
        """
        # Plain containers are mutated in place, so mark the persistent
        # object dirty explicitly for ZODB.
        self._p_changed = 1
    def _cmpFamilyName(r1, r2):
        # Case-insensitive family-name comparator; None sorts first.
        if r1 is None and r2 is None:
            return 0
        if r1 is None:
            return -1
        if r2 is None:
            return 1
        return cmp(r1.getFamilyName().lower(), r2.getFamilyName().lower())
    _cmpFamilyName = staticmethod(_cmpFamilyName)
    def getMiscellaneousGroups(self):
        # Lazy default for instances persisted before misc groups existed.
        try:
            if self._miscellaneous:
                pass
        except AttributeError, e:
            self._miscellaneous = {}
        return self._miscellaneous
    def getMiscellaneousGroupList(self):
        return self.getMiscellaneousGroups().values()
    def getMiscellaneousGroupById(self, id):
        if self.getMiscellaneousGroups().has_key(id):
            return self.getMiscellaneousGroups()[id]
        return None
    def addMiscellaneousGroup(self, g):
        if not self.getMiscellaneousGroups().has_key(g.getId()):
            self.getMiscellaneousGroups()[g.getId()] = g
            self.notifyModification()
    def setSessionBillingEnabled(self, v):
        self._sessionBillingEnabled = v
    def isSessionBillingEnabled(self):
        try:
            return self._sessionBillingEnabled
        except:
            self.setSessionBillingEnabled(False)
        return self._sessionBillingEnabled
    def getBilledForms(self):
        """Return BilledItemsWrapper objects for every selection that may be
        billed: accommodation, social events and (when enabled) sessions."""
        forms = []
        if self._accommodation:
            forms.append(BilledItemsWrapper([self._accommodation]))
        if self._socialEvents:
            forms.append(BilledItemsWrapper(self._socialEvents))
        if self._sessions and self.isSessionBillingEnabled():
            forms.append(BilledItemsWrapper(self._sessions))
        return forms
    def getStatuses(self):
        # Lazy default for instances persisted before statuses existed.
        try:
            if self._statuses:
                pass
        except AttributeError, e:
            self._statuses = {}
        return self._statuses
    def getStatusesList(self):
        return self.getStatuses().values()
    def addStatus(self, s):
        self.getStatuses()[s.getId()] = s
        self.notifyModification()
    def removeStatus(self, s):
        if self.getStatuses().has_key(s.getId()):
            del self.getStatuses()[s.getId()]
            self.notifyModification()
    def getStatusById(self, id):
        # Created on demand from the form's Status definition, with its
        # default value pre-selected when one exists.
        v = self.getStatuses().get(id, None)
        if v is None:
            st = self._conf.getRegistrationForm().getStatusById(id)
            v = RegistrantStatus(self, st)
            if st.getDefaultValue() is not None:
                v.setStatusValue(st.getDefaultValue())
            self.addStatus(v)
        return v
    def setModificationDate(self):
        pass
    def getAttachments(self):
        try:
            if self._attachments:
                pass
        except AttributeError:
            self._attachments = {}
        return self._attachments
    def getAttachmentList(self):
        return self.getAttachments().values()
    def getAttachmentById(self, id):
        return self.getAttachments().get(id, None)
    def _getAttachmentsCounter(self):
        # Lazily (re)create the counter, then hand out the next id.
        try:
            if self._attachmentsCounter:
                pass
        except AttributeError:
            self._attachmentsCounter = Counter()
        return self._attachmentsCounter.newCount()
    def __addFile(self, file):
        # Archive into the conference repository and index by file id.
        file.archive(self.getConference()._getRepository())
        self.getAttachments()[file.getId()] = file
        self.notifyModification()
    def saveFile(self, fileUploaded):
        """Persist an uploaded file as a LocalFile attachment and return it.

        The upload is first spooled to a temporary file, then archived via
        __addFile.
        """
        from MaKaC.conference import LocalFile
        cfg = Config.getInstance()
        tempPath = cfg.getUploadedFilesTempDir()
        tempFileName = tempfile.mkstemp(suffix="IndicoRegistrant.tmp", dir=tempPath)[1]
        f = open(tempFileName, "wb")
        f.write(fileUploaded.file.read())
        f.close()
        file = LocalFile()
        file.setFileName(fileUploaded.filename)
        file.setFilePath(tempFileName)
        file.setOwner(self)
        file.setId(self._getAttachmentsCounter())
        self.__addFile(file)
        return file
    def deleteFile(self, fileId):
        file = self.getAttachments()[fileId]
        file.delete()
        del self.getAttachments()[fileId]
        self.notifyModification()
    def removeResource(self, res):
        """Necessary because LocalFile.delete (see _deleteFile) is calling this method.
        In our case, nothing to do.
        """
        pass
    def canUserModify(self, user):
        # Conference managers and the registrant's own avatar may modify.
        return self.getConference().canUserModify(user) or (user is not None and user == self.getAvatar())
class BilledItemsWrapper(object):
    """Filters a collection of purchases down to the ones that must appear
    on the bill (billable and not cancelled)."""

    def __init__(self, items):
        self._items = items

    def getBilledItems(self):
        billed = []
        for purchase in self._items:
            if purchase.isBillable() and not purchase.isCancelled():
                billed.append(purchase.getBilledItem())
        return billed
class BilledItem(object):
    """Read-only snapshot of one billable line (caption, price, quantity,
    currency) on a registrant's bill."""

    def __init__(self, caption, price, quantity, currency):
        self._desc = caption
        self._rawPrice = price
        self._qty = quantity
        self._ccy = currency

    def getCaption(self):
        return self._desc

    def getPrice(self):
        # The stored price may be a string; always expose it as a float.
        return float(self._rawPrice)

    def getQuantity(self):
        return self._qty

    def getCurrency(self):
        return self._ccy
class Accommodation(Persistent):
    """A registrant's accommodation booking: type, stay dates and a snapshot
    of the billing data taken when the type is selected."""

    def __init__(self, reg=None):
        self._registrant = reg
        self._arrivalDate = None
        self._departureDate = None
        self._accommodationType = None
        self._price = 0
        self._billable = False
        self._currency = ""

    def isCancelled(self):
        return self._accommodationType.isCancelled()

    def getRegistrant(self):
        # Legacy persisted instances may lack the attribute; the former bare
        # ``except:`` is narrowed to AttributeError so that real errors are
        # no longer swallowed.
        try:
            return self._registrant
        except AttributeError:
            return None

    def setRegistrant(self, reg):
        self._registrant = reg

    def getArrivalDate(self):
        return self._arrivalDate

    def setArrivalDate(self, ad):
        self._arrivalDate = ad

    def getDepartureDate(self):
        return self._departureDate

    def setDepartureDate(self, dd):
        self._departureDate = dd

    def getNights(self):
        # Plain date difference; negative if departure precedes arrival.
        return (self._departureDate - self._arrivalDate).days

    def getPrice(self):
        try:
            return self._price
        except AttributeError:
            return 0

    def isBillable(self):
        try:
            return self._billable
        except AttributeError:
            return False

    def getCurrency(self):
        try:
            return self._currency
        except AttributeError:
            # Bugfix: the previous fallback read ``self._regForm``, which does
            # not exist on Accommodation, so this branch always raised.
            # Presumably the currency should come from the selected
            # accommodation type (cf. SocialEvent.getCurrency) -- TODO confirm.
            self._currency = self._accommodationType.getCurrency()
        return self._currency

    def getBilledItem(self):
        """One billed line: the accommodation type, priced per night."""
        return BilledItem(self._accommodationType.getCaption(), self.getPrice(), self.getNights(), self.getCurrency())

    def getAccommodationType(self):
        return self._accommodationType

    def setAccommodationType(self, at):
        """Switch to accommodation type *at*, keeping the per-type place
        counters consistent and re-snapshotting the billing data."""
        if self.getAccommodationType() != at:
            if self.getAccommodationType() is not None:
                self.getAccommodationType().decreaseNoPlaces()
            if at is not None:
                at.increaseNoPlaces()
                self._price = at.getPrice()
                self._billable = at.isBillable()
                self._currency = at.getCurrency()
            else:
                self._price = 0
                self._billable = False
                self._currency = ""
            self._accommodationType = at
class SocialEvent(Persistent, Fossilizable):
    """A registrant's booking of a social event for a number of places.

    Billing attributes are copied from the event item when the booking is
    made (see addSEItem).
    """

    fossilizes(IRegFormSocialEventFossil)

    def __init__(self, se, noPlaces, reg=None):
        # NOTE(review): ``reg`` is accepted but ignored; the registrant is
        # attached later via setRegistrant().  Confirm before relying on it.
        self._registrant = None
        self.addSEItem(se, noPlaces)

    def addSEItem(self, se, noPlaces):
        """Attach event item *se*, reserve *noPlaces* places on it and take a
        snapshot of its billing data."""
        self._socialEventItem = se
        self._noPlaces = noPlaces
        self._socialEventItem.increaseNoPlaces(noPlaces)
        self._price = self._socialEventItem.getPrice()
        self._pricePerPlace = self._socialEventItem.isPricePerPlace()
        self._billable = self._socialEventItem.isBillable()
        self._currency = self._socialEventItem.getCurrency()

    def getRegistrant(self):
        # Bare ``except:`` narrowed to AttributeError (legacy-attribute guard).
        try:
            return self._registrant
        except AttributeError:
            return None

    def setRegistrant(self, reg):
        self._registrant = reg

    def getNoPlaces(self):
        return self._noPlaces

    def getCurrency(self):
        try:
            return self._currency
        except AttributeError:
            self._currency = self._socialEventItem.getCurrency()
        return self._currency

    def getPrice(self):
        try:
            return self._price
        except AttributeError:
            return 0

    def isBillable(self):
        try:
            return self._billable
        except AttributeError:
            return False

    def isPricePerPlace(self):
        try:
            return self._pricePerPlace
        except AttributeError:
            return False

    def getBilledItem(self):
        # When priced per place the quantity is the number of booked places.
        quantity = 1
        if self._pricePerPlace:
            quantity = self.getNoPlaces()
        return BilledItem(self.getCaption(), self.getPrice(), quantity, self.getCurrency())

    def getSocialEventItem(self):
        return self._socialEventItem

    def getId(self):
        return self._socialEventItem.getId()

    def isCancelled(self):
        return self._socialEventItem.isCancelled()

    def getCancelledReason(self):
        return self._socialEventItem.getCancelledReason()

    def getCaption(self):
        return self._socialEventItem.getCaption()

    def getMaxPlacePerRegistrant(self):
        return self._socialEventItem.getMaxPlacePerRegistrant()

    def delete(self):
        # Give the reserved places back to the event item.
        self._socialEventItem.decreaseNoPlaces(self._noPlaces)
class RegistrantSession(Persistent):
    """A registrant's enrolment in one session, with billing data snapshotted
    from the session record at enrolment time."""

    def __init__(self, ses, reg=None):
        self._regSession = ses
        self._registrant = reg
        self._price = self._regSession.getPrice()
        self._billable = self._regSession.isBillable()
        self._currency = self._regSession.getCurrency()

    def getRegistrant(self):
        return self._registrant

    def setRegistrant(self, reg):
        self._registrant = reg

    def getCurrency(self):
        # Legacy instances may miss the attribute or hold an empty value.
        if not hasattr(self, "_currency") or not self._currency:
            self._currency = self._regSession.getCurrency()
        return self._currency

    def getPrice(self):
        # Bare ``except:`` narrowed to AttributeError (legacy-attribute guard).
        try:
            return self._price
        except AttributeError:
            return 0

    def isBillable(self):
        try:
            return self._billable
        except AttributeError:
            return False

    def getBilledItem(self):
        return BilledItem(self.getCaption(), self.getPrice(), 1, self.getCurrency())

    def getRegSession(self):
        return self._regSession

    def getSession(self):
        return self._regSession.getSession()

    def getId(self):
        return self._regSession.getId()

    def getCaption(self):
        return self._regSession.getCaption()
    getTitle = getCaption

    def getCode(self):
        return self._regSession.getCode()

    def isCancelled(self):
        return self._regSession.isCancelled()
class MiscellaneousInfoGroup(Persistent, Fossilizable):
    """A registrant's answers for one general section of the registration
    form, keyed by field id."""

    fossilizes(IRegFormMiscellaneousInfoGroupFossil)

    def __init__(self, reg, gs):
        self._registrant = reg
        self._generalSection = gs
        self._id = gs.getId()
        self._responseItems = {}

    def getId(self):
        return self._id

    def getGeneralSection(self):
        return self._generalSection

    def getTitle(self):
        return self.getGeneralSection().getTitle()

    def getRegistrant(self):
        return self._registrant

    def getResponseItems(self):
        return self._responseItems

    def getResponseItemList(self):
        return self._responseItems.values()

    def addResponseItem(self, r):
        self._responseItems[r.getId()] = r
        self.notifyModification()

    def removeResponseItem(self, i):
        # dict.has_key() is deprecated; ``in`` is the portable equivalent.
        if i.getId() in self.getResponseItems():
            del self._responseItems[i.getId()]
            self.notifyModification()

    def getResponseItemById(self, id):
        # .get() replaces the old has_key()/[] pair; same None fallback.
        return self._responseItems.get(id, None)

    def clearResponses(self, gs=None):
        """Drop all responses, or only those for the fields of section *gs*."""
        if gs is None:
            self._responseItems = {}
            self.notifyModification()
        else:
            # Mods to support sorting fields: iterate the sorted field list.
            for f in gs.getSortedFields():
                self.removeResponseItem(f)

    def getLocator(self):
        """Gives back (Locator) a globaly unique identification encapsulated in
        a Locator object for the MiscellaneousInfoGroup instance """
        lconf = self.getRegistrant().getLocator()
        lconf["miscInfoId"] = self.getId()
        return lconf

    def notifyModification(self):
        # ZODB: flag the persistent object as dirty so the change is stored.
        self._p_changed = 1
class MiscellaneousInfoSimpleItem(Persistent):
    """One registrant's answer to a single general-section field, with a
    snapshot of the field's billing attributes.

    All getters use the try/except-AttributeError pattern so instances
    persisted before an attribute existed fall back to a default; the former
    bare ``except:`` clauses were narrowed so real errors propagate.
    """

    def __init__(self, group, field):
        self._group = group
        self._generalField = field
        self._id = field.getId()
        self._value = None
        self._billable = False
        self._price = 0.0
        self._quantity = 0
        self._currency = ""
        self._mandatory = False
        # TODO: When migrate to new database, take into account that HTMLName cannot be empty string
        self._HTMLName = ""

    def getHTMLName(self):
        try:
            if self._HTMLName == "":
                self._HTMLName = self.getGeneralField().getInput().getHTMLName()
        except AttributeError:
            self._HTMLName = ""
        return self._HTMLName

    def setHTMLName(self, HTMLName):
        self._HTMLName = HTMLName

    def isMandatory(self):
        try:
            return self._mandatory
        except AttributeError:
            self._mandatory = False
            return self._mandatory

    def setMandatory(self, mandatory):
        self._mandatory = mandatory

    def getCurrency(self):
        try:
            return self._currency
        except AttributeError:
            self.setCurrency("")
            return self._currency

    def setCurrency(self, currency):
        self._currency = currency

    def getQuantity(self):
        try:
            return self._quantity
        except AttributeError:
            self.setQuantity(0)
            return self._quantity

    def setQuantity(self, quantity):
        self._quantity = quantity

    def isBillable(self):
        try:
            return self._billable
        except AttributeError:
            self.setBillable(False)
            return self._billable

    def setBillable(self, v):
        self._billable = v

    def getPrice(self):
        try:
            return self._price
        except AttributeError:
            self.setPrice(0)
            return self._price

    def setPrice(self, price):
        self._price = price

    def getId(self):
        return self._id

    def getGeneralField(self):
        return self._generalField

    def getCaption(self):
        return self._generalField.getCaption()

    def getOwner(self):
        return self._group
    getGroup = getOwner

    def getValue(self):
        return self._value

    def setValue(self, v):
        self._value = v
class RegistrantStatus(Persistent):
    """The value a registrant holds for one registration-form status."""

    def __init__(self, reg, st, data=None):
        self._status = st
        self._registrant = reg
        self._value = None
        if data is not None:
            # Bugfix: setValues() was called without its required ``d``
            # argument, so passing a non-None ``data`` raised a TypeError.
            self.setValues(data)

    def setValues(self, d):
        self.setStatusValue(d.get("statusvalue", ""))

    def getValues(self):
        d = {}
        d["statusvalue"] = self.getStatusValue()
        return d

    def getId(self):
        return self._status.getId()

    def getCaption(self):
        return self._status.getCaption()

    def getStatusValue(self):
        # Fall back to the status default if the stored value is no longer a
        # valid choice for the status definition.
        if not self._status.hasStatusValue(self._value):
            self._value = self._status.getDefaultValue()
        return self._value

    def setStatusValue(self, v):
        self._value = v
class RegistrantMapping(object):
    """Dictionary-like view over a registrant, used for template placeholder
    substitution.  Keys are either well-known names (see ``_regDict``),
    ``s-<id>`` for a status, ``<groupId>`` for a section title, or
    ``<groupId>-<itemId>`` for a single answer.  Unknown keys yield " "."""

    def __init__(self, registrant):
        self._registrant = registrant
        self._regDict = {
            "FirstName":           self._registrant.getFirstName,
            "LastName":            self._registrant.getSurName,
            "Institution":         self._registrant.getInstitution,
            "Position":            self._registrant.getPosition,
            "Phone":               self._registrant.getPhone,
            "City":                self._registrant.getCity,
            "Address":             self._registrant.getAddress,
            "Email":               self._registrant.getEmail,
            "isPayed":             self._registrant.isPayedText,
            "idpayment":           self._registrant.getIdPay,
            "Country":             self._getCountry,
            "amountToPay":         self._getAmountToPay,
            "Accommodation":       self._getAccomodation,
            "SocialEvents":        self._getSocialEvents,
            "ReasonParticipation": self._getReasonParticipation,
            "RegistrationDate":    self._getRegistrationDate,
            "Sessions":            self._getSessions,
            "DepartureDate":       self._getDepartureDate,
            "ArrivalDate":         self._getArrivalDate,
            "checkedIn":           self._getCheckedIn,
            "checkInDate":         self._getCheckInDate
        }

    def __getitem__(self, key):
        # dict.has_key() is deprecated; ``in`` is the portable equivalent.
        if key in self._regDict:
            return self._regDict[key]()
        elif re.match("s-[0-9]+$", key):
            return self._getStatus(key[2:])
        elif re.match("[0-9]+$", key):
            return self._getGroup(key)
        elif re.match("[0-9]+-[0-9]+$", key):
            dashPos = key.find('-')
            return self._getItem(key[:dashPos], key[dashPos + 1:])
        else:
            return " "

    def _getCountry(self):
        return CountryHolder().getCountryById(self._registrant.getCountry())

    def _getAmountToPay(self):
        return "%.2f %s" % (self._registrant.getTotal(), self._registrant.getConference().getRegistrationForm().getCurrency())

    def _getAccomodation(self):
        if self._registrant.getAccommodation() is not None:
            if self._registrant.getAccommodation().getAccommodationType() is not None:
                return self._registrant.getAccommodation().getAccommodationType().getCaption()
        return ""

    def _getDepartureDate(self):
        accomodation = self._registrant.getAccommodation()
        if accomodation is not None:
            departure_date = accomodation.getDepartureDate()
            if departure_date is not None:
                return format_date(departure_date)
        return ""

    def _getArrivalDate(self):
        accomodation = self._registrant.getAccommodation()
        if accomodation is not None:
            arrival_date = accomodation.getArrivalDate()
            if arrival_date is not None:
                return format_date(arrival_date)
        return ""

    def _getSocialEvents(self):
        events = self._registrant.getSocialEvents()
        items = ["%s (%s)" % (item.getCaption(), item.getNoPlaces()) for item in events ]
        return "<br>".join(items)

    def _getReasonParticipation(self):
        return self._registrant.getReasonParticipation() or ""

    def _getRegistrationDate(self):
        registration_date = self._registrant.getAdjustedRegistrationDate()
        if registration_date is not None:
            return format_datetime(registration_date)
        else:
            return i18nformat("""-- _("date unknown")--""")

    def _getSessions(self):
        sessions = self._registrant.getSessionList()
        return "<br>".join([sess.getTitle() for sess in sessions])

    def _getStatus(self, id):
        st = self._registrant.getStatusById(id)
        if st.getStatusValue() is not None:
            return st.getStatusValue().getCaption()
        else:
            return i18nformat("""<span style="white-space:nowrap">-- _("not set") --</span>""")

    def _getGroup(self, groupId):
        if self._registrant.getMiscellaneousGroupById(groupId):
            return self._registrant.getMiscellaneousGroupById(groupId).getTitle()
        else:
            return ""

    def _formatValue(self, fieldInput, value):
        # Best-effort formatting: fall back to a plain string if the field
        # input cannot render the value.
        try:
            value = fieldInput.getValueDisplay(value)
        except Exception:
            value = str(value).strip()
        return value

    def _getItem(self, groupId, itemId):
        if self._registrant.getMiscellaneousGroupById(groupId) and \
                self._registrant.getMiscellaneousGroupById(groupId).getResponseItemById(itemId):
            item = self._registrant.getMiscellaneousGroupById(groupId).getResponseItemById(itemId)
            return self._formatValue(item.getGeneralField().getInput(), item.getValue())
        else:
            return ""

    def _getCheckedIn(self):
        conf = self._registrant.getConference()
        if not conf.getRegistrationForm().getETicket().isEnabled():
            return "-"
        elif self._registrant.isCheckedIn():
            return _("Yes")
        else:
            return _("No")

    def _getCheckInDate(self):
        checkInDate = self._registrant.getAdjustedCheckInDate()
        if checkInDate:
            return format_datetime(checkInDate)
        else:
            return "-"
| gpl-3.0 | 7,992,295,334,285,262,000 | 33.9931 | 250 | 0.575081 | false |
jake1036/spider | scrapy/contrib/spidermiddleware/offsite.py | 23 | 2084 | """
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
from scrapy import log
class OffsiteMiddleware(object):
    """Spider middleware that drops requests whose host does not match the
    spider's ``allowed_domains`` list."""

    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        middleware = cls(crawler.stats)
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_output(self, response, result, spider):
        for entry in result:
            if not isinstance(entry, Request):
                yield entry
            elif entry.dont_filter or self.should_follow(entry, spider):
                yield entry
            else:
                domain = urlparse_cached(entry).hostname
                # Log and count each offsite domain only the first time.
                if domain and domain not in self.domains_seen:
                    self.domains_seen.add(domain)
                    log.msg(format="Filtered offsite request to %(domain)r: %(request)s",
                            level=log.DEBUG, spider=spider, domain=domain, request=entry)
                    self.stats.inc_value('offsite/domains', spider=spider)
                    self.stats.inc_value('offsite/filtered', spider=spider)

    def should_follow(self, request, spider):
        # hostname can be None for wrong urls (like javascript links)
        host = urlparse_cached(request).hostname or ''
        return bool(self.host_regex.search(host))

    def get_host_regex(self, spider):
        """Override this method to implement a different offsite policy"""
        domains = getattr(spider, 'allowed_domains', None)
        if not domains:
            return re.compile('')  # allow all by default
        pattern = r'^(.*\.)?(%s)$' % '|'.join(re.escape(d) for d in domains if d is not None)
        return re.compile(pattern)

    def spider_opened(self, spider):
        self.host_regex = self.get_host_regex(spider)
        self.domains_seen = set()
kball/ambry | ambry/database/partition.py | 1 | 4876 | """
Copyright (c) 2013 Clarinova. This file is licensed under the terms of the
Revised BSD License, included in this distribution as LICENSE.txt
"""
from .sqlite import SqliteDatabase, SqliteAttachmentMixin #@UnresolvedImport
from .relational import RelationalPartitionDatabaseMixin, RelationalDatabase #@UnresolvedImport
from inserter import ValueInserter, ValueUpdater
class PartitionDb(SqliteDatabase, RelationalPartitionDatabaseMixin, SqliteAttachmentMixin):
    '''a database for a partition file. Partition databases don't have a full schema
    and can load tables as they are referenced, by copying them from the prototype. '''

    def __init__(self, bundle, partition, base_path, memory=False, **kwargs):
        ''''''
        RelationalPartitionDatabaseMixin._init(self, bundle, partition)
        self.memory = memory
        super(PartitionDb, self).__init__(base_path, memory=self.memory, **kwargs)
        self._session = None
        assert partition.identity.extension() == self.EXTENSION, (
            "Identity extension '{}' not same as db extension '{}' for database {}".format(
                partition.identity.extension(), self.EXTENSION, type(self)
            ))

    def query(self, *args, **kwargs):
        """Convenience function for self.connection.execute()"""
        from sqlalchemy.exc import OperationalError
        from ..dbexceptions import QueryError

        if isinstance(args[0], basestring):
            # Allow attachment names to be interpolated into the SQL text.
            fd = {x: x for x in self._attachments}
            args = (args[0].format(**fd),) + args[1:]
        try:
            return self.connection.execute(*args, **kwargs)
        except OperationalError as e:
            raise QueryError("Error while executing {} in database {} ({}): {}".format(args, self.dsn, type(self), e.message))

    def _resolve_table(self, table_or_name):
        """Resolve a table name or schema object to a bound table, creating
        the table in this database on first reference.

        Shared by inserter() and updater(), which previously duplicated this
        logic verbatim.
        """
        if isinstance(table_or_name, basestring):
            table_name = table_or_name
            if not table_name in self.inspector.get_table_names():
                t_meta, table = self.bundle.schema.get_table_meta(table_name)  # @UnusedVariable
                table.create(bind=self.engine)
                if not table_name in self.inspector.get_table_names():
                    raise Exception("Don't have table " + table_name)
            return self.table(table_name)
        return self.table(table_or_name.name)

    def inserter(self, table_or_name=None, **kwargs):
        """Return a ValueInserter bound to the given (or partition's) table."""
        if not self.exists():
            raise Exception("Database doesn't exist yet: '{}'".format(self.dsn))
        if table_or_name is None and self.partition.table is not None:
            table_or_name = self.partition.get_table()
        table = self._resolve_table(table_or_name)
        return ValueInserter(self, self.bundle, table, **kwargs)

    def updater(self, table_or_name=None, **kwargs):
        """Return a ValueUpdater bound to the given (or partition's) table."""
        if table_or_name is None and self.partition.table is not None:
            table_or_name = self.partition.table
        table = self._resolve_table(table_or_name)
        return ValueUpdater(self, self.bundle, table, **kwargs)

    def _on_create_connection(self, connection):
        '''Called from get_connection() to update the database'''
        super(PartitionDb, self)._on_create_connection(connection)
        _on_connect_partition(connection, None)

    def _on_create_engine(self, engine):
        from sqlalchemy import event
        super(PartitionDb, self)._on_create_engine(engine)
        # This looks like it should be connected to the listener, but it causes
        # I/O errors, so it is in _on_create_connection
        #event.listen(self._engine, 'connect', _on_connect_partition)

    def create(self):
        from ambry.orm import Dataset
        '''Like the create() for the bundle, but this one also copies
        the dataset and makes and entry for the partition '''
        self.require_path()
        SqliteDatabase._create(self)  # Creates the database file
        if RelationalDatabase._create(self):
            self.post_create()
def _on_connect_partition(dbapi_con, con_record):
    '''ISSUE some Sqlite pragmas when the connection is created'''
    # Delegates to the bundle-level connect hook, which issues the pragmas.
    from sqlite import _on_connect_bundle as ocb
    ocb(dbapi_con, con_record)
    #dbapi_con.enable_load_extension(True)
songww/blog.flexops.org | manager.py | 1 | 5281 | #!/usr/bin/env python
# coding:utf8
from gevent import monkey
monkey.patch_all()
import os
import myweb
import json
from io import open
from hashlib import md5
from md2html import md2html # , md2body
from mdparser import FlexopsParser
from datetime import datetime
from config import configs, urls
from utils import SQLAStore
from models import Post, Tag, User, WebSession, load_sqla
# Application wiring: create the web app, attach the SQLAlchemy request
# processor and configure session cookie parameters.
myweb.config.debug = False
app = myweb.application(urls, globals())
app.add_processor(load_sqla)
myweb.config.session_parameters['cookie_name'] = 'flexops'
myweb.config.session_parameters['timeout'] = 600
myweb.config.session_parameters['ignore_expiry'] = True
myweb.config.session_parameters['ignore_change_ip'] = False
myweb.config.session_parameters['expired_message'] = 'Session expired'
# Sessions are persisted through the WebSession ORM model.
session = myweb.session.Session(app, SQLAStore(WebSession))
render = myweb.template.Template()
# tags = myweb.ctx.orm.query(Tag).all()
# render.global_vars({'tags': tags})
# Expose site config and the nav items (grouped in pairs) to all templates.
render.global_vars({'config': configs})
render.global_vars({'navs': myweb.utils.group(configs['navs'], 2)})
def markdown(text):
    """Render markdown *text* to an HTML body via the Flexops parser."""
    parser = FlexopsParser(text)
    return parser.body()
# Make the renderer available inside all templates.
render.global_vars({'markdown': markdown})
def checkLogged():
    """Return True when the current session carries a login flag."""
    return session.get('logined', 0) != 0
class Home:
    def GET(self):
        """Render the front page with one page of posts, newest first."""
        postsQuery = myweb.ctx.orm.query(Post).order_by('modified desc')
        # Page numbers are 1-based in the URL; convert to a 0-based index.
        # NOTE(review): myweb.input() values may arrive as strings; the
        # arithmetic below assumes ints -- confirm the framework coerces them.
        page = myweb.input(page=1).page - 1
        pageSize = myweb.input(pageSize=10).pageSize
        if page > 0:
            # Bugfix: the old test was ``page > 1``, which skipped the offset
            # for the second page (0-based index 1) and re-served page one.
            posts = postsQuery.offset(page * pageSize).limit(pageSize).all()
        else:
            posts = postsQuery.limit(pageSize).all()
        return render.start(posts=posts)
class Views:
    def GET(self, postId):
        """Render a post by id through the md2html pipeline (legacy view)."""
        post = myweb.ctx.orm.query(Post).filter_by(id=postId).first()
        if post is None:
            raise myweb.NotFound
        f = os.path.join('articles', post.filename)
        # ``with`` guarantees the file is closed even if rendering raises;
        # the old open()/close() pair leaked the handle on error.
        with open(f, encoding='utf-8') as mdFile:
            content = mdFile.read()
        html = md2html(content)
        return render.view(html=html, post=post)
class View:
    def GET(self, post_id):
        """Render a post by id through FlexopsParser."""
        post = myweb.ctx.orm.query(Post).filter_by(id=post_id).first()
        if post is None:
            raise myweb.NotFound()
        f = os.path.join('articles', post.filename)
        # ``with`` closes the file even if parsing raises; dead commented-out
        # fallback code removed.
        with open(f, encoding='utf-8') as mdFile:
            content = mdFile.read()
        FP = FlexopsParser(content)
        html = FP.parse2html()
        return render.view(html=html, post=post)
class GetTags:
    def GET(self, tag):
        """Render the posts attached to *tag*."""
        tagRecord = myweb.ctx.orm.query(Tag).filter_by(name=tag).first()
        # The template keyword is ``posts`` although a Tag record is passed;
        # kept as-is to match the template's expectation.
        return render.tags(posts=tagRecord)
class About:
    def GET(self):
        # Static about page.
        return render.about()
class Admin:
    def GET(self):
        """Admin dashboard; redirects anonymous visitors to the login page."""
        if not checkLogged():
            raise myweb.seeother('/login')
        query = myweb.ctx.orm.query(Post).order_by('modified desc')
        return render.admin(posts=query.all())

    def POST(self):
        # Placeholder; no admin POST action implemented yet.
        pass
class Login:
    def GET(self):
        # Show the login form.
        return render.login()

    def POST(self):
        """Check credentials and set the session login flag."""
        user = myweb.input().username
        passwd = myweb.input().passwd
        # SECURITY: unsalted MD5 of the password -- weak by modern standards;
        # consider a salted KDF (bcrypt/scrypt/PBKDF2) when touching this code.
        passwd = md5(passwd).hexdigest()
        check = myweb.ctx.orm.query(User).filter_by(
            email=user, passwd=passwd
        ).first()
        # The equality re-check is redundant with the filter above but harmless.
        if check and passwd == check.passwd:
            session.logined = 1
            session.user = user
            raise myweb.seeother('/server')
        else:
            session.logined = 0
            raise myweb.seeother('/login')
class AddPost:
    def POST(self):
        """Create a post record, attach its tags and store the markdown source."""
        title = myweb.input().title
        tags = myweb.input().tags.split()
        mdName = myweb.input().filename
        mdSummary = myweb.input().summary
        fileSrc = myweb.input().mdfile.decode('utf8')
        newPost = Post(
            title=title,
            upload=datetime.now(),
            modified=datetime.now(),
            summary=mdSummary,
            filename=mdName
        )
        myweb.ctx.orm.add(newPost)
        myweb.ctx.orm.flush()
        for tag in tags:
            checkTag = myweb.ctx.orm.query(Tag).filter_by(name=tag).all()
            if not checkTag:
                newTags = Tag(name=tag)
                myweb.ctx.orm.add(newTags)
                myweb.ctx.orm.flush()
                newPost.tags.append(newTags)
            # NOTE(review): pre-existing tags are never linked to the new
            # post -- likely checkTag[0] should be appended too; confirm intent.
        # SECURITY: mdName comes straight from user input; sanitise it to
        # prevent path traversal (e.g. '../../x') before trusting it.
        # ``with`` closes the file even if the write fails.
        with open(os.path.join('articles', mdName), 'w') as fout:
            fout.write(fileSrc)
        return json.dumps({
            'success': True
        })
class EditPosts:
    def GET(self, id):
        """Placeholder endpoint: acknowledges the request as JSON."""
        payload = {
            'success': True,
            'action': 'action'
        }
        return json.dumps(payload)
# WSGI entry point for external servers.
application = app.wsgifunc()

if __name__ == '__main__':
    # Stand-alone mode: serve on all interfaces, port 8000, via gevent.
    from gevent.pywsgi import WSGIServer
    WSGIServer(('', 8000), application).serve_forever()
    # app.run()
| gpl-3.0 | -4,105,969,910,964,282,400 | 25.537688 | 76 | 0.592123 | false |
tongwang01/tensorflow | tensorflow/contrib/slim/python/slim/nets/resnet_v2.py | 27 | 13235 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer.
Another difference is that 'v2' ResNets do not include an activation function in
the main pathway. Also see [2; Fig. 4e].
Typical use:
from tensorflow.contrib.slim.nets import resnet_v2
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope(is_training)):
net, end_points = resnet_v2.resnet_v2_101(inputs, 1000)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v2.resnet_arg_scope(is_training)):
net, end_points = resnet_v2.resnet_v2_101(inputs,
21,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_utils
slim = tf.contrib.slim
resnet_arg_scope = resnet_utils.resnet_arg_scope
@slim.add_arg_scope
def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
               outputs_collections=None, scope=None):
  """Bottleneck residual unit variant with BN before convolutions.

  This is the full preactivation residual unit variant proposed in [2]. See
  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
  variant which has an extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    # Preactivation: BN + ReLU applied before the convolutions (the 'v2' form).
    preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
    if depth == depth_in:
      # Same depth: the shortcut only needs (possible) spatial subsampling.
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # Depth change: project the preactivated input with a 1x1 convolution.
      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope='shortcut')

    # 1x1 reduce -> 3x3 (with stride/rate) -> 1x1 expand.
    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           normalizer_fn=None, activation_fn=None,
                           scope='conv3')

    # No activation on the main pathway output (cf. [2; Fig. 4e]).
    output = shortcut + residual

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
def resnet_v2(inputs,
              blocks,
              num_classes=None,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              reuse=None,
              scope=None):
  """Generator for v2 (preactivation) ResNet models.

  This function generates a family of ResNet v2 models. See the resnet_v2_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks. If None
      we return the features before the logit layer.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it. If excluded, `inputs` should be the
      results of an activation-less convolution.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is None, then
      net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes is not None, net contains the pre-softmax
      activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      net = inputs
      if include_root_block:
        if output_stride is not None:
          if output_stride % 4 != 0:
            raise ValueError('The output_stride needs to be a multiple of 4.')
          # The root block contributes a stride of 4 (conv1 + pool1).
          # NOTE(review): ``/=`` is true division under Python 3 and would
          # turn output_stride into a float; fine under the Python 2 this
          # file targets -- confirm if porting.
          output_stride /= 4
        # We do not include batch normalization or activation functions in conv1
        # because the first ResNet unit will perform these. Cf. Appendix of [2].
        with slim.arg_scope([slim.conv2d],
                            activation_fn=None, normalizer_fn=None):
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
        net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
      net = resnet_utils.stack_blocks_dense(net, blocks, output_stride)
      # This is needed because the pre-activation variant does not have batch
      # normalization or activation functions in the residual unit output. See
      # Appendix of [2].
      net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm')
      if global_pool:
        # Global average pooling.
        net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
      if num_classes is not None:
        net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                          normalizer_fn=None, scope='logits')
      # Convert end_points_collection into a dictionary of end_points.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if num_classes is not None:
        end_points['predictions'] = slim.softmax(net, scope='predictions')
      return net, end_points
resnet_v2.default_image_size = 224
def resnet_v2_50(inputs,
                 num_classes=None,
                 global_pool=True,
                 output_stride=None,
                 reuse=None,
                 scope='resnet_v2_50'):
  """ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
  def _stage(name, depth, depth_bottleneck, num_units, last_stride=2):
    # A stage is (num_units - 1) stride-1 bottleneck units followed by a
    # single unit with stride `last_stride` (the final stage keeps stride 1).
    unit_specs = ([(depth, depth_bottleneck, 1)] * (num_units - 1) +
                  [(depth, depth_bottleneck, last_stride)])
    return resnet_utils.Block(name, bottleneck, unit_specs)

  blocks = [
      _stage('block1', 256, 64, 3),
      _stage('block2', 512, 128, 4),
      _stage('block3', 1024, 256, 6),
      _stage('block4', 2048, 512, 3, last_stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, global_pool, output_stride,
                   include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_101(inputs,
                  num_classes=None,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_101'):
  """ResNet-101 model of [1]. See resnet_v2() for arg and return description."""
  def _stage(name, depth, depth_bottleneck, num_units, last_stride=2):
    # A stage is (num_units - 1) stride-1 bottleneck units followed by a
    # single unit with stride `last_stride` (the final stage keeps stride 1).
    unit_specs = ([(depth, depth_bottleneck, 1)] * (num_units - 1) +
                  [(depth, depth_bottleneck, last_stride)])
    return resnet_utils.Block(name, bottleneck, unit_specs)

  blocks = [
      _stage('block1', 256, 64, 3),
      _stage('block2', 512, 128, 4),
      _stage('block3', 1024, 256, 23),
      _stage('block4', 2048, 512, 3, last_stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, global_pool, output_stride,
                   include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_152(inputs,
                  num_classes=None,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_152'):
  """ResNet-152 model of [1]. See resnet_v2() for arg and return description."""
  def _stage(name, depth, depth_bottleneck, num_units, last_stride=2):
    # A stage is (num_units - 1) stride-1 bottleneck units followed by a
    # single unit with stride `last_stride` (the final stage keeps stride 1).
    unit_specs = ([(depth, depth_bottleneck, 1)] * (num_units - 1) +
                  [(depth, depth_bottleneck, last_stride)])
    return resnet_utils.Block(name, bottleneck, unit_specs)

  blocks = [
      _stage('block1', 256, 64, 3),
      _stage('block2', 512, 128, 8),
      _stage('block3', 1024, 256, 36),
      _stage('block4', 2048, 512, 3, last_stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, global_pool, output_stride,
                   include_root_block=True, reuse=reuse, scope=scope)
def resnet_v2_200(inputs,
                  num_classes=None,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  scope='resnet_v2_200'):
  """ResNet-200 model of [2]. See resnet_v2() for arg and return description."""
  def _stage(name, depth, depth_bottleneck, num_units, last_stride=2):
    # A stage is (num_units - 1) stride-1 bottleneck units followed by a
    # single unit with stride `last_stride` (the final stage keeps stride 1).
    unit_specs = ([(depth, depth_bottleneck, 1)] * (num_units - 1) +
                  [(depth, depth_bottleneck, last_stride)])
    return resnet_utils.Block(name, bottleneck, unit_specs)

  blocks = [
      _stage('block1', 256, 64, 3),
      _stage('block2', 512, 128, 24),
      _stage('block3', 1024, 256, 36),
      _stage('block4', 2048, 512, 3, last_stride=1),
  ]
  return resnet_v2(inputs, blocks, num_classes, global_pool, output_stride,
                   include_root_block=True, reuse=reuse, scope=scope)
| apache-2.0 | -7,500,124,744,281,748,000 | 44.795848 | 80 | 0.636796 | false |
quarckster/cfme_tests | scripts/coverage_report_sprout.py | 2 | 1609 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import argparse
import time

import diaper
from cfme.test_framework.sprout.client import SproutClient
from cfme.utils.appliance import IPAppliance

from coverage_report_jenkins import main as coverage_report_jenkins
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('jenkins_url')
    parser.add_argument('jenkins_job_name')
    parser.add_argument('version')
    parser.add_argument('--jenkins-user', default=None)
    parser.add_argument('--jenkins-token', default=None)
    args = parser.parse_args()

    # TODO: Upstream support
    # E.g. version '5.8.0.17' -> appliance stream group 'downstream-58z'.
    group = 'downstream-' + ''.join(args.version.split('.')[:2]) + 'z'

    sprout = SproutClient.from_config()
    print('requesting an appliance from sprout for {}/{}'.format(group, args.version))
    pool_id = sprout.request_appliances(group, version=args.version)
    print('Requested pool {}'.format(pool_id))
    try:
        # Poll sprout until the pool is both fulfilled and finished.  Sleep
        # between polls so we do not hammer the sprout API in a busy loop
        # (previously this polled back-to-back with no delay at all).
        while True:
            result = sprout.request_check(pool_id)
            if result['fulfilled'] and result['finished']:
                break
            time.sleep(10)
        appliance_ip = result['appliances'][0]['ip_address']
        print('received an appliance with IP address: {}'.format(appliance_ip))
        with IPAppliance(hostname=appliance_ip) as appliance:
            exit(
                coverage_report_jenkins(
                    appliance,
                    args.jenkins_url,
                    args.jenkins_user,
                    args.jenkins_token,
                    args.jenkins_job_name))
    finally:
        # Always hand the appliance pool back to sprout; `diaper` swallows
        # any exception raised during this best-effort teardown.
        with diaper:
            sprout.destroy_pool(pool_id)
| gpl-2.0 | -8,014,877,579,074,386,000 | 37.309524 | 86 | 0.628962 | false |
trojkat/pylama_gjslint | pylama_gjslint/closure_linter/error_check.py | 27 | 3768 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Specific JSLint errors checker."""
import gflags as flags
FLAGS = flags.FLAGS
class Rule(object):
  """Identifiers for the individual, optional lint checks."""

  # The user-facing documentation for each rule lives in the --jslint_error
  # flag definition below.
  BLANK_LINES_AT_TOP_LEVEL = 'blank_lines_at_top_level'
  INDENTATION = 'indentation'
  WELL_FORMED_AUTHOR = 'well_formed_author'
  NO_BRACES_AROUND_INHERIT_DOC = 'no_braces_around_inherit_doc'
  BRACES_AROUND_TYPE = 'braces_around_type'
  OPTIONAL_TYPE_MARKER = 'optional_type_marker'
  VARIABLE_ARG_MARKER = 'variable_arg_marker'
  UNUSED_PRIVATE_MEMBERS = 'unused_private_members'
  UNUSED_LOCAL_VARIABLES = 'unused_local_variables'

  # Pseudo-rule that switches on every known optional error.
  ALL = 'all'

  # The subset of rules that the --strict flag enables, i.e. the checks that
  # belong to the stricter Closure style.
  CLOSURE_RULES = frozenset({
      BLANK_LINES_AT_TOP_LEVEL,
      INDENTATION,
      WELL_FORMED_AUTHOR,
      NO_BRACES_AROUND_INHERIT_DOC,
      BRACES_AROUND_TYPE,
      OPTIONAL_TYPE_MARKER,
      VARIABLE_ARG_MARKER,
  })
# --strict enables the whole stricter-Closure rule set on top of the base
# lint checks (see Rule.CLOSURE_RULES).
flags.DEFINE_boolean('strict', False,
                     'Whether to validate against the stricter Closure style. '
                     'This includes ' + (', '.join(Rule.CLOSURE_RULES)) + '.')

# --jslint_error selectively enables individual optional checks by name; the
# special value 'all' (Rule.ALL) enables every optional check at once.
flags.DEFINE_multistring('jslint_error', [],
                         'List of specific lint errors to check. Here is a list'
                         ' of accepted values:\n'
                         ' - ' + Rule.ALL + ': enables all following errors.\n'
                         ' - ' + Rule.BLANK_LINES_AT_TOP_LEVEL + ': validates'
                         'number of blank lines between blocks at top level.\n'
                         ' - ' + Rule.INDENTATION + ': checks correct '
                         'indentation of code.\n'
                         ' - ' + Rule.WELL_FORMED_AUTHOR + ': validates the '
                         '@author JsDoc tags.\n'
                         ' - ' + Rule.NO_BRACES_AROUND_INHERIT_DOC + ': '
                         'forbids braces around @inheritdoc JsDoc tags.\n'
                         ' - ' + Rule.BRACES_AROUND_TYPE + ': enforces braces '
                         'around types in JsDoc tags.\n'
                         ' - ' + Rule.OPTIONAL_TYPE_MARKER + ': checks correct '
                         'use of optional marker = in param types.\n'
                         ' - ' + Rule.UNUSED_PRIVATE_MEMBERS + ': checks for '
                         'unused private variables.\n')
def ShouldCheck(rule):
  """Returns whether the optional rule should be checked.

  Computes different flags (strict, jslint_error, jslint_noerror) to find out
  if this specific rule should be checked.

  Args:
    rule: Name of the rule (see Rule).

  Returns:
    True if the rule should be checked according to the flags, otherwise False.
  """
  enabled_by_flag = (rule in FLAGS.jslint_error or
                     Rule.ALL in FLAGS.jslint_error)
  if enabled_by_flag:
    return True
  # --strict implicitly enables every rule of the stricter Closure style.
  return FLAGS.strict and rule in Rule.CLOSURE_RULES
| bsd-3-clause | -5,970,058,837,463,215,000 | 39.516129 | 80 | 0.596603 | false |
helixyte/TheLMA | thelma/repositories/rdb/schema/tables/supplierstructureannotation.py | 1 | 1213 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Supplier structure annotation table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def create_table(metadata, supplier_molecule_design_tbl,
                 chemical_structure_tbl):
    "Table factory."
    def _cascading_fk(column):
        # Both key columns follow the referenced row on update and delete.
        return ForeignKey(column, onupdate='CASCADE', ondelete='CASCADE')

    return Table(
        'supplier_structure_annotation', metadata,
        Column('supplier_molecule_design_id', Integer,
               _cascading_fk(
                   supplier_molecule_design_tbl.c.supplier_molecule_design_id),
               nullable=False, primary_key=True),
        Column('chemical_structure_id', Integer,
               _cascading_fk(chemical_structure_tbl.c.chemical_structure_id),
               nullable=False, primary_key=True),
        Column('annotation', String, nullable=False),
        )
| mit | -9,079,166,523,909,066,000 | 34.676471 | 80 | 0.657049 | false |
smc/silpa | src/silpa/modules/calendar/calendar.py | 3 | 6154 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Calendar Program
# Copyright 2008 Santhosh Thottingal <[email protected]>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions email: [email protected]
# URL: http://www.smc.org.in
from common import *
from utils import *
import os,sys
import datetime
from sakacalendar import SakaCalendar
from astral import Astral
class Calendar(SilpaModule):
    """SILPA service module exposing Indic calendar conversions and a
    panchanga (almanac) lookup.

    Calendar arithmetic is delegated to :class:`SakaCalendar`; sun timings
    and the kaalam periods come from the bundled Astral library.
    """

    # Day names indexed by datetime.date.weekday(): 0 == Monday ... 6 == Sunday.
    WEEKDAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
                "Saturday", "Sunday"]

    def __init__(self):
        # HTML template rendered by the SILPA framework for this module.
        self.template = os.path.join(os.path.dirname(__file__), 'calendar.html')
        self.astral = Astral()

    @ServiceMethod
    def date(self, calendar_system="Gregorian"):
        """Return today's date in the requested calendar system as JSON."""
        today = datetime.date.today()
        if calendar_system == "Gregorian":
            return dumps([today.year, today.month, today.day])
        if calendar_system == "Saka":
            sakacalendar = SakaCalendar()
            greg = [today.year, today.month, today.day]
            return dumps(sakacalendar.gregorian_to_saka_date(greg))
        # Mirror convert()'s behaviour for unknown systems instead of
        # silently returning None.
        return "Not Implemented " + calendar_system

    @ServiceMethod
    def convert(self, from_calendar_system, to_calendar_system, year, month, day):
        """Convert a date between calendar systems.

        year/month/day arrive as strings from the request layer; they are
        normalised to ints up front so that every code path returns numeric
        values in its JSON answer (previously the same-system shortcut
        returned the raw strings).
        """
        year = int(year)
        month = int(month)
        day = int(day)
        if from_calendar_system == to_calendar_system:
            return dumps([year, month, day])
        sakacalendar = SakaCalendar()
        jd = None
        if from_calendar_system == "Gregorian":
            jd = sakacalendar.gregorian_to_julian_date(year, month, day)
        if from_calendar_system == "Saka":
            jd = sakacalendar.saka_to_julian_date(year, month, day)
        if to_calendar_system == "Gregorian":
            return dumps(sakacalendar.julian_date_to_gregorian(jd))
        if to_calendar_system == "Saka":
            return dumps(sakacalendar.gregorian_to_saka_date(
                sakacalendar.julian_date_to_gregorian(jd)))
        return "Not Implemented " + from_calendar_system + " -> " + to_calendar_system

    @ServiceMethod
    def panchanga(self, city_name, year, month, day):
        """Return almanac details for *city_name* on the given Gregorian
        date (sun timings, inauspicious kaalams, calendar dates) as JSON.
        """
        date = datetime.date(int(year), int(month), int(day))
        calendar_details = {}
        self.astral.solar_depression = 'civil'
        city = self.astral[city_name]
        # Bug fix: isoweekday() is 1..7, so indexing the 7-element WEEKDAYS
        # list with it shifted every day by one and raised IndexError on
        # Sundays; weekday() is the correct 0-based index (0 == Monday).
        calendar_details['Day of the week'] = self.WEEKDAYS[date.weekday()]
        calendar_details['City Name'] = city_name
        calendar_details['City Country'] = city.country
        print('Information for %s/%s\n' % (city_name, city.country))
        timezone = city.timezone
        print('Timezone: %s' % timezone)
        calendar_details['Timezone'] = timezone
        calendar_details['Latitude'] = city.latitude
        calendar_details['Longitude'] = city.longitude
        print('Latitude: %.02f; Longitude: %.02f\n' %
              (city.latitude, city.longitude))
        sun = city.sun(date=date, local=True)
        calendar_details['Dawn'] = str(sun['dawn'])
        calendar_details['Sunrise'] = str(sun['sunrise'])
        calendar_details['Noon'] = str(sun['noon'])
        calendar_details['Sunset'] = str(sun['sunset'])
        calendar_details['Dusk'] = str(sun['dusk'])
        print('Dawn: %s' % str(sun['dawn']))
        print('Sunrise: %s' % str(sun['sunrise']))
        print('Noon: %s' % str(sun['noon']))
        print('Sunset: %s' % str(sun['sunset']))
        print('Dusk: %s' % str(sun['dusk']))
        # Inauspicious periods of the day, provided by the patched Astral.
        rahukaalam = city.rahukaalam(date=date, local=True)
        gulikakaalam = city.gulikakaalam(date=date, local=True)
        yamakandakaalam = city.yamakandakaalam(date=date, local=True)
        calendar_details['Rahukaalam'] = "from " + str(rahukaalam['start']) + " to " + str(rahukaalam['end'])
        calendar_details['Gulikakaalam'] = "from " + str(gulikakaalam['start']) + " to " + str(gulikakaalam['end'])
        calendar_details['Yamakandakaalam'] = "from " + str(yamakandakaalam['start']) + " to " + str(yamakandakaalam['end'])
        print('Rahukaalam: %s' % "from " + str(rahukaalam['start']) + " to " + str(rahukaalam['end']))
        print('Gulikakaalam: %s' % "from " + str(gulikakaalam['start']) + " to " + str(gulikakaalam['end']))
        print('Yamakandakaalam: %s' % "from " + str(yamakandakaalam['start']) + " to " + str(yamakandakaalam['end']))
        calendar_details['Kollavarsham(Malayalam Calendar)'] = "Not implemented now"
        calendar_details['Tamil Calendar'] = "Not implemented now"
        calendar_details['Bengali Calendar'] = "Not implemented now"
        sakacalendar = SakaCalendar()
        jd = sakacalendar.gregorian_to_julian_date(int(year), int(month), int(day))
        calendar_details['Saka Calendar'] = sakacalendar.gregorian_to_saka_date(
            sakacalendar.julian_date_to_gregorian(jd))
        calendar_details['Oriya Calendar'] = "Not implemented now"
        calendar_details['Nakshathra'] = "Not implemented now"
        calendar_details['Thidhi'] = "Not implemented now"
        return dumps(calendar_details)

    def get_module_name(self):
        """Human-readable module name shown by the framework."""
        return "Indic Calendar Systems"

    def get_info(self):
        """Short module description shown by the framework."""
        return "Conversion and look up on Indic Calendars"
def getInstance():
    # Factory entry point used by the SILPA framework to obtain the module.
    return Calendar()
| agpl-3.0 | 6,164,151,750,504,642,000 | 51.051724 | 126 | 0.624959 | false |
Gustavo6046/ChatterBot | chatterbot/output/microsoft.py | 3 | 3289 | from __future__ import unicode_literals
import json
from .output_adapter import OutputAdapter
class Microsoft(OutputAdapter):
    """
    An output adapter that allows a ChatterBot instance to send
    responses to a Microsoft bot using the *Direct Line client protocol*.
    """

    def __init__(self, **kwargs):
        super(Microsoft, self).__init__(**kwargs)

        # Base URL of the Direct Line service.
        self.directline_host = kwargs.get(
            'directline_host',
            'https://directline.botframework.com'
        )

        # Secret (or per-conversation token) for the BotConnector header.
        self.direct_line_token_or_secret = kwargs.get(
            'direct_line_token_or_secret'
        )

        # Identifier of the Direct Line conversation to post into.
        self.conversation_id = kwargs.get('conversation_id')

        authorization_header = 'BotConnector {}'.format(
            self.direct_line_token_or_secret
        )

        # Shared headers for every Direct Line request.
        self.headers = {
            'Authorization': authorization_header,
            'Content-Type': 'application/json'
        }

    def _validate_status_code(self, response):
        # Direct Line success codes: 200 (OK) and 204 (no content).
        status_code = response.status_code
        if status_code not in [200, 204]:
            raise self.HTTPStatusException('{} status code recieved'.format(status_code))

    def get_most_recent_message(self):
        """
        Return the most recently sent message of the conversation,
        or None when the conversation has no messages yet.
        """
        import requests
        endpoint = '{host}/api/conversations/{id}/messages'.format(
            host=self.directline_host,
            id=self.conversation_id
        )

        # NOTE(review): verify=False disables TLS certificate verification
        # for this HTTPS request -- confirm whether this is intentional.
        response = requests.get(
            endpoint,
            headers=self.headers,
            verify=False
        )

        self.logger.info('{} retrieving most recent messages {}'.format(
            response.status_code, endpoint
        ))

        self._validate_status_code(response)

        data = response.json()

        if data['messages']:
            # 'watermark' marks how far the message stream has been read;
            # the element just before it is the latest message.
            last_msg = int(data['watermark'])
            return data['messages'][last_msg - 1]
        return None

    def send_message(self, conversation_id, message):
        """
        Post a message to the given Direct Line conversation and return
        the most recent message of the conversation afterwards.
        """
        import requests
        message_url = "{host}/api/conversations/{conversationId}/messages".format(
            host=self.directline_host,
            conversationId=conversation_id
        )

        response = requests.post(
            message_url,
            headers=self.headers,
            data=json.dumps({
                'message': message
            })
        )

        self.logger.info('{} sending message {}'.format(
            response.status_code, message_url
        ))
        self._validate_status_code(response)
        # Microsoft return 204 on operation succeeded and no content was returned.
        return self.get_most_recent_message()

    def process_response(self, statement, session_id=None):
        # Forward the bot's statement to the configured conversation; the
        # statement itself is returned unchanged to the caller.
        data = self.send_message(self.conversation_id, statement.text)
        self.logger.info('processing user response {}'.format(data))
        return statement

    class HTTPStatusException(Exception):
        """
        Exception raised when unexpected non-success HTTP
        status codes are returned in a response.
        """

        def __init__(self, value):
            self.value = value

        def __str__(self):
            return repr(self.value)
| bsd-3-clause | 8,121,900,599,387,251,000 | 29.174312 | 89 | 0.584372 | false |
Yuudachimoe/HikariChun-RedBot | lib/youtube_dl/extractor/noovo.py | 20 | 3178 | # coding: utf-8
from __future__ import unicode_literals
from .brightcove import BrightcoveNewIE
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
smuggle_url,
try_get,
)
class NoovoIE(InfoExtractor):
    """Extractor for noovo.ca; resolves pages to Brightcove player URLs."""

    _VALID_URL = r'https?://(?:[^/]+\.)?noovo\.ca/videos/(?P<id>[^/]+/[^/?#&]+)'

    _TESTS = [{
        # clip
        'url': 'http://noovo.ca/videos/rpm-plus/chrysler-imperial',
        'info_dict': {
            'id': '5386045029001',
            'ext': 'mp4',
            'title': 'Chrysler Imperial',
            'description': 'md5:de3c898d1eb810f3e6243e08c8b4a056',
            'timestamp': 1491399228,
            'upload_date': '20170405',
            'uploader_id': '618566855001',
            'creator': 'vtele',
            'view_count': int,
            'series': 'RPM+',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # episode
        'url': 'http://noovo.ca/videos/l-amour-est-dans-le-pre/episode-13-8',
        'info_dict': {
            'id': '5395865725001',
            'title': 'Épisode 13 : Les retrouvailles',
            'description': 'md5:336d5ebc5436534e61d16e63ddfca327',
            'ext': 'mp4',
            'timestamp': 1492019320,
            'upload_date': '20170412',
            'uploader_id': '618566855001',
            'creator': 'vtele',
            'view_count': int,
            'series': "L'amour est dans le pré",
            'season_number': 5,
            'episode': 'Épisode 13',
            'episode_number': 13,
        },
        'params': {
            'skip_download': True,
        },
    }]
    # Brightcove player URL template; 618566855001 is Noovo's account id.
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/618566855001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Page metadata from Noovo's single-episode API endpoint.
        data = self._download_json(
            'http://api.noovo.ca/api/v1/pages/single-episode/%s' % video_id,
            video_id)['data']

        # Clips have their payload in data['contents'][0]; episodes at top level.
        content = try_get(data, lambda x: x['contents'][0])

        brightcove_id = data.get('brightcoveId') or content['brightcoveId']

        # Series title may live directly on 'show' or under 'season'.
        series = try_get(
            data, (
                lambda x: x['show']['title'],
                lambda x: x['season']['show']['title']),
            compat_str)

        episode = None
        og = data.get('og')
        if isinstance(og, dict) and og.get('type') == 'video.episode':
            episode = og.get('title')

        video = content or data

        # Delegate playback extraction to the Brightcove extractor; the
        # smuggled geo restriction limits playback to Canada.
        return {
            '_type': 'url_transparent',
            'ie_key': BrightcoveNewIE.ie_key(),
            'url': smuggle_url(
                self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
                {'geo_countries': ['CA']}),
            'id': brightcove_id,
            'title': video.get('title'),
            'creator': video.get('source'),
            'view_count': int_or_none(video.get('viewsCount')),
            'series': series,
            'season_number': int_or_none(try_get(
                data, lambda x: x['season']['seasonNumber'])),
            'episode': episode,
            'episode_number': int_or_none(data.get('episodeNumber')),
        }
| gpl-3.0 | 5,730,699,699,077,663,000 | 31.731959 | 112 | 0.504882 | false |
MattTuri/M-Admin | MAdmin/utils/__init__.py | 1 | 1707 | from functools import wraps
import os
from os.path import dirname
from threading import Thread
from fabric.api import *
from flask import flash
from pbkdf2 import crypt
from vagrant import Vagrant
from MAdmin import db
from MAdmin.sql.ORM import Device
def hash_password(password):
    # PBKDF2 hash with a freshly generated random salt (pbkdf2.crypt);
    # the returned string embeds salt and parameters.
    return crypt(password)
def verify_password(password_hash, guessed_password):
    """Return True when ``guessed_password`` matches ``password_hash``.

    The stored PBKDF2 string embeds its own salt and parameters, so hashing
    the guess with the stored hash as the salt argument reproduces the full
    string on a match.
    """
    import hmac
    # Constant-time comparison: a plain ``==`` can leak how many leading
    # characters of the hash matched (timing side channel).
    return hmac.compare_digest(password_hash,
                               crypt(guessed_password, password_hash))
def flash_errors(form):
    """Flash every validation error message attached to a WTForms form."""
    # Field names are not shown to the user, only the messages themselves.
    for messages in form.errors.values():
        for message in messages:
            flash(u"%s" % message)
def async(f):
@wraps(f)
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
@async
def make_box(hostname, device_os):
    """Create and boot a Vagrant box for *hostname* in a background thread.

    On success, the matching Device row is marked online and no longer
    pending boot.  Runs asynchronously via the ``async`` decorator.
    """
    # Boxes live under <project root>/boxes/<hostname>.
    # NOTE(review): os.chdir mutates process-global state from a worker
    # thread -- concurrent make_box calls could interleave; confirm safe.
    os.chdir(dirname(dirname(dirname(__file__))) + '/boxes')
    if not os.path.exists(hostname):
        os.mkdir(hostname)
    os.chdir(hostname)
    file_location = os.getcwd()
    box = Vagrant(quiet_stderr=False, quiet_stdout=False, root=file_location)
    try:
        box.box_add('chef/centos-7.0', 'https://atlas.hashicorp.com/chef/boxes/centos-7.0')
        box.box_add('ubuntu/trusty64', 'https://atlas.hashicorp.com/ubuntu/boxes/trusty64')
    except:
        # NOTE(review): bare except assumes "box already added" but will
        # also mask network or permission failures -- consider narrowing.
        print 'Box probably already exists'
    box.init(box_name=device_os)
    box.up()
    # Point fabric's global environment at the freshly booted box.
    env.hosts = [box.user_hostname_port()]
    env.key_filename = box.keyfile()
    env.disable_known_hosts = True
    # Flag the device as reachable in the database.
    device = Device.query.filter(Device.hostname == hostname).first()
    device.box_online = True
    device.pending_boot = False
    db.session.commit()
keedio/hue | desktop/core/ext-py/South-1.0.2/south/models.py | 53 | 1504 | from django.db import models
from south.db import DEFAULT_DB_ALIAS
# If we detect Django 1.7 or higher, then exit immediately: Django 1.7
# introduced its own migration framework, which replaces South.
# Placed here so it's guaranteed to be imported on Django start
# (this models.py is loaded early by Django's app loading).
import django
if django.VERSION[0] > 1 or (django.VERSION[0] == 1 and django.VERSION[1] > 6):
    raise RuntimeError("South does not support Django 1.7 or higher. Please use native Django migrations.")
class MigrationHistory(models.Model):
    """Database record of a single applied South migration."""

    app_name = models.CharField(max_length=255)
    migration = models.CharField(max_length=255)
    applied = models.DateTimeField(blank=True)

    @classmethod
    def for_migration(cls, migration, database):
        """Return the history row for *migration*, or an unsaved one."""
        # Multi-db support arrived in Django 1.2; Django <= 1.1 only has
        # the plain default manager.
        if database != DEFAULT_DB_ALIAS:
            manager = cls.objects.using(database)
        else:
            manager = cls.objects
        try:
            return manager.get(
                app_name=migration.app_label(),
                migration=migration.name(),
            )
        except cls.DoesNotExist:
            # Not recorded yet -- hand back an unsaved instance.
            return cls(
                app_name=migration.app_label(),
                migration=migration.name(),
            )

    def get_migrations(self):
        # Imported lazily to avoid a circular import at module load time.
        from south.migration.base import Migrations
        return Migrations(self.app_name)

    def get_migration(self):
        return self.get_migrations().migration(self.migration)

    def __str__(self):
        return "<%s: %s>" % (self.app_name, self.migration)
| apache-2.0 | 4,636,501,161,922,135,000 | 33.976744 | 107 | 0.605718 | false |
OpenBfS/dokpool-plone | Plone/src/elan.esd/elan/esd/content/elandoccollection.py | 1 | 10722 | # -*- coding: utf-8 -*-
#
# File: elandoccollection.py
#
# Copyright (c) 2016 by Bundesamt für Strahlenschutz
# Generator: ConPD2
# http://www.condat.de
#
from __future__ import print_function
__author__ = ''
__docformat__ = 'plaintext'
"""Definition of the ELANDocCollection content type. See elandoccollection.py for more
explanation on the statements below.
"""
from AccessControl import ClassSecurityInfo
from docpool.elan.config import ELAN_APP
from docpool.event.utils import getScenariosForCurrentUser
from elan.esd import DocpoolMessageFactory as _
from elan.esd.utils import getCategoriesForCurrentUser
from plone.app.contenttypes.content import Collection
from plone.app.contenttypes.content import ICollection
from plone.autoform import directives
from plone.dexterity.content import Item
from plone.supermodel import model
from plone.protect.interfaces import IDisableCSRFProtection
from Products.CMFCore.permissions import View
from Products.CMFCore.utils import getToolByName
from z3c.relationfield.event import updateRelations
from z3c.relationfield.relation import RelationValue
from z3c.relationfield.schema import RelationChoice
from z3c.relationfield.schema import RelationList
from zope.component import adapter
from zope.component import getUtility
from zope.interface import alsoProvides
from zope.interface import implementer
from zope.intid.interfaces import IIntIds
from zope.lifecycleevent.interfaces import IObjectAddedEvent
from zope.lifecycleevent.interfaces import IObjectModifiedEvent
class IELANDocCollection(model.Schema, ICollection):
    """Schema of an ELAN document collection: a Plone collection whose
    query is driven by a set of referenced document types.
    """

    # Relations to the DocType objects whose documents this collection
    # aggregates; the vocabulary lists all known doc types.
    docTypes = RelationList(
        title=_(
            u'label_elandoccollection_doctypes',
            default=u'Document Types'),
        description=_(u'description_elandoccollection_doctypes', default=u''),
        required=False,
        value_type=RelationChoice(
            title=_("Document Types"), source="docpool.base.vocabularies.DocType"
        ),
    )
    directives.widget(docTypes='z3c.form.browser.select.CollectionSelectFieldWidget')
    # directives.widget(docTypes=AutocompleteMultiFieldWidget)
@implementer(IELANDocCollection)
class ELANDocCollection(Item, Collection):
    """A Plone collection for ELAN documents.

    The stored query is derived from the referenced doc types; at search
    time additional implicit filters (user-selected scenarios/categories,
    ELAN app support, content path) are appended -- see getQuery().
    """

    security = ClassSecurityInfo()

    def testSearch(self):
        """Developer helper: run a hard-coded catalog query and print the
        titles of the results. Not used in production code paths.
        """
        kw = {
            'portal_type': {'query': ['DPDocument']},
            'sort_on': 'mdate',
            'dp_type': {'query': ['eventinformation', 'nppinformation']},
            'scenarios': {'query': ['scenario2', 'scenario1']},
            'sort_order': 'reverse',
            'path': {'query': '/Plone/Members'},
        }
        res = self.portal_catalog(**kw)
        # print len(res)
        for r in res:
            print(r.Title)

    def getUserSelectedScenarios(self):
        """Return the scenario ids the current user has chosen as filter."""
        uss = getScenariosForCurrentUser(self)
        # print usc
        return uss

    def getUserSelectedCategories(self):
        """Return the category ids the current user has chosen as filter."""
        usc = getCategoriesForCurrentUser(self)
        # print usc
        return usc

    def results(self, batch=True, b_start=0,
                b_size=10, sort_on=None, brains=False):
        """Get results override, implicit = True"""
        if sort_on is None:
            sort_on = self.sort_on
        return self.getQuery(
            implicit=True,
            batch=batch,
            b_start=b_start,
            b_size=b_size,
            sort_on=sort_on,
            brains=brains,
        )

    def correctDocTypes(self):
        """
        Replace references to global doc types with references to local doc types.
        """
        request = self.REQUEST
        alsoProvides(request, IDisableCSRFProtection)
        dts = self.docTypes
        res = []
        intids = getUtility(IIntIds)
        if dts:
            for dt in dts:
                t = dt.to_object
                new = None
                if t:
                    tid = t.getId()
                    try:
                        # Look up the local doc type with the same id;
                        # missing entries are silently skipped.
                        new = self.config.dtypes[tid]
                    except BaseException:
                        pass
                    if new:
                        to_id = intids.getId(new)
                        res.append(RelationValue(to_id))
        self.docTypes = res
        updateRelations(self, None)
        self.setDocTypesUpdateCollection()
        self.reindexObject()

    def setDocTypesUpdateCollection(self, values=None):
        """
        Update the criteria for the underlying collection.
        """
        if values:
            self.docTypes = values
        # We always search for ELAN content
        params = [
            {
                'i': 'portal_type',
                'o': 'plone.app.querystring.operation.selection.is',
                'v': ['DPDocument', 'SituationReport', 'SRModule'],
            }
        ]
        # We usually also have document types configured
        # This returns the corresponding Type Object(s)
        types = self.docTypes
        if types:
            params.append(
                {
                    'i': 'dp_type',
                    'o': 'plone.app.querystring.operation.selection.is',
                    'v': [t.to_object.getId() for t in types if t.to_object],
                }
            )  # getId() vorher
        self.query = params
        self.sort_on = 'changed'
        self.sort_reversed = True

    def isOverview(self):
        """
        Is this an overview collection?
        """
        # Convention: overview collections carry 'overview' in their id.
        return self.getId().find('overview') > -1

    def dp_type(self):
        """
        We use this index to mark those collections which actually serve as categories.
        """
        # print self
        if self.docTypes:
            # print "active"
            return "active"
        else:
            # print "inactive"
            return "inactive"

    security.declareProtected(View, 'synContentValues')

    def synContentValues(self):
        """Getter for syndycation support
        """
        syn_tool = getToolByName(self, 'portal_syndication')
        limit = int(syn_tool.getMaxItems(self))
        return self.getQuery(batch=False, brains=True, limit=limit)[:limit]

    def getQuery(self, **kwargs):
        """Get the query dict from the request or from the object"""
        from zope.site.hooks import getSite
        from plone.app.querystring.querybuilder import QueryBuilder

        # print "modified get"
        request = self.REQUEST
        alsoProvides(request, IDisableCSRFProtection)
        raw = kwargs.get('raw', None)
        implicit_filter = kwargs.get('implicit', False)
        value = self.query  # .raw
        if not value:
            self.setDocTypesUpdateCollection()  # Not yet initialized
            value = self.query
        # print value
        # NOTE(review): comparison with == True (instead of truthiness)
        # means only a literal True / 1 short-circuits here.
        if raw == True:
            # We actually wanted the raw value, should have called getRaw
            return value
        querybuilder = QueryBuilder(self, getSite().REQUEST)
        if implicit_filter:
            # Not in the archive:
            value = list(value[:])  # Otherwise we change the stored query!
            if not self.isArchive():
                # First implicit filter: the user has select scenario(s) as a
                # filter
                uss = self.getUserSelectedScenarios()
                if uss:
                    # This is THE modification: append the implicit criterion
                    # for the scenario(s)
                    value.append(
                        {
                            'i': 'scenarios',
                            'o': 'plone.app.querystring.operation.selection.is',
                            'v': uss,
                        }
                    )
                else:  # If nothing selected, don't show results!
                    value.append(
                        {
                            'i': 'scenarios',
                            'o': 'plone.app.querystring.operation.selection.is',
                            'v': ["dontfindanything"],
                        }
                    )
            # print value
            # Second implicit filter: the user has selected categories as a filter
            # Used for the chronological overview
            if self.isOverview():
                usc = self.getUserSelectedCategories()
                if usc:
                    value.append(
                        {
                            'i': 'category',
                            'o': 'plone.app.querystring.operation.selection.is',
                            'v': usc,
                        }
                    )
            # Third implicit filter: only results with ELAN support are wanted.
            value.append(
                {
                    'i': 'apps_supported',
                    'o': 'plone.app.querystring.operation.selection.is',
                    'v': [ELAN_APP],
                }
            )
            # Now we restrict the search to the paths to Members and Groups.
            # This ensures that in case of archives we only get results from the correct subset.
            # m = self.content
            # mpath = getRelativePath(m)
            mpath = "content"
            # Just one path allowed in the path criterion. Must be the part
            # after the portal root, e.g. '/Members'
            value.append(
                {
                    'i': 'path',
                    'o': 'plone.app.querystring.operation.string.path',
                    'v': "/%s" % mpath,
                }
            )
        sort_on = kwargs.get('sort_on', self.sort_on)
        sort_order = 'reverse' if self.sort_reversed else 'ascending'
        limit = kwargs.get('limit', self.limit)
        # print value
        res = querybuilder(
            query=value,
            batch=kwargs.get('batch', False),
            b_start=kwargs.get('b_start', 0),
            b_size=kwargs.get('b_size', 30),
            sort_on=sort_on,
            sort_order=sort_order,
            limit=limit,
            brains=kwargs.get('brains', False),
        )
        # print len(res)
        return res
@adapter(IELANDocCollection, IObjectModifiedEvent)
def update_docTypes(obj, event=None):
    """Event handler: rebuild the stored query after a modification."""
    if not obj:
        return
    # print "update_docTypes", obj.docTypes
    obj.setDocTypesUpdateCollection()
    obj.reindexObject()
@adapter(IELANDocCollection, IObjectAddedEvent)
def enableSyndication(obj, event=None):
    """Event handler: switch on syndication for newly added collections."""
    syn_tool = getToolByName(obj, 'portal_syndication', None)
    if syn_tool is None:
        return
    if syn_tool.isSiteSyndicationAllowed() and not syn_tool.isSyndicationAllowed(obj):
        syn_tool.enableSyndication(obj)
| gpl-3.0 | 1,418,396,289,838,343,000 | 32.503125 | 96 | 0.552467 | false |
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/apps.py | 1 | 7476 | from django.apps import AppConfig
from django.db.models import signals
from django_fsm import signals as fsm_signals
class OpenStackTenantConfig(AppConfig):
    """ OpenStack is a toolkit for building private and public clouds.
    This application adds support for managing OpenStack tenant resources -
    instances, volumes and snapshots.
    """
    name = 'waldur_openstack.openstack_tenant'
    label = 'openstack_tenant'
    verbose_name = 'OpenStackTenant'
    service_name = 'OpenStackTenant'

    def ready(self):
        """Register the backend, quota fields and signal handlers.

        Imports are local because models are not importable until Django's
        app registry is fully populated.
        """
        from waldur_core.quotas.fields import QuotaField, TotalQuotaField
        from waldur_core.structure.models import ServiceSettings, Project, Customer
        from waldur_core.structure import SupportedServices
        from waldur_openstack.openstack.models import Tenant

        from .backend import OpenStackTenantBackend
        from . import handlers, models

        SupportedServices.register_backend(OpenStackTenantBackend)

        # Initialize service settings quotas based on tenant.
        # Each tenant quota gets a mirrored backend quota on ServiceSettings,
        # created only for settings of this service type.
        for quota in Tenant.get_quotas_fields():
            ServiceSettings.add_quota_field(
                name=quota.name,
                quota_field=QuotaField(
                    is_backend=True,
                    default_limit=quota.default_limit,
                    creation_condition=lambda service_settings:
                        service_settings.type == OpenStackTenantConfig.service_name
                )
            )

        # Aggregated CPU / RAM / storage usage per project, summed over the
        # instances (and volumes/snapshots for storage) linked to it.
        Project.add_quota_field(
            name='os_cpu_count',
            quota_field=TotalQuotaField(
                target_models=[models.Instance],
                path_to_scope='service_project_link.project',
                target_field='cores',
            )
        )

        Project.add_quota_field(
            name='os_ram_size',
            quota_field=TotalQuotaField(
                target_models=[models.Instance],
                path_to_scope='service_project_link.project',
                target_field='ram',
            )
        )

        Project.add_quota_field(
            name='os_storage_size',
            quota_field=TotalQuotaField(
                target_models=[models.Volume, models.Snapshot],
                path_to_scope='service_project_link.project',
                target_field='size',
            )
        )

        # The same aggregates, rolled up one level further to the customer.
        Customer.add_quota_field(
            name='os_cpu_count',
            quota_field=TotalQuotaField(
                target_models=[models.Instance],
                path_to_scope='service_project_link.project.customer',
                target_field='cores',
            )
        )

        Customer.add_quota_field(
            name='os_ram_size',
            quota_field=TotalQuotaField(
                target_models=[models.Instance],
                path_to_scope='service_project_link.project.customer',
                target_field='ram',
            )
        )

        Customer.add_quota_field(
            name='os_storage_size',
            quota_field=TotalQuotaField(
                target_models=[models.Volume, models.Snapshot],
                path_to_scope='service_project_link.project.customer',
                target_field='size',
            )
        )

        # Log an event whenever an instance/volume/snapshot is saved.
        for Resource in (models.Instance, models.Volume, models.Snapshot):
            name = Resource.__name__.lower()
            signals.post_save.connect(
                handlers.log_action,
                sender=Resource,
                dispatch_uid='openstack_tenant.handlers.log_%s_action' % name,
            )

        # Generic create/update/delete hooks for every registered resource
        # handler (each handler declares its own resource_model).
        for handler in handlers.resource_handlers:
            model = handler.resource_model
            name = model.__name__.lower()

            fsm_signals.post_transition.connect(
                handler.create_handler,
                sender=model,
                dispatch_uid='openstack_tenant.handlers.create_%s' % name,
            )

            fsm_signals.post_transition.connect(
                handler.update_handler,
                sender=model,
                dispatch_uid='openstack_tenant.handlers.update_%s' % name,
            )

            signals.post_delete.connect(
                handler.delete_handler,
                sender=model,
                dispatch_uid='openstack_tenant.handlers.delete_%s' % name,
            )

        # Event logging for backup schedules.
        signals.post_save.connect(
            handlers.log_backup_schedule_creation,
            sender=models.BackupSchedule,
            dispatch_uid='openstack_tenant.handlers.log_backup_schedule_creation',
        )

        signals.post_save.connect(
            handlers.log_backup_schedule_action,
            sender=models.BackupSchedule,
            dispatch_uid='openstack_tenant.handlers.log_backup_schedule_action',
        )

        signals.pre_delete.connect(
            handlers.log_backup_schedule_deletion,
            sender=models.BackupSchedule,
            dispatch_uid='openstack_tenant.handlers.log_backup_schedule_deletion',
        )

        # Event logging for snapshot schedules.
        signals.post_save.connect(
            handlers.log_snapshot_schedule_creation,
            sender=models.SnapshotSchedule,
            dispatch_uid='openstack_tenant.handlers.log_snapshot_schedule_creation',
        )

        signals.post_save.connect(
            handlers.log_snapshot_schedule_action,
            sender=models.SnapshotSchedule,
            dispatch_uid='openstack_tenant.handlers.log_snapshot_schedule_action',
        )

        signals.pre_delete.connect(
            handlers.log_snapshot_schedule_deletion,
            sender=models.SnapshotSchedule,
            dispatch_uid='openstack_tenant.handlers.log_snapshot_schedule_deletion',
        )

        # Keep the derived OpenStackTenant service settings in sync with
        # their source openstack Tenant.
        signals.post_save.connect(
            handlers.update_service_settings_credentials,
            sender=Tenant,
            dispatch_uid='openstack_tenant.handlers.update_service_settings_credentials',
        )

        signals.post_save.connect(
            handlers.update_service_settings,
            sender=Tenant,
            dispatch_uid='openstack_tenant.handlers.update_service_settings',
        )

        # Propagate certifications and flavor settings from the parent
        # OpenStack service settings to the tenant-level settings.
        signals.m2m_changed.connect(
            handlers.sync_certificates_between_openstack_service_with_openstacktenant_service,
            sender=ServiceSettings.certifications.through,
            dispatch_uid='openstack_tenant.handlers.'
                         'sync_certificates_between_openstack_service_with_openstacktenant_service',
        )

        signals.post_save.connect(
            handlers.copy_certifications_from_openstack_service_to_openstacktenant_service,
            sender=ServiceSettings,
            dispatch_uid='openstack_tenant.handlers.'
                         'copy_certifications_from_openstack_service_to_openstacktenant_service',
        )

        signals.post_save.connect(
            handlers.copy_flavor_exclude_regex_to_openstacktenant_service_settings,
            sender=ServiceSettings,
            dispatch_uid='openstack_tenant.handlers.'
                         'copy_flavor_exclude_regex_to_openstacktenant_service_settings',
        )

        signals.post_save.connect(
            handlers.create_service_from_tenant,
            sender=Tenant,
            dispatch_uid='openstack_tenant.handlers.create_service_from_tenant',
        )

        signals.post_save.connect(
            handlers.sync_price_list_item_for_flavor,
            sender=models.Flavor,
            dispatch_uid='openstack_tenant.handlers.sync_price_list_item_for_flavor',
        )
| mit | -6,473,897,027,792,262,000 | 36.009901 | 100 | 0.593232 | false |
zhreshold/mxnet | python/mxnet/numpy_extension/utils.py | 2 | 8362 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util functions for the numpy module."""
import ctypes
from .. util import is_np_array, is_np_shape
from .. base import _LIB, check_call, string_types, c_str_array, DLPackHandle
from .. base import c_handle_array, c_str, mx_uint, NDArrayHandle, py_str
from ..numpy import ndarray
__all__ = ['save', 'load', 'to_dlpack_for_read', 'to_dlpack_for_write', 'from_dlpack']
PyCapsuleDestructor = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
_c_str_dltensor = c_str('dltensor')
_c_str_used_dltensor = c_str('used_dltensor')
def _dlpack_deleter(pycapsule):
    """Destructor installed on DLPack PyCapsules created by this module.

    Invokes the MXNet-side deleter for the wrapped DLManagedTensor, but only
    while the capsule still carries the 'dltensor' name -- i.e. it has not
    been consumed yet (``from_dlpack`` renames consumed capsules to
    'used_dltensor' and clears this destructor).
    """
    pycapsule = ctypes.c_void_p(pycapsule)
    if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
        ptr = ctypes.c_void_p(
            ctypes.pythonapi.PyCapsule_GetPointer(pycapsule, _c_str_dltensor))
        check_call(_LIB.MXNDArrayCallDLPackDeleter(ptr))
_c_dlpack_deleter = PyCapsuleDestructor(_dlpack_deleter)
def save(file, arr):
    """Save ndarrays to file.

    Accepts a single `ndarray`, a list of `ndarray`s, or a dict mapping
    names (`str`) to `ndarray`s. Examples of filenames:

    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
    - ``hdfs://path/to/file`` (if compiled with HDFS supports)

    Parameters
    ----------
    file : str
        Filename to which the data is saved.
    arr : `ndarray` or list of `ndarray`s or dict of `str` to `ndarray`
        The data to be saved.

    Notes
    -----
    This function can only be called within numpy semantics, i.e.,
    `npx.is_np_shape()` and `npx.is_np_array()` must both return true.
    """
    if not (is_np_shape() and is_np_array()):
        raise ValueError('Cannot save `mxnet.numpy.ndarray` in legacy mode. Please activate'
                         ' numpy semantics by calling `npx.set_np()` in the global scope'
                         ' before calling this function.')
    # Normalize a single array to a one-element list.
    data = [arr] if isinstance(arr, ndarray) else arr
    if isinstance(data, dict):
        if not all(isinstance(k, string_types) for k in data.keys()) or \
                not all(isinstance(v, ndarray) for v in data.values()):
            raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
        keys = c_str_array(data.keys())
        handles = c_handle_array(data.values())
    elif isinstance(data, list):
        if not all(isinstance(v, ndarray) for v in data):
            raise TypeError('Only accepts dict str->ndarray or list of ndarrays')
        # No names: the backend stores an anonymous list.
        keys = None
        handles = c_handle_array(data)
    else:
        raise ValueError("data needs to either be a ndarray, dict of (str, ndarray) pairs "
                         "or a list of ndarrays.")
    check_call(_LIB.MXNDArraySave(c_str(file),
                                  mx_uint(len(handles)),
                                  handles,
                                  keys))
def load(file):
    """Load an array (or several) from file.

    See more details in ``save``.

    Parameters
    ----------
    file : str
        The filename.

    Returns
    -------
    result : list of ndarrays or dict of str -> ndarray
        Data stored in the file.

    Notes
    -----
    This function can only be called within numpy semantics, i.e.,
    `npx.is_np_shape()` and `npx.is_np_array()` must both return true.
    """
    if not (is_np_shape() and is_np_array()):
        raise ValueError('Cannot load `mxnet.numpy.ndarray` in legacy mode. Please activate'
                         ' numpy semantics by calling `npx.set_np()` in the global scope'
                         ' before calling this function.')
    if not isinstance(file, string_types):
        raise TypeError('file required to be a string')
    num_arrays = mx_uint()
    num_names = mx_uint()
    handle_arr = ctypes.POINTER(NDArrayHandle)()
    name_arr = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(file),
                                  ctypes.byref(num_arrays),
                                  ctypes.byref(handle_arr),
                                  ctypes.byref(num_names),
                                  ctypes.byref(name_arr)))
    # Unnamed arrays come back as a list; named ones as a dict.
    if num_names.value == 0:
        return [ndarray(NDArrayHandle(handle_arr[i]))
                for i in range(num_arrays.value)]
    assert num_names.value == num_arrays.value
    return {py_str(name_arr[i]): ndarray(NDArrayHandle(handle_arr[i]))
            for i in range(num_arrays.value)}
def from_dlpack(dlpack):
    """Returns a np.ndarray backed by a dlpack tensor.

    Parameters
    ----------
    dlpack: PyCapsule (the pointer of DLManagedTensor)
        input data

    Returns
    -------
    np.ndarray
        an ndarray backed by a dlpack tensor

    Examples
    --------
    >>> x = mx.np.ones((2,3))
    >>> y = mx.npx.to_dlpack_for_read(x)
    >>> type(y)
    <class 'PyCapsule'>
    >>> z = mx.npx.from_dlpack(y)
    >>> type(z)
    <class 'mxnet.numpy.ndarray'>
    >>> z
    array([[1., 1., 1.],
           [1., 1., 1.]])

    >>> w = mx.npx.to_dlpack_for_write(x)
    >>> type(w)
    <class 'PyCapsule'>
    >>> u = mx.npx.from_dlpack(w)
    >>> u += 1
    >>> x
    array([[2., 2., 2.],
           [2., 2., 2.]])
    """
    handle = NDArrayHandle()
    dlpack = ctypes.py_object(dlpack)
    # A capsule may be consumed only once; consumed capsules have been
    # renamed to 'used_dltensor' and therefore fail this check.
    assert ctypes.pythonapi.PyCapsule_IsValid(dlpack, _c_str_dltensor), ValueError(
        'Invalid DLPack Tensor. DLTensor capsules can be consumed only once.')
    dlpack_handle = ctypes.c_void_p(ctypes.pythonapi.PyCapsule_GetPointer(dlpack, _c_str_dltensor))
    check_call(_LIB.MXNDArrayFromDLPackEx(dlpack_handle, False, ctypes.byref(handle)))
    # Rename PyCapsule (DLPack) so it cannot be consumed a second time.
    ctypes.pythonapi.PyCapsule_SetName(dlpack, _c_str_used_dltensor)
    # delete the deleter of the old dlpack: ownership of the tensor has
    # passed to the new ndarray, so the capsule must not free it anymore.
    ctypes.pythonapi.PyCapsule_SetDestructor(dlpack, None)
    return ndarray(handle=handle)
def to_dlpack_for_read(data):
    """Return a DLPack capsule (DLManagedTensor pointer) that is a
    read-only reference view of *data*.

    Blocks until all previous write operations on *data* are finished, so
    the returned view is safe to read.

    Parameters
    ----------
    data: np.ndarray
        input data.

    Returns
    -------
    PyCapsule (the pointer of DLManagedTensor)
        a reference view of ndarray that represents as DLManagedTensor.
    """
    data.wait_to_read()
    dlpack_ptr = DLPackHandle()
    check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack_ptr)))
    return ctypes.pythonapi.PyCapsule_New(dlpack_ptr, _c_str_dltensor,
                                          _c_dlpack_deleter)
def to_dlpack_for_write(data):
    """Return a DLPack capsule (DLManagedTensor pointer) that is a
    writable reference view of *data*.

    Blocks until all previous read and write operations on *data* are
    finished, so the returned view is safe to mutate.

    Parameters
    ----------
    data: np.ndarray
        input data.

    Returns
    -------
    PyCapsule (the pointer of DLManagedTensor)
        a reference view of np.ndarray that represents as DLManagedTensor.
    """
    check_call(_LIB.MXNDArrayWaitToWrite(data.handle))
    dlpack_ptr = DLPackHandle()
    check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack_ptr)))
    return ctypes.pythonapi.PyCapsule_New(dlpack_ptr, _c_str_dltensor,
                                          _c_dlpack_deleter)
| apache-2.0 | -4,836,169,322,193,987,000 | 33.553719 | 99 | 0.609184 | false |
maebert/jrnl | jrnl/util.py | 1 | 4405 | #!/usr/bin/env python
import sys
import os
import getpass as gp
import yaml
if "win32" in sys.platform:
import colorama
colorama.init()
import re
import tempfile
import subprocess
import unicodedata
import shlex
import logging
log = logging.getLogger(__name__)
WARNING_COLOR = "\033[33m"
ERROR_COLOR = "\033[31m"
RESET_COLOR = "\033[0m"
# Based on Segtok by Florian Leitner
# https://github.com/fnl/segtok
SENTENCE_SPLITTER = re.compile(r"""
( # A sentence ends at one of two sequences:
[.!?\u203C\u203D\u2047\u2048\u2049\u3002\uFE52\uFE57\uFF01\uFF0E\uFF1F\uFF61] # Either, a sequence starting with a sentence terminal,
[\'\u2019\"\u201D]? # an optional right quote,
[\]\)]* # optional closing brackets and
\s+ # a sequence of required spaces.
| # Otherwise,
\n # a sentence also terminates newlines.
)""", re.VERBOSE)
class UserAbort(Exception):
    """Exception used to signal that the user aborted the current operation."""
    pass
getpass = gp.getpass
def get_password(validator, keychain=None, max_attempts=3):
    """Obtain a password (from the OS keychain or by prompting) and validate it.

    :param validator: callable taking a password string; returns a non-None
        result on success, or None when the password is wrong
    :param keychain: journal name used to look up a stored password, or None
    :param max_attempts: total number of attempts allowed
    :returns: the validator's (non-None) result
    :raises SystemExit: when every attempt fails
    """
    pwd_from_keychain = keychain and get_keychain(keychain)
    password = pwd_from_keychain or getpass()
    result = validator(password)
    # The stored keychain password is stale -- remove it so future runs
    # prompt interactively instead of failing again with the same value.
    if result is None and pwd_from_keychain:
        set_keychain(keychain, None)
    attempt = 1
    while result is None and attempt < max_attempts:
        print("Wrong password, try again.", file=sys.stderr)
        # Use the module-level `getpass` alias consistently (it is gp.getpass).
        password = getpass()
        result = validator(password)
        attempt += 1
    if result is not None:
        return result
    else:
        print("Extremely wrong password.", file=sys.stderr)
        sys.exit(1)
def get_keychain(journal_name):
    """Look up the stored password for *journal_name* in the OS keyring.

    Returns "" when the keyring backend raises a RuntimeError.
    """
    import keyring
    try:
        stored = keyring.get_password('jrnl', journal_name)
    except RuntimeError:
        stored = ""
    return stored
def set_keychain(journal_name, password):
    """Store *password* for *journal_name* in the OS keyring.

    Passing ``password=None`` deletes any stored entry instead (errors from
    the backend are ignored in that case).
    """
    import keyring
    if password is not None:
        keyring.set_password('jrnl', journal_name, password)
        return
    try:
        keyring.delete_password('jrnl', journal_name)
    except RuntimeError:
        pass
def yesno(prompt, default=True):
    """Ask a yes/no question on stdin and return the boolean answer.

    Any input other than 'y'/'n' (case-insensitive) yields *default*.
    """
    hint = '[Y/n]' if default else '[y/N]'
    answer = input(f"{prompt.strip()} {hint} ").lower()
    if answer == "y":
        return True
    if answer == "n":
        return False
    return default
def load_config(config_path):
    """Parse the YAML configuration file at *config_path*.
    """
    with open(config_path) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def scope_config(config, journal_name):
    """Return a copy of *config* specialised for the given journal.

    A per-journal dict entry overrides the top-level defaults; a plain
    string entry is taken as the journal's file path. The 'journals' key is
    dropped from the returned copy. Unknown journal names return *config*
    unchanged.
    """
    if journal_name not in config['journals']:
        return config
    scoped = dict(config)
    journal_conf = scoped['journals'][journal_name]
    if type(journal_conf) is dict:  # We can override the default config on a by-journal basis
        log.debug('Updating configuration with specific journal overrides %s', journal_conf)
        scoped.update(journal_conf)
    else:
        # A bare string simply points at the journal file.
        scoped['journal'] = journal_conf
    del scoped['journals']
    return scoped
def get_text_from_editor(config, template=""):
    """Open the user's editor on a temporary file and return the saved text.

    :param config: configuration dict; ``config['editor']`` is the editor
        command (a string, or -- per the fallback below -- presumably already
        a list of arguments)
    :param template: initial contents written to the temporary file
    :returns: the raw text read back from the file (may be empty)
    """
    filehandle, tmpfile = tempfile.mkstemp(prefix="jrnl", text=True, suffix=".txt")
    with open(tmpfile, 'w', encoding="utf-8") as f:
        if template:
            f.write(template)
    try:
        # posix=False on Windows keeps backslashes in paths intact when
        # splitting the editor command line.
        subprocess.call(shlex.split(config['editor'], posix="win" not in sys.platform) + [tmpfile])
    except AttributeError:
        # config['editor'] was not a string (shlex.split raised); assume it
        # is already a list of command arguments.
        subprocess.call(config['editor'] + [tmpfile])
    with open(tmpfile, "r", encoding="utf-8") as f:
        raw = f.read()
    os.close(filehandle)  # close the fd returned by mkstemp
    os.remove(tmpfile)
    if not raw:
        print('[Nothing saved to file]', file=sys.stderr)

    return raw
def colorize(string):
    """Wrap *string* in the ANSI escape codes for cyan text."""
    return "\033[36m%s\033[39m" % string
def slugify(string):
    """Turn *string* into a URL-friendly slug.

    Decomposes the text (NFKD), drops punctuation, lowercases, and collapses
    runs of whitespace/hyphens into single hyphens.

    Based on public domain code from https://github.com/zacharyvoase/slugify
    """
    decomposed = str(unicodedata.normalize('NFKD', string))
    cleaned = re.sub(r'[^\w\s-]', '', decomposed)
    return re.sub(r'[-\s]+', '-', cleaned.strip().lower())
def split_title(text):
    """Split *text* into ``(first sentence, remaining text)``.

    When no sentence boundary is found, the whole text is the title and the
    remainder is empty.
    """
    boundary = SENTENCE_SPLITTER.search(text)
    if not boundary:
        return text, ""
    cut = boundary.end()
    return text[:cut].strip(), text[cut:].strip()
| mit | 4,895,603,301,916,000,000 | 28.965986 | 152 | 0.638138 | false |
hungtt57/matchmaker | lib/python2.7/site-packages/ndg/httpsclient/utils.py | 3 | 15748 | """Utilities using NDG HTTPS Client, including a main module that can be used to
fetch from a URL.
"""
__author__ = "R B Wilkinson"
__date__ = "09/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = '$Id$'
import logging
from optparse import OptionParser
import os
import sys
if sys.version_info[0] > 2:
import http.cookiejar as cookiejar_
import http.client as http_client_
from urllib.request import Request as Request_
from urllib.request import HTTPHandler as HTTPHandler_
from urllib.request import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib.request import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib.request import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib.request import ProxyHandler as ProxyHandler_
from urllib.error import HTTPError as HTTPError_
import urllib.parse as urlparse_
else:
import cookielib as cookiejar_
import httplib as http_client_
from urllib2 import Request as Request_
from urllib2 import HTTPHandler as HTTPHandler_
from urllib2 import HTTPCookieProcessor as HTTPCookieProcessor_
from urllib2 import HTTPBasicAuthHandler as HTTPBasicAuthHandler_
from urllib2 import HTTPPasswordMgrWithDefaultRealm as \
HTTPPasswordMgrWithDefaultRealm_
from urllib2 import ProxyHandler as ProxyHandler_
from urllib2 import HTTPError as HTTPError_
import urlparse as urlparse_
from ndg.httpsclient.urllib2_build_opener import build_opener
from ndg.httpsclient.https import HTTPSContextHandler
from ndg.httpsclient import ssl_context_util
log = logging.getLogger(__name__)
class AccumulatingHTTPCookieProcessor(HTTPCookieProcessor_):
    """Cookie processor that merges newly applicable cookies into any Cookie
    header already present on the request, instead of replacing it as the
    stock HTTPCookieProcessor does.
    """
    def http_request(self, request):
        """Attach cookies from the jar to a HTTP request.

        @param request: request to process
        @type request: urllib2.Request
        @return: request
        @rtype: urllib2.Request
        """
        COOKIE_HEADER_NAME = "Cookie"
        # Let the jar act on a scratch copy of the request so the real
        # request's headers stay untouched while we compute which cookies
        # apply.
        probe = Request_(request.get_full_url(), request.data, {},
                         request.origin_req_host,
                         request.unverifiable)
        self.cookiejar.add_cookie_header(probe)
        new_cookies = probe.get_header(COOKIE_HEADER_NAME)
        if new_cookies:
            if not request.has_header(COOKIE_HEADER_NAME):
                # No existing cookies so just set new ones.
                request.add_unredirected_header(COOKIE_HEADER_NAME, new_cookies)
            else:
                # Merge new cookies with existing ones.
                existing = request.get_header(COOKIE_HEADER_NAME)
                request.add_unredirected_header(
                    COOKIE_HEADER_NAME, '; '.join([existing, new_cookies]))
        return request

    # Process cookies for HTTPS in the same way.
    https_request = http_request
class URLFetchError(Exception):
    """Error fetching content from URL (non-OK HTTP status or lower-level
    failure reported by ``open_url``)."""
def fetch_from_url(url, config, data=None, handlers=None):
    """Return the body retrieved from a URL.

    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @param handlers: list of custom urllib2 handlers to add to the request
    @return: data retrieved from URL
    @raise URLFetchError: when the response status is not 200 OK
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    if return_code != http_client_.OK:
        raise URLFetchError(return_message)
    body = response.read()
    response.close()
    return body
def fetch_from_url_to_file(url, config, output_file, data=None, handlers=None):
    """Writes data retrieved from a URL to a file.

    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param output_file: output file
    @type output_file: basestring
    @param data: HTTP POST data
    @param handlers: list of custom urllib2 handlers to add to the request
    @return: tuple (
        returned HTTP status code or 0 if an error occurred
        returned message
        boolean indicating whether access was successful)
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    if return_code == http_client_.OK:
        return_data = response.read()
        response.close()
        # Open in binary mode: under Python 3 response.read() returns bytes
        # and writing bytes to a text-mode file raises TypeError. The
        # with-statement also guarantees the file is closed on error.
        with open(output_file, "wb") as outfile:
            outfile.write(return_data)
    return return_code, return_message, return_code == http_client_.OK
def fetch_stream_from_url(url, config, data=None, handlers=None):
    """Return the open response stream for a URL.

    @param url: URL to attempt to open
    @type url: basestring
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return: open response (file-like)
    @rtype: file derived type
    @raise URLFetchError: when the response status is not 200 OK
    """
    return_code, return_message, response = open_url(url, config, data=data,
                                                     handlers=handlers)
    if return_code != http_client_.OK:
        raise URLFetchError(return_message)
    return response
def open_url(url, config, data=None, handlers=None):
    """Attempts to open a connection to a specified URL.

    @param url: URL to attempt to open
    @param config: SSL context configuration
    @type config: Configuration
    @param data: HTTP POST data
    @type data: str
    @param handlers: list of custom urllib2 handlers to add to the request
    @type handlers: iterable
    @return: tuple (
        returned HTTP status code or 0 if an error occurred
        returned message or error description
        response object)
    """
    debuglevel = 1 if config.debug else 0

    # Set up handlers for URL opener.
    if config.cookie:
        cj = config.cookie
    else:
        cj = cookiejar_.CookieJar()

    # Use a cookie processor that accumulates cookies when redirects occur so
    # that an application can redirect for authentication and retain both any
    # cookies for the application and the security system (c.f.,
    # urllib2.HTTPCookieProcessor which replaces cookies).
    cookie_handler = AccumulatingHTTPCookieProcessor(cj)

    # Work on a copy so the caller's handler list is not mutated below.
    handlers = list(handlers) if handlers else []
    handlers.append(cookie_handler)

    if config.debug:
        http_handler = HTTPHandler_(debuglevel=debuglevel)
        https_handler = HTTPSContextHandler(config.ssl_context,
                                            debuglevel=debuglevel)
        handlers.extend([http_handler, https_handler])

    if config.http_basicauth:
        # currently only supports http basic auth
        auth_handler = HTTPBasicAuthHandler_(HTTPPasswordMgrWithDefaultRealm_())
        auth_handler.add_password(realm=None, uri=url,
                                  user=config.http_basicauth[0],
                                  passwd=config.http_basicauth[1])
        handlers.append(auth_handler)

    # Explicitly remove proxy handling if the host is one listed in the value of
    # the no_proxy environment variable because urllib2 does use proxy settings
    # set via http_proxy and https_proxy, but does not take the no_proxy value
    # into account.
    if not _should_use_proxy(url, config.no_proxy):
        handlers.append(ProxyHandler_({}))
        log.debug("Not using proxy")
    elif config.proxies:
        handlers.append(ProxyHandler_(config.proxies))
        log.debug("Configuring proxies: %s" % config.proxies)

    opener = build_opener(*handlers, ssl_context=config.ssl_context)

    headers = config.headers
    if headers is None:
        headers = {}
    request = Request_(url, data, headers)

    # Open the URL and check the response.
    return_code = 0
    return_message = ''
    response = None
    # NOTE: an earlier revision (marked FIXME) also opened the request once
    # outside this try block, which sent every request twice and bypassed
    # the error handling; the request is now opened exactly once.
    try:
        response = opener.open(request)
        return_message = response.msg
        return_code = response.code
        if log.isEnabledFor(logging.DEBUG):
            for index, cookie in enumerate(cj):
                log.debug("%s : %s", index, cookie)
    except HTTPError_ as exc:
        return_code = exc.code
        return_message = "Error: %s" % exc.msg
        if log.isEnabledFor(logging.DEBUG):
            log.debug("%s %s", exc.code, exc.msg)
    except Exception as exc:
        return_message = "Error: %s" % exc.__str__()
        if log.isEnabledFor(logging.DEBUG):
            import traceback
            log.debug(traceback.format_exc())

    return (return_code, return_message, response)
def _should_use_proxy(url, no_proxy=None):
    """Determine whether a proxy should be used to open a connection to the
    specified URL, based on the no_proxy argument or environment variable.

    @param url: URL
    @type url: basestring or urllib2.Request
    @param no_proxy: comma separated list of hosts to contact directly, or
        None to consult the ``no_proxy`` environment variable
    @return: True if a proxy should be used, False if the host is excluded
    @rtype: bool
    """
    if no_proxy is None:
        no_proxy_effective = os.environ.get('no_proxy', '')
    else:
        no_proxy_effective = no_proxy

    hostname = urlparse_.urlparse(_url_as_string(url)).hostname
    excluded_hosts = (h.strip() for h in no_proxy_effective.split(','))
    return all(hostname != host for host in excluded_hosts)
def _url_as_string(url):
    """Return the URL string from a URL value that is either a string or
    urllib2.Request.

    @param url: URL
    @type url: basestring or urllib2.Request
    @return: URL string
    @rtype: basestring
    """
    if isinstance(url, str):
        return url
    if isinstance(url, Request_):
        return url.get_full_url()
    raise TypeError("Expected type %r or %r" %
                    (str, Request_))
class Configuration(object):
    """Connection configuration bundle passed to ``open_url`` and the
    ``fetch_*`` helpers: SSL context plus proxy, cookie, auth and header
    settings.
    """

    def __init__(self, ssl_context, debug=False, proxies=None, no_proxy=None,
                 cookie=None, http_basicauth=None, headers=None):
        """
        @param ssl_context: SSL context to use with this configuration
        @type ssl_context: OpenSSL.SSL.Context
        @param debug: if True, output debugging information
        @type debug: bool
        @param proxies: proxies to use for
        @type proxies: dict with basestring keys and values
        @param no_proxy: hosts for which a proxy should not be used
        @type no_proxy: basestring
        @param cookie: cookies to set for request
        @type cookie: cookielib.CookieJar (python 3 - http.cookiejar)
        @param http_basicauth: http authentication, or None
        @type http_basicauth: tuple of (username,password)
        @param headers: http headers
        @type headers: dict
        """
        self.ssl_context = ssl_context
        self.debug = debug
        self.proxies = proxies
        self.no_proxy = no_proxy
        self.cookie = cookie
        self.http_basicauth = http_basicauth
        self.headers = headers
def main():
    '''Utility to fetch data using HTTP or HTTPS GET from a specified URL.

    Command-line entry point: parses options, builds an SSL context and
    Configuration, then fetches the URL to stdout or to a file (-f).
    '''
    parser = OptionParser(usage="%prog [options] url")
    parser.add_option("-c", "--certificate", dest="cert_file", metavar="FILE",
                      default=os.path.expanduser("~/credentials.pem"),
                      help="Certificate file - defaults to $HOME/credentials.pem")
    parser.add_option("-k", "--private-key", dest="key_file", metavar="FILE",
                      default=None,
                      help="Private key file - defaults to the certificate file")
    parser.add_option("-t", "--ca-certificate-dir", dest="ca_dir",
                      metavar="PATH",
                      default=None,
                      help="Trusted CA certificate file directory")
    parser.add_option("-d", "--debug", action="store_true", dest="debug",
                      default=False,
                      help="Print debug information.")
    parser.add_option("-p", "--post-data-file", dest="data_file",
                      metavar="FILE", default=None,
                      help="POST data file")
    parser.add_option("-f", "--fetch", dest="output_file", metavar="FILE",
                      default=None, help="Output file")
    parser.add_option("-n", "--no-verify-peer", action="store_true",
                      dest="no_verify_peer", default=False,
                      help="Skip verification of peer certificate.")
    parser.add_option("-a", "--basicauth", dest="basicauth",
                      metavar="USER:PASSWD",
                      default=None,
                      help="HTTP authentication credentials")
    parser.add_option("--header", action="append", dest="headers",
                      metavar="HEADER: VALUE",
                      help="Add HTTP header to request")
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.error("Incorrect number of arguments")

    url = args[0]

    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # Missing key/cert/CA paths silently fall back to None rather than erroring.
    if options.key_file and os.path.exists(options.key_file):
        key_file = options.key_file
    else:
        key_file = None

    if options.cert_file and os.path.exists(options.cert_file):
        cert_file = options.cert_file
    else:
        cert_file = None

    if options.ca_dir and os.path.exists(options.ca_dir):
        ca_dir = options.ca_dir
    else:
        ca_dir = None

    verify_peer = not options.no_verify_peer

    if options.data_file and os.path.exists(options.data_file):
        data_file = open(options.data_file)
        data = data_file.read()
        data_file.close()
    else:
        data = None

    if options.basicauth:
        # Split only on the first ':' so passwords may contain colons.
        http_basicauth = options.basicauth.split(':', 1)
    else:
        http_basicauth = None

    headers = {}
    if options.headers:
        for h in options.headers:
            key, val = h.split(':', 1)
            headers[key.strip()] = val.lstrip()

    # If a private key file is not specified, the key is assumed to be stored in
    # the certificate file.
    ssl_context = ssl_context_util.make_ssl_context(key_file,
                                                    cert_file,
                                                    None,
                                                    ca_dir,
                                                    verify_peer,
                                                    url)

    config = Configuration(ssl_context,
                           options.debug,
                           http_basicauth=http_basicauth,
                           headers=headers)
    if options.output_file:
        return_code, return_message = fetch_from_url_to_file(
            url,
            config,
            options.output_file,
            data)[:2]
        raise SystemExit(return_code, return_message)
    else:
        data = fetch_from_url(url, config)
        print(data)
if __name__ == '__main__':
    logging.basicConfig()
    main()
| mit | 196,997,015,403,678,720 | 37.038647 | 82 | 0.599568 | false |
openprocurement/openprocurement.auctions.dgf | openprocurement/auctions/dgf/tests/award.py | 1 | 16779 | # -*- coding: utf-8 -*-
import unittest
from datetime import timedelta
from openprocurement.auctions.core.utils import get_now
from openprocurement.auctions.core.tests.award import (
AuctionLotAwardResourceTestMixin,
Auction2LotAwardResourceTestMixin,
AuctionAwardDocumentResourceTestMixin,
AuctionLotAwardComplaintResourceTestMixin,
Auction2LotAwardComplaintResourceTestMixin,
AuctionAwardComplaintDocumentResourceTestMixin,
Auction2LotAwardComplaintDocumentResourceTestMixin,
Auction2LotAwardDocumentResourceTestMixin
)
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.core.tests.blanks.award_blanks import (
get_auction_award_complaint,
get_auction_award_complaints
)
from openprocurement.auctions.core.plugins.awarding.v3.tests.award import (
AuctionAwardProcessTestMixin,
CreateAuctionAwardTestMixin
)
from openprocurement.auctions.dgf.tests.base import (
BaseAuctionWebTest, test_bids,
test_lots, test_financial_auction_data,
test_financial_bids, test_financial_organization,
)
class CreateAuctionAwardTest(BaseAuctionWebTest, CreateAuctionAwardTestMixin):
    """Award-creation tests; the test methods come from
    CreateAuctionAwardTestMixin."""
    # initial_data = auction_data
    initial_status = 'active.auction'
    initial_bids = test_bids
class AuctionAwardProcessTest(BaseAuctionWebTest, AuctionAwardProcessTestMixin):
    """Award-process tests; the test methods come from
    AuctionAwardProcessTestMixin."""
    # initial_data = auction_data
    initial_status = 'active.auction'
    initial_bids = test_bids
    docservice = True
    def setUp(self):
        """Drive the auction to qualification so awards exist: post the bid
        results as the 'auction' system user and remember the two generated
        awards for use in the tests.
        """
        super(AuctionAwardProcessTest, self).setUp()
        authorization = self.app.authorization
        self.app.authorization = ('Basic', ('auction', ''))
        now = get_now()
        # Earlier bids get later timestamps (now - i seconds).
        auction_result = {
            'bids': [
                {
                    "id": b['id'],
                    "date": (now - timedelta(seconds=i)).isoformat(),
                    "value": b['value']
                }
                for i, b in enumerate(self.initial_bids)
            ]
        }
        response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')
        auction = response.json['data']
        self.assertEqual('active.qualification', auction["status"])
        # The auction flow produced (at least) two awards; keep both ids.
        self.first_award = auction['awards'][0]
        self.second_award = auction['awards'][1]
        self.first_award_id = self.first_award['id']
        self.second_award_id = self.second_award['id']
        self.app.authorization = authorization
def upload_auction_protocol(self, award):
award_id = award['id']
bid_token = self.initial_bids_tokens[award['bid_id']]
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id, bid_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id, bid_token),
{"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.post('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id, self.auction_token), upload_files=[('file', 'auction_protocol.pdf', 'content')])
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
key = response.json["data"]["url"].split('?')[-1]
response = self.app.patch_json(
'/auctions/{}/awards/{}/documents/{}?acc_token={}'.format(self.auction_id, award_id, doc_id,
self.auction_token),
{"data": {
"description": "auction protocol",
"documentType": 'auctionProtocol'
}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(doc_id, response.json["data"]["id"])
self.assertIn("documentType", response.json["data"])
self.assertEqual(response.json["data"]["documentType"], 'auctionProtocol')
response = self.app.get('/auctions/{}/awards/{}/documents'.format(self.auction_id, award_id, doc_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual('auctionProtocol', response.json["data"][0]["documentType"])
self.assertEqual('auction_protocol.pdf', response.json["data"][0]["title"])
self.assertEqual('bid_owner', response.json["data"][0]["author"])
self.assertEqual('auctionProtocol', response.json["data"][1]["documentType"])
self.assertEqual('auction_owner', response.json["data"][1]["author"])
@unittest.skip("option not available")
class AuctionLotAwardResourceTest(BaseAuctionWebTest, AuctionLotAwardResourceTestMixin):
initial_status = 'active.qualification'
initial_lots = test_lots
initial_bids = test_bids
@unittest.skip("option not available")
class Auction2LotAwardResourceTest(BaseAuctionWebTest, Auction2LotAwardResourceTestMixin):
initial_status = 'active.qualification'
initial_lots = 2 * test_lots
initial_bids = test_bids
# test_create_auction_award_2_lots = snitch(create_auction_award_2_lots)
# test_patch_auction_award_2_lots = snitch(patch_auction_award_2_lots)
@unittest.skip("option not available")
class AuctionLotAwardComplaintResourceTest(BaseAuctionWebTest,
AuctionLotAwardComplaintResourceTestMixin):
# initial_data = auction_data
initial_status = 'active.qualification'
initial_lots = test_lots
initial_bids = test_bids
def setUp(self):
super(AuctionLotAwardComplaintResourceTest, self).setUp()
# Create award
bid = self.initial_bids[0]
response = self.app.post_json('/auctions/{}/awards'.format(
self.auction_id), {
'data': {'suppliers': [self.initial_organization], 'status': 'pending', 'bid_id': bid['id'],
'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
@unittest.skip("option not available")
class Auction2LotAwardComplaintResourceTest(BaseAuctionWebTest,
Auction2LotAwardComplaintResourceTestMixin):
initial_status = 'active.qualification'
initial_lots = 2 * test_lots
initial_bids = test_bids
test_get_auction_award_complaint = snitch(get_auction_award_complaint)
test_get_auction_award_complaints = snitch(get_auction_award_complaints)
@unittest.skip("option not available")
class AuctionAwardComplaintDocumentResourceTest(BaseAuctionWebTest,
AuctionAwardComplaintDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_bids
def setUp(self):
super(AuctionAwardComplaintDocumentResourceTest, self).setUp()
# Create award
response = self.app.post_json('/auctions/{}/awards'.format(
self.auction_id), {'data': {'suppliers': [self.initial_organization], 'status': 'pending',
'bid_id': self.initial_bids[0]['id']}})
award = response.json['data']
self.award_id = award['id']
# Create complaint for award
response = self.app.post_json('/auctions/{}/awards/{}/complaints'.format(
self.auction_id, self.award_id), {
'data': {'title': 'complaint title', 'description': 'complaint description',
'author': self.initial_organization}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
@unittest.skip("option not available")
class Auction2LotAwardComplaintDocumentResourceTest(BaseAuctionWebTest,
Auction2LotAwardComplaintDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_bids
initial_lots = 2 * test_lots
def setUp(self):
super(Auction2LotAwardComplaintDocumentResourceTest, self).setUp()
# Create award
bid = self.initial_bids[0]
response = self.app.post_json('/auctions/{}/awards'.format(
self.auction_id), {
'data': {'suppliers': [self.initial_organization], 'status': 'pending', 'bid_id': bid['id'],
'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
# Create complaint for award
response = self.app.post_json('/auctions/{}/awards/{}/complaints'.format(
self.auction_id, self.award_id), {
'data': {'title': 'complaint title', 'description': 'complaint description',
'author': self.initial_organization}})
complaint = response.json['data']
self.complaint_id = complaint['id']
self.complaint_owner_token = response.json['access']['token']
class AuctionAwardDocumentResourceTest(BaseAuctionWebTest,
AuctionAwardDocumentResourceTestMixin):
initial_status = 'active.auction'
initial_bids = test_bids
def setUp(self):
super(AuctionAwardDocumentResourceTest, self).setUp()
authorization = self.app.authorization
self.app.authorization = ('Basic', ('auction', ''))
now = get_now()
auction_result = {
'bids': [
{
"id": b['id'],
"date": (now - timedelta(seconds=i)).isoformat(),
"value": b['value']
}
for i, b in enumerate(self.initial_bids)
]
}
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual('active.qualification', auction["status"])
self.first_award = auction['awards'][0]
self.second_award = auction['awards'][1]
self.first_award_id = self.first_award['id']
self.second_award_id = self.second_award['id']
self.app.authorization = authorization
# test_not_found_document = snitch(not_found_document)
# test_create_auction_award_document = snitch(create_auction_award_document)
# test_put_auction_award_document = snitch(put_auction_award_document)
# test_patch_auction_award_document = snitch(patch_auction_award_document)
@unittest.skip("option not available")
class Auction2LotAwardDocumentResourceTest(BaseAuctionWebTest,
Auction2LotAwardDocumentResourceTestMixin):
initial_status = 'active.qualification'
initial_bids = test_bids
initial_lots = 2 * test_lots
def setUp(self):
super(Auction2LotAwardDocumentResourceTest, self).setUp()
# Create award
bid = self.initial_bids[0]
response = self.app.post_json('/auctions/{}/awards'.format(
self.auction_id), {
'data': {'suppliers': [self.initial_organization], 'status': 'pending', 'bid_id': bid['id'],
'lotID': bid['lotValues'][0]['relatedLot']}})
award = response.json['data']
self.award_id = award['id']
class CreateFinancialAuctionAwardTest(CreateAuctionAwardTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
class FinancialAuctionAwardProcessTest(AuctionAwardProcessTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuctionLotAwardResourceTest(AuctionLotAwardResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuction2LotAwardResourceTest(Auction2LotAwardResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuctionLotAwardComplaintResourceTest(AuctionLotAwardComplaintResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
@unittest.skip("option not available")
class FinancialAuction2LotAwardComplaintResourceTest(Auction2LotAwardComplaintResourceTest):
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuctionAwardComplaintDocumentResourceTest(AuctionAwardComplaintDocumentResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuction2LotAwardComplaintDocumentResourceTest(Auction2LotAwardComplaintDocumentResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
class FinancialAuctionAwardDocumentResourceTest(AuctionAwardDocumentResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
@unittest.skip("option not available")
class FinancialAuction2LotAwardDocumentResourceTest(Auction2LotAwardDocumentResourceTest):
initial_bids = test_financial_bids
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
def suite():
tests = unittest.TestSuite()
tests.addTest(unittest.makeSuite(CreateAuctionAwardTest))
tests.addTest(unittest.makeSuite(AuctionAwardProcessTest))
tests.addTest(unittest.makeSuite(AuctionLotAwardResourceTest))
tests.addTest(unittest.makeSuite(Auction2LotAwardResourceTest))
tests.addTest(unittest.makeSuite(AuctionLotAwardComplaintResourceTest))
tests.addTest(unittest.makeSuite(Auction2LotAwardComplaintResourceTest))
tests.addTest(unittest.makeSuite(AuctionAwardComplaintDocumentResourceTest))
tests.addTest(unittest.makeSuite(Auction2LotAwardComplaintDocumentResourceTest))
tests.addTest(unittest.makeSuite(AuctionAwardDocumentResourceTest))
tests.addTest(unittest.makeSuite(Auction2LotAwardDocumentResourceTest))
tests.addTest(unittest.makeSuite(CreateFinancialAuctionAwardTest))
tests.addTest(unittest.makeSuite(FinancialAuctionAwardProcessTest))
tests.addTest(unittest.makeSuite(FinancialAuctionLotAwardResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuction2LotAwardResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuctionLotAwardComplaintResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuction2LotAwardComplaintResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuctionAwardComplaintDocumentResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuction2LotAwardComplaintDocumentResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuctionAwardDocumentResourceTest))
tests.addTest(unittest.makeSuite(FinancialAuction2LotAwardDocumentResourceTest))
return tests
if __name__ == '__main__':
unittest.main(defaultTest='suite') | apache-2.0 | -7,997,447,289,590,466,000 | 43.391534 | 119 | 0.680017 | false |
ashishdeshpande/robotframework | src/robot/model/filter.py | 22 | 3465 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .tags import TagPatterns
from .namepatterns import SuiteNamePatterns, TestNamePatterns
from .visitor import SuiteVisitor
class EmptySuiteRemover(SuiteVisitor):
def end_suite(self, suite):
suite.suites = [s for s in suite.suites if s.test_count]
def visit_test(self, test):
pass
def visit_keyword(self, kw):
pass
class Filter(EmptySuiteRemover):
def __init__(self, include_suites=None, include_tests=None,
include_tags=None, exclude_tags=None):
self.include_suites = include_suites
self.include_tests = include_tests
self.include_tags = include_tags
self.exclude_tags = exclude_tags
@setter
def include_suites(self, suites):
return SuiteNamePatterns(suites) \
if not isinstance(suites, SuiteNamePatterns) else suites
@setter
def include_tests(self, tests):
return TestNamePatterns(tests) \
if not isinstance(tests, TestNamePatterns) else tests
@setter
def include_tags(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
@setter
def exclude_tags(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
def start_suite(self, suite):
if not self:
return False
if hasattr(suite, 'starttime'):
suite.starttime = suite.endtime = None
if self.include_suites:
return self._filter_by_suite_name(suite)
if self.include_tests:
suite.tests = self._filter(suite, self._included_by_test_name)
if self.include_tags:
suite.tests = self._filter(suite, self._included_by_tags)
if self.exclude_tags:
suite.tests = self._filter(suite, self._not_excluded_by_tags)
return bool(suite.suites)
def _filter_by_suite_name(self, suite):
if self.include_suites.match(suite.name, suite.longname):
suite.visit(Filter(include_suites=[],
include_tests=self.include_tests,
include_tags=self.include_tags,
exclude_tags=self.exclude_tags))
return False
suite.tests = []
return True
def _filter(self, suite, filter):
return [t for t in suite.tests if filter(t)]
def _included_by_test_name(self, test):
return self.include_tests.match(test.name, test.longname)
def _included_by_tags(self, test):
return self.include_tags.match(test.tags)
def _not_excluded_by_tags(self, test):
return not self.exclude_tags.match(test.tags)
def __nonzero__(self):
return bool(self.include_suites or self.include_tests or
self.include_tags or self.exclude_tags)
| apache-2.0 | 3,106,772,659,293,571,600 | 33.65 | 79 | 0.650216 | false |
TeamExodus/external_chromium_org | tools/perf/record_android_profile.py | 27 | 1260 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import tempfile
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'telemetry'))
from telemetry.core import browser_finder
from telemetry.core import browser_options
def _RunPrebuilt(options):
browser_to_create = browser_finder.FindBrowser(options)
with browser_to_create.Create() as browser:
browser.Start()
output_file = os.path.join(tempfile.mkdtemp(), options.profiler)
raw_input('Press enter to start profiling...')
print '>> Starting profiler', options.profiler
browser.platform.profiling_controller.Start(
options.profiler, output_file)
print 'Press enter or CTRL+C to stop'
try:
raw_input()
except KeyboardInterrupt:
pass
finally:
browser.platform.profiling_controller.Stop()
print '<< Stopped profiler ', options.profiler
if __name__ == '__main__':
browser_finder_options = browser_options.BrowserFinderOptions()
parser = browser_finder_options.CreateParser('')
profiler_options, _ = parser.parse_args()
sys.exit(_RunPrebuilt(profiler_options))
| bsd-3-clause | -8,781,608,121,010,270,000 | 31.307692 | 80 | 0.721429 | false |
ihuston/pyflation | pyflation/cmpotentials.py | 1 | 36913 | # -*- coding: utf-8 -*-
"""cmpotentials.py - Cosmological potentials for cosmomodels.py
Provides functions which can be used with cosmomodels.py.
Default parameter values are included but can also be
specified as a dictionary.
"""
#Author: Ian Huston
#For license and copyright information see LICENSE.txt which was distributed with this file.
from __future__ import division
import numpy as np
def msqphisq(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for V=1/2 m^2 phi^2
    where m is the mass of the inflaton field.

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "mass" which specifies m above.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the WMAP-normalized value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # WMAP value of mass (in Mpl)
        m = 6.3267e-6
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    mass2 = m**2
    # Potential U = 1/2 m^2 phi^2.
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.5 * mass2 * y[0]**2)
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(mass2 * y[0])
    # Second derivative
    d2Udphi2 = np.atleast_2d(mass2)
    # Third derivative vanishes for a quadratic potential
    d3Udphi3 = np.atleast_3d(0)
    return U, dUdphi, d2Udphi2, d3Udphi3
def lambdaphi4(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for V=1/4 lambda phi^4
    for a specified lambda.

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "lambda" which specifies lambda
        above.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    lambda can be specified in the dictionary params or otherwise
    it defaults to the value as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied self-coupling if given, otherwise the WMAP value.
    if params is not None and "lambda" in params:
        l = params["lambda"]
    else:
        # WMAP value of lambda
        l = 1.5506e-13
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    #
    # Potential U = 1/4 l phi^4.
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.25 * l * y[0]**4)
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(l * y[0]**3)
    # Second derivative
    d2Udphi2 = np.atleast_2d(3 * l * y[0]**2)
    # Third derivative
    d3Udphi3 = np.atleast_3d(6 * l * y[0])
    return U, dUdphi, d2Udphi2, d3Udphi3
def linde(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for Linde potential
    V = -m^2/2 phi^2 + lambda/4 phi^4 + m^4/(4 lambda)

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameters "mass" and "lambda" which specify
        the variables.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    lambda can be specified in the dictionary params or otherwise
    it defaults to the value as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.

    mass can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the Salopek et al value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # Salopek et al value of mass (in Mpl)
        m = 5e-8
    mass2 = m**2
    # Use the supplied self-coupling if given, otherwise the WMAP value.
    if params is not None and "lambda" in params:
        l = params["lambda"]
    else:
        # WMAP value of lambda
        l = 1.55009e-13
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    #
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(-0.5 * mass2 * y[0]**2 + 0.25 * l * y[0]**4 + m**4 / (4 * l))
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(-mass2 * y[0] + l * y[0]**3)
    # Second derivative
    d2Udphi2 = np.atleast_2d(-mass2 + 3 * l * y[0]**2)
    # Third derivative
    d3Udphi3 = np.atleast_3d(6 * l * y[0])
    return U, dUdphi, d2Udphi2, d3Udphi3
def hybrid2and4(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for hybrid potential
    V = m^2/2 phi^2 + lambda/4 phi^4

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameters "mass" and "lambda" which specify
        the variables.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    lambda can be specified in the dictionary params or otherwise
    it defaults to the value as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.

    mass can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the Salopek et al value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # Salopek et al value of mass (in Mpl)
        m = 5e-8
    mass2 = m**2
    # Use the supplied self-coupling if given, otherwise the WMAP value.
    if params is not None and "lambda" in params:
        l = params["lambda"]
    else:
        # WMAP value of lambda
        l = 1.55123e-13
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    #
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.5 * mass2 * y[0]**2 + 0.25 * l * y[0]**4)
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(mass2 * y[0] + l * y[0]**3)
    # Second derivative
    d2Udphi2 = np.atleast_2d(mass2 + 3 * l * y[0]**2)
    # Third derivative
    d3Udphi3 = np.atleast_3d(6 * l * y[0])
    return U, dUdphi, d2Udphi2, d3Udphi3
def phi2over3(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for V = sigma phi^(2/3)
    for a specified sigma.

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "sigma" which specifies sigma
        above.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    sigma can be specified in the dictionary params or otherwise
    it defaults to the value as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied sigma if given, otherwise the WMAP-normalized value.
    if params is not None and "sigma" in params:
        s = params["sigma"]
    else:
        # WMAP value of sigma
        s = 3.81686e-10  # Unit Mpl^{10/3}
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    #
    # Potential U = s phi^(2/3).
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(s * y[0]**(2.0/3))
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d((2.0/3) * s * y[0]**(-1.0/3))
    # Second derivative
    d2Udphi2 = np.atleast_2d(-(2.0/9) * s * y[0]**(-4.0/3))
    # Third derivative
    d3Udphi3 = np.atleast_3d((8.0/27) * s * y[0]**(-7.0/3))
    return U, dUdphi, d2Udphi2, d3Udphi3
def msqphisq_withV0(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for V=1/2 m^2 phi^2 + V0
    where m is the mass of the inflaton field.

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "mass" which specifies m above, and
        "V0" which gives the constant offset of the potential.

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the WMAP-normalized value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # WMAP value of mass (in Mpl)
        m = 1.7403553e-06
    # Constant offset of the potential, default in units of Mpl^4.
    if params is not None and "V0" in params:
        V0 = params["V0"]
    else:
        V0 = 5e-10  # Units Mpl^4
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    mass2 = m**2
    # Potential U = 1/2 m^2 phi^2 + V0.
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.5 * mass2 * y[0]**2 + V0)
    # First derivative of the potential wrt phi (V0 drops out)
    dUdphi = np.atleast_1d(mass2 * y[0])
    # Second derivative
    d2Udphi2 = np.atleast_2d(mass2)
    # Third derivative vanishes for a quadratic potential
    d3Udphi3 = np.atleast_3d(0)
    return U, dUdphi, d2Udphi2, d3Udphi3
def step_potential(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V = 1/2 m^2 phi^2 ( 1 + c*(tanh((phi-phi_s)/d) - 1) )
    where m is the mass of the inflaton field and c, d and phi_s are provided.
    Form is adapted from Chen et al. arxiv:0801.3295 (which uses
    1 + c*tanh((phi-phi_s)/d); here the step term is shifted by -1).

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "mass" which specifies m above, and
        optionally "c" (step height), "d" (step width, in Mpl) and
        "phi_s" (step location, in Mpl).

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the WMAP-normalized value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # WMAP value of mass (in Mpl)
        m = 6.3267e-6
    if params is not None:
        c = params.get("c", 0.0018)
        d = params.get("d", 0.022)  # Units of Mpl
        phi_s = params.get("phi_s", 14.84)  # Units of Mpl
    else:
        c = 0.0018
        d = 0.022
        phi_s = 14.84
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    mass2 = m**2
    phisq = y[0]**2
    phiterm = (y[0] - phi_s) / d
    # s = sech(phiterm), t = tanh(phiterm); note ds/dphi = -s*t/d and
    # dt/dphi = s^2/d, which generate the derivative expressions below.
    s = 1 / np.cosh(phiterm)
    t = np.tanh(phiterm)
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.5 * mass2 * phisq * (1 + c * (t - 1)))
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(mass2 * y[0] * (1 + c * (t - 1))
                           + c * mass2 * phisq * s**2 / (2 * d))
    # Second derivative
    d2Udphi2 = np.atleast_2d(0.5 * mass2 * (4 * c * y[0] * s**2 / d
                                            - 2 * c * phisq * s**2 * t / (d**2)
                                            + 2 * (1 + c * (t - 1))))
    # Third derivative
    d3Udphi3 = np.atleast_3d(0.5 * mass2 * (6 * c * s**2 / d
                                            - 12 * c * y[0] * s**2 * t / (d**2)
                                            + c * phisq * (-2 * s**4 / (d**3)
                                                           + 4 * s**2 * t**2 / (d**3))))
    return U, dUdphi, d2Udphi2, d3Udphi3
def bump_potential(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V = 1/2 m^2 phi^2 ( 1 + c*sech((phi-phi_b)/d) )
    where m is the mass of the inflaton field and c, d and phi_b are provided.
    Form is taken from Chen et al. arxiv:0801.3295.

    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.

    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "mass" which specifies m above, and
        optionally "c" (bump height), "d" (bump width, in Mpl) and
        "phi_b" (bump location, in Mpl).

    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.

    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    # Use the supplied mass if given, otherwise the WMAP-normalized value.
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        # WMAP value of mass (in Mpl)
        m = 6.3267e-6
    if params is not None:
        c = params.get("c", 0.0005)
        d = params.get("d", 0.01)  # Units of Mpl
        phi_b = params.get("phi_b", 14.84)  # Units of Mpl
    else:
        c = 0.0005
        d = 0.01
        phi_b = 14.84
    mass2 = m**2
    # For multi-dimensional input the first index runs over the different
    # variables, so keep only the background field values.
    if len(y.shape) > 1:
        y = y[:, 0]
    # The shape of the potentials is important to be consistent with the
    # multifield case. The following shapes are used for a single field
    # model:
    #
    # U        : Python scalar
    # dUdphi   : 1d vector
    # d2Udphi2 : 2d array
    # d3Udphi3 : 3d array
    phisq = y[0]**2
    phiterm = (y[0] - phi_b) / d
    # s = sech(phiterm), t = tanh(phiterm); note ds/dphi = -s*t/d and
    # dt/dphi = s^2/d, which generate the derivative expressions below.
    s = 1 / np.cosh(phiterm)
    t = np.tanh(phiterm)
    # float() replaces np.asscalar, which was removed in NumPy 1.23.
    U = float(0.5 * mass2 * phisq * (1 + c * s))
    # First derivative of the potential wrt phi
    dUdphi = np.atleast_1d(mass2 * y[0] * (1 + c * s)
                           - c * mass2 * phisq * s * t / (2 * d))
    # Second derivative
    d2Udphi2 = np.atleast_2d(0.5 * mass2 * (-4 * c * y[0] * s * t / d
                                            + c * phisq * (-s**3 / (d**2)
                                                           + s * (t**2) / (d**2))
                                            + 2 * (1 + c * s)))
    # Third derivative
    d3Udphi3 = np.atleast_3d(0.5 * mass2 * (-6 * c * s * t / d
                                            + 6 * c * y[0] * (-s**3 / (d**2)
                                                              + s * (t**2) / (d**2))
                                            + c * phisq * (5 * s**3 * t / (d**3)
                                                           - s * t**3 / (d**3))))
    return U, dUdphi, d2Udphi2, d3Udphi3
def resonance(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V = 1/2 m^2 phi^2 (1 + c*sin(phi/d))
    where m is the mass of the inflaton field and c and d tune the
    superimposed oscillation. Form is taken from Chen etal. arxiv:0801.3295.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0]
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values; may hold "mass" (m above) and
        the oscillation parameters "c" and "d".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.
    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    #Check if mass is specified in params
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        #Use WMAP value of mass (in Mpl)
        m = 6.3267e-6
    if params is not None:
        c = params.get("c", 5e-7)
        d = params.get("d", 0.0007) #Units of Mpl
    else:
        c = 5e-7
        d = 0.0007
    #Use inflaton mass
    mass2 = m**2
    # Keep only the background field values if a vector was supplied.
    if len(y.shape)>1:
        y = y[:,0]
    # Shapes follow the multifield convention:
    # U scalar, dUdphi 1d, d2Udphi2 2d, d3Udphi3 3d.
    phi = y[0]
    phisq = phi**2
    phiterm = phi/d
    sphi = np.sin(phiterm)
    cphi = np.cos(phiterm)
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(0.5*(mass2)*(phisq) * (1 + c * sphi))
    #deriv of potential wrt \phi
    dUdphi = np.atleast_1d((mass2)*phi * (1 + c*sphi) + c * mass2 * phisq * cphi / (2*d))
    #2nd deriv
    d2Udphi2 = np.atleast_2d(mass2*((1+c*sphi) + 2*c/d * cphi * phi))
    #3rd deriv
    d3Udphi3 = np.atleast_3d(mass2*(3*c/d*cphi -3*c/d**2*sphi * phi -0.5*c/d**3 *cphi * phisq))
    return U, dUdphi, d2Udphi2, d3Udphi3
def bump_nothirdderiv(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V=1/2 m^2 phi^2 ( 1 + c*sech((phi-phi_b) / d)
    where m is the mass of the inflaton field and c, d and phi_b are provided.
    The third derivative is deliberately returned as zero for this model.
    Form is taken from Chen etal. arxiv:0801.3295.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0]
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values; may hold "mass" (m above) and
        the bump parameters "c", "d" and "phi_b".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is identically zero here).
    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    #Check if mass is specified in params
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        #Use WMAP value of mass (in Mpl)
        m = 6.3267e-6
    if params is not None:
        c = params.get("c", 0.0005)
        d = params.get("d", 0.01) #Units of Mpl
        phi_b = params.get("phi_b", 14.84) #Units of Mpl
    else:
        c = 0.0005
        d = 0.01
        phi_b = 14.84
    #Use inflaton mass
    mass2 = m**2
    # Keep only the background field values if a vector was supplied.
    if len(y.shape)>1:
        y = y[:,0]
    # Shapes follow the multifield convention:
    # U scalar, dUdphi 1d, d2Udphi2 2d, d3Udphi3 3d.
    phisq = y[0]**2
    phiterm = (y[0]-phi_b)/d
    s = 1/np.cosh(phiterm)
    t = np.tanh(phiterm)
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(0.5*(mass2)*(y[0]**2) * (1 + c * s))
    #deriv of potential wrt \phi
    dUdphi = np.atleast_1d((mass2)*y[0] * (1 + c*s) - c * mass2 * phisq * s*t / (2*d))
    #2nd deriv
    d2Udphi2 = np.atleast_2d(0.5*mass2*(-4*c*y[0]*s*t/d + c*phisq*(-s**3/(d**2) + s*(t**2)/(d**2)) + 2*(1+c*s)))
    #3rd deriv intentionally not computed for this variant.
    d3Udphi3 = np.atleast_3d(0.0)
    return U, dUdphi, d2Udphi2, d3Udphi3
def hybridquadratic(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V = 1/2 m1^2 phi^2 + 1/2 m2^2 chi^2
    where m1 and m2 are the masses of the fields. Needs nfields=2.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0] and chi as y[2]
        (fields occupy the even indices of y).
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values; may hold "m1" and "m2".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.
    """
    #Check if masses are specified in params
    if params:
        m1 = params.get("m1", 1.395464769e-6)
        m2 = params.get("m2", 9.768253382e-6)
    else:
        m1 = 1.395464769e-6
        m2 = 9.768253382e-6
    if len(y.shape)>1:
        y = y[:,0]
    #Squared masses for both fields
    mass2 = np.array([m1, m2])**2
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(0.5*(m1**2*y[0]**2 + m2**2*y[2]**2))
    #deriv of potential wrt each field
    dUdphi = mass2*np.array([y[0],y[2]])
    #2nd deriv: diagonal mass matrix
    d2Udphi2 = mass2*np.eye(2)
    #3rd deriv vanishes for a quadratic potential
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
def ridge_twofield(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for V=V0 - g phi - 1/2 m^2 chi^2
    where g is a parameter and m is the mass of the chi field. Needs nfields=2.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0] and chi as y[2]
        (fields occupy the even indices of y).
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values; may hold "V0", "g" and "m".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.
    """
    #Check if parameters are specified in params
    if params:
        g = params.get("g", 1e-5)
        m = params.get("m", 12e-5)
        V0 = params.get("V0", 1)
    else:
        g = 1e-5
        m = 12e-5
        V0 = 1
    if len(y.shape)>1:
        y = y[:,0]
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(V0 - g*y[0] - 0.5*m**2*y[2]**2)
    #deriv of potential wrt each field
    dUdphi = np.array([-g, -m**2 * y[2]])
    #2nd deriv: only the chi-chi entry is non-zero
    d2Udphi2 = np.array([[0,0], [0,-m**2]])
    #3rd deriv vanishes
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
def nflation(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V = \sum_\alpha 1/2 m^2 \phi_\alpha^2
    where m is the mass of each of the fields.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0]
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict
        Dictionary of parameter values in this case should
        hold the parameter "mass" which specifies m above.
        The number of fields is specified through "nfields".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives.
    Notes
    -----
    m can be specified in the dictionary params or otherwise
    it defaults to the mass as normalized with the WMAP spectrum
    Pr = 2.457e-9 at the WMAP pivot scale of 0.002 Mpc^-1.
    """
    #Check if mass is specified in params
    if params is not None and "mass" in params:
        m = params["mass"]
    else:
        #Use WMAP value of mass (in Mpl)
        m = 6.3267e-6
    # NOTE(review): unlike the other potentials here, params is effectively
    # required — a None params raises TypeError on the next line because
    # "nfields" has no default.
    nfields = params["nfields"]
    if len(y.shape)>1:
        y = y[:,0]
    # Stride-2 slice selects the even indices of y, i.e. the field values
    # (presumably interleaved with their derivatives — confirm with caller).
    phis_ix = slice(0,nfields*2,2)
    #Use inflaton mass
    mass2 = m**2
    #potential U = 1/2 m^2 \phi^2
    U = np.sum(0.5*(mass2)*(y[phis_ix]**2))
    #deriv of potential wrt \phi
    dUdphi = (mass2)*y[phis_ix]
    #2nd deriv: diagonal, complex dtype to match the multifield machinery.
    d2Udphi2 = mass2*np.eye(nfields, dtype=np.complex128)
    #3rd deriv is not provided for this model.
    d3Udphi3 = None
    return U, dUdphi, d2Udphi2, d3Udphi3
def quartictwofield(y, params=None):
    """Return (V, dV/dphi, d2V/dphi2, d3V/dphi3) for
    V= 1/2(m1^2 \phi^2 + 1/2 l1 \phi^4 + m2^2 \chi^2 + 1/2 l2 \chi^4)
    where m1, m2, l1, l2 are parameters. Needs nfields=2.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0] and chi as y[2].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values; may hold "m1", "m2", "l1", "l2".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is returned as None).
    """
    #Check if parameters are specified in params
    if params:
        m1 = params.get("m1", 5e-6)
        m2 = params.get("m2", 5e-8)
        l1 = params.get("l1", 5e-10)
        l2 = params.get("l2", 5e-14)
    else:
        m1 = 5e-6
        m2 = 5e-8
        # BUG FIX: l1 and l2 were previously left unset on this branch,
        # raising NameError whenever params was omitted or empty.
        l1 = 5e-10
        l2 = 5e-14
    if len(y.shape)>1:
        y = y[:,0]
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(0.5*(m1**2*y[0]**2 + 0.5*l1*y[0]**4 + m2**2*y[2]**2 + 0.5*l2*y[2]**4))
    #deriv of potential wrt each field
    dUdphi = np.array([m1**2*y[0] + l1*y[0]**3, m2**2*y[2] + l2*y[2]**3])
    #2nd deriv (diagonal)
    d2Udphi2 = np.eye(2)*np.array([m1**2 + 3*l1*y[0]**2, m2**2 + 3*l2*y[2]**2])
    #3rd deriv not used in the first order calculation
    d3Udphi3 = None
    return U, dUdphi, d2Udphi2, d3Udphi3
def hybridquartic(y, params=None):
    r"""Return the potential and its first three derivatives for the hybrid
    quartic model.
    The potential is given by
    .. math::
        V = \Lambda^4 [ (1-\chi^2/v^2)^2 + \phi^2/\mu^2
            + 2\phi^2\chi^2/(\phi_c^2 v^2) ]
    where the parameter are :math:`\Lambda, v, \mu and \phi_c`. Needs nfields=2.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0] and chi as y[2].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values labelled "lambda" , "v",
        "mu", "phi_c".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is returned as zeros).
    """
    #Check if parameters are specified in params
    if params:
        l = params.get("lambda", 2.3644e-6)
        v = params.get("v", 0.1)
        mu = params.get("mu", 1e3)
        phi_c = params.get("phi_c", 0.01)
    else:
        l = 2.3644e-6
        v = 0.1
        mu = 1e3
        phi_c = 0.01
    if len(y.shape)>1:
        y = y[:,0]
    phi = y[0]
    chi = y[2]
    l4 = l**4
    phicv2 = (phi_c*v)**2
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(l4 *((1-chi**2/v**2)**2 + phi**2/mu**2 + 2*(phi*chi)**2/phicv2))
    #deriv of potential wrt each field
    dUdphi = l4*np.array([2*phi/mu**2 + 4*phi*chi**2/phicv2,
                          -4*chi/v**2 * (1-chi**2/v**2) + 4*phi**2*chi/phicv2])
    #2nd deriv
    d2Udphi2 = l4*np.array([[2/mu**2 + 4*chi**2/phicv2, # V phi phi
                             8*phi*chi/phicv2], # V phi chi
                            [8*phi*chi/phicv2, # V chi phi
                             -4/v**2 * (1-3*chi**2/v**2) + 4*phi**2/phicv2]]) # V chi chi
    #3rd deriv Not set as not used in first order calculation
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
def inflection(y, params=None):
    r"""Return the potential and its first three derivatives for an inflection
    point model.
    The potential is given by
    .. math::
        V = V_0 + 0.5 m^2 \phi^2 + g \chi + 1/6 \lambda \chi^3 + \lambda/(8 r) \chi^4
    where :math:`V_0 = 0.75 g r + \lambda r^3/24` (as implemented below)
    and the parameters are :math:`\lambda, m, g and r`. Needs nfields=2.
    Parameters
    ----------
    y : array
        Array of variables with background phi as y[0] and chi as y[2].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params : dict, optional
        Dictionary of parameter values labelled "lambda" , "m", "g", "r".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3 : tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is returned as zeros).
    """
    #Check if parameters are specified in params
    if params:
        l = params.get("lambda", 3e3)
        g = params.get("g", 3e-2)
        r = params.get("r", 0.14)
        m = params.get("m", 1.0)
    else:
        l = 3e3
        g = 3e-2
        r = 0.14
        m = 1.0
    if len(y.shape)>1:
        y = y[:,0]
    V_0 = 0.75*g*r + l/24.0 * r**3
    phi = y[0]
    chi = y[2]
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(V_0 + 0.5*m**2*phi**2 + g*chi + 1/6.0 * l * chi**3
              + (g/(4*r**3) + l/(8*r)) * chi**4)
    #deriv of potential wrt each field
    dUdphi = np.array([m**2*phi, g + 0.5 * l * chi**2 + (g/(2*r**3) + l/(2*r)) * chi**3])
    #2nd deriv
    d2Udphi2 = np.array([[m**2, # V phi phi
                          0.0], # V phi chi
                         [0.0, # V chi phi
                          l*chi + 3/2*(g/r**3 + l/r) * chi**2]]) # V chi chi
    #3rd deriv Not set as not used in first order calculation
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
def hilltopaxion(y, params=None):
    r"""Return the potential and its first three derivatives for a hilltop axion
    model.
    The potential is given by
    .. math::
        V = 0.5 m^2 \varphi^2 + \Lambda^4 (1 - \cos(2\pi\chi/f))
    where the parameters are \Lambda, m, and f . Needs nfields=2.
    Parameters
    ----------
    y: array
        Array of variables with background phi as y[0] and chi as y[2].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params: dict, optional
        Dictionary of parameter values labelled "Lambda" , "m", "f".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3: tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is returned as zeros).
    """
    #Check if parameters are specified in params
    if params:
        l = params.get("Lambda", np.sqrt(6e-6/(4*np.pi)))
        f = params.get("f", 1.0)
        m = params.get("m", 6e-6)
    else:
        l = np.sqrt(6e-6/(4*np.pi))
        f = 1.0
        m = 6e-6
    if len(y.shape)>1:
        y = y[:,0]
    phi = y[0]
    chi = y[2]
    twopif = 2*np.pi/f
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(0.5*m**2*phi**2 + l**4*(1 - np.cos(twopif*chi)))
    #deriv of potential wrt each field
    dUdphi = np.array([m**2*phi, l**4*(twopif)*np.sin(twopif*chi)])
    #2nd deriv
    d2Udphi2 = np.array([[m**2, # V phi phi
                          0.0], # V phi chi
                         [0.0, # V chi phi
                          l**4*(twopif)**2*np.cos(twopif*chi)]]) # V chi chi
    #3rd deriv Not set as not used in first order calculation
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
def productexponential(y, params=None):
    r"""Return the potential and its first three derivatives for a product
    exponential potential.
    The potential is given by
    .. math::
        V = V_0 \phi^2 \exp(-\lambda \chi^2)
    where the parameters are :math:`V_0, \lambda`. Needs nfields=2.
    Parameters
    ----------
    y: array
        Array of variables with background phi as y[0] and chi as y[2].
        If you want to specify a vector of phi values, make sure
        that the first index still runs over the different
        variables, using newaxis if necessary.
    params: dict, optional
        Dictionary of parameter values labelled "lambda" , "V_0".
    Returns
    -------
    U, dUdphi, d2Udphi2, d3Udphi3: tuple of arrays
        Tuple of the potential and its first three derivatives
        (the third derivative is returned as zeros).
    """
    #Check if parameters are specified in params
    if params:
        l = params.get("lambda", 0.05)
        V_0 = params.get("V_0", 5.3705e-13)
    else:
        l = 0.05
        V_0 = 5.3705e-13
    if len(y.shape)>1:
        y = y[:,0]
    phi = y[0]
    chi = y[2]
    # Shared exponential factor exp(-lambda chi^2)
    explchi2 = np.exp(-l*chi**2)
    # float() replaces np.asscalar, which was removed in NumPy >= 1.23.
    U = float(V_0*phi**2 * explchi2)
    #deriv of potential wrt each field
    dUdphi = np.array([2*V_0*phi*explchi2,
                       -2*l*chi*V_0*phi**2*explchi2])
    #2nd deriv
    d2Udphi2 = np.array([[2*V_0*explchi2, # V phi phi
                          -4*l*chi*V_0*phi*explchi2], # V phi chi
                         [-4*l*chi*V_0*phi*explchi2, # V chi phi
                          -2*l*V_0*phi**2*explchi2*(1-2*l*chi)]]) # V chi chi
    #3rd deriv Not set as not used in first order calculation
    d3Udphi3 = np.zeros((2,2,2))
    return U, dUdphi, d2Udphi2, d3Udphi3
| bsd-3-clause | 8,137,346,741,227,419,000 | 31.266608 | 112 | 0.565492 | false |
shannonlucas/aerodata | aerodata/weather/metar/commands/wind.py | 1 | 3949 | # Copyright 2015 Shannon Lucas
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from ordtext import OrderedParser, ParseCommandError
from ordtext.grammar import GrammarElement
from ordtext.commands import AbstractCommand, PatternMatch, SimpleMatch
from aerodata.units import velocity
from aerodata.weather.metar.models import WindModel
class Wind(AbstractCommand):
    """Parses wind group data from a METAR.
    .. sidebar:: Sources:
        - NOAA/FAA AC 00-45F, Change 2, Section 3.1.3.5
        - WMO FM 15-XIV METAR/FM 16-XIV SPECI, Regulation 15.5
    """
    # Mandatory wind group: 3-digit direction (or VRB), 2-3 digit speed,
    # optional gust ("Gxx"), followed by the unit designator.
    _WIND = PatternMatch((r"^(?P<direction>\d{3}|VRB)"
                          r"(?P<speed>\d{2,3})(?:G(?P<gust>\d{2,3}))*"
                          r"(?P<units>KT|KMH|MPS)$"), by_name=True)
    # Optional variability group "dddVddd" giving the from/to directions.
    _VARIABILITY = PatternMatch(r"^(?P<from>\d{3})V(?P<to>\d{3})$",
                                by_name=True)
    _GRAMMAR = (
        GrammarElement(_WIND,
                       name="wind", min_count=1, max_count=1,
                       description="Wind direction, speed, and gusts"),
        GrammarElement(_VARIABILITY,
                       name="variability", min_count=0, max_count=1,
                       description="Wind variability.")
    )
    _PARSER = OrderedParser(_GRAMMAR)
    def __call__(self, tokens):
        """Extracts the wind information from the METAR.
        :param Sequence[str] tokens: the sequence of tokens being parsed.
        :return: a tuple containing the wind information (first element) and a
            sequence of the remaining tokens (second element).
        :rtype: (WindModel, Sequence)
        """
        m, remainder = Wind._PARSER(tokens)
        wind = m["wind"]
        # "VRB" direction means variable wind with no single heading.
        variable = wind["direction"] == "VRB"
        variability = m["variability"]
        # Map the METAR unit designator onto the velocity unit constants.
        wind_units = velocity.KNOTS if wind["units"] == "KT" \
            else velocity.KILOMETERS_PER_HOUR if wind["units"] == "KMH" \
            else velocity.METERS_PER_SECOND
        direction = None if variable else int(wind["direction"])
        speed = int(wind["speed"])
        # Gust and variability groups are optional; use None when absent.
        gust = int(wind["gust"]) if wind["gust"] else None
        var_from = int(variability["from"]) if variability else None
        var_to = int(variability["to"]) if variability else None
        parsed = WindModel(direction=direction, speed=speed, gust=gust,
                           units=wind_units, is_variable=variable,
                           variable_from=var_from, variable_to=var_to)
        return parsed, remainder
class WindShear(AbstractCommand):
    """Parses wind shear data from a METAR.
    .. sidebar:: Sources:
        - WMO FM 15-XIV METAR/FM 16-XIV SPECI, Regulation 15.13.3
    """
    _WS = SimpleMatch("WS")
    # Runway designator following the "WS" token, e.g. "R04" or "RWY22L".
    _RUNWAY = PatternMatch(r"^(R|RWY)(?P<runway>\d\d[LCR]?)$", by_name=True)
    def __call__(self, tokens):
        """Extracts the wind shear information from the METAR.
        :param Sequence[str] tokens: the sequence of tokens being parsed.
        :return: a tuple containing the ID of the runway experiencing wind
            shear (first element) and a sequence of the remaining tokens
            (second element).
        :rtype: (str, Sequence)
        """
        # The group must start with the literal "WS" token.
        if tokens[0] != "WS":
            raise ParseCommandError
        # "WS ALL RWY" reports shear on every runway.
        if tokens[1] == "ALL" and tokens[2] == "RWY":
            return "ALL", tokens[3:]
        # Otherwise a single runway designator follows.
        match, remainder = WindShear._RUNWAY(tokens[1:])
        return match["runway"], remainder
| apache-2.0 | -1,861,455,526,159,308,800 | 37.339806 | 79 | 0.609521 | false |
silverlogic/blockhunt-back | blockhunt/hunts/serializers.py | 1 | 3502 | import random
from django.db.models import F
from rest_framework import serializers
import coinbase.wallet.error
import dj_coinbase
from expander import ExpanderSerializerMixin
from blockhunt.stores.models import Store
from blockhunt.stores.serializers import StoreSerializer
from .models import Hunter, Checkin
names = [
('Bruce', 'Bitlee'),
]
class HunterSerializer(serializers.ModelSerializer):
    """Serializes Hunter accounts; balance is exposed read-only."""
    # Accepted on input only; never echoed back in responses.
    password = serializers.CharField(write_only=True)
    class Meta:
        model = Hunter
        fields = ('id', 'email', 'password', 'first_name', 'last_name', 'balance')
        read_only_fields = ('balance',)
    def create(self, validated_data):
        # Fall back to a canned name (module-level `names`) when the client
        # supplied neither a first nor a last name.
        if not validated_data.get('first_name') and not validated_data.get('last_name'):
            random_name = random.choice(names)
            validated_data['first_name'] = random_name[0]
            validated_data['last_name'] = random_name[1]
        user = super().create(validated_data)
        # Re-set the password through set_password so it is stored hashed.
        user.set_password(validated_data['password'])
        user.save()
        return user
class HunterFacebookSerializer(serializers.Serializer):
    """Input serializer carrying a Facebook access token.

    Consumed by views outside this file (presumably for social login).
    """
    access_token = serializers.CharField()
class CheckinSerializer(ExpanderSerializerMixin, serializers.ModelSerializer):
    """Creates a store check-in and pays the store's bounty to the hunter."""
    # QR code payload is the store primary key, submitted write-only.
    qrcode = serializers.CharField(write_only=True)
    class Meta:
        model = Checkin
        fields = ('id', 'store', 'reward', 'qrcode')
        expandable_fields = {
            'store': (StoreSerializer, (), {'read_only': True})
        }
        read_only_fields = ('store', 'reward',)
    def validate_qrcode(self, qrcode):
        # NOTE(review): int() raises ValueError and .get() raises
        # Store.DoesNotExist for bad codes — neither is converted into a
        # ValidationError here; confirm the desired API behaviour.
        store_id = int(qrcode)
        # Side effect: the resolved store is stashed on the serializer for
        # use in create().
        self.store = store = Store.objects.get(pk=store_id)
        if store.balance < store.bounty:
            raise serializers.ValidationError('Unfortunately the store does not have enough bitcoins to pay the bounty.')
        return qrcode
    def create(self, validated_data):
        store = self.store
        hunter = self.context['request'].user
        # Lazily create a coinbase account for first-time hunters.
        if not hunter.coinbase_account_id:
            coinbase_account = dj_coinbase.client.create_account(name='Hunter #' + str(hunter.pk))
            hunter.coinbase_account_id = coinbase_account.id
            hunter.save()
        coinbase_address = dj_coinbase.client.create_address(hunter.coinbase_account_id)
        try:
            # Transfer the bounty from the store's account to the hunter's
            # fresh address; fee is hard-coded.
            dj_coinbase.client.send_money(
                store.coinbase_account_id,
                to=coinbase_address.address,
                amount=str(store.bounty),
                currency='BTC',
                fee='0.0001'
            )
        except coinbase.wallet.error.APIError as ex:
            raise serializers.ValidationError(ex.message)
        checkin = Checkin.objects.create(store=store,
                                         reward=store.bounty,
                                         hunter=hunter)
        # F() expressions make the balance updates atomic at the SQL level,
        # but NOTE(review): the balance check in validate_qrcode and this
        # transfer are not wrapped in one transaction.
        hunter.balance = F('balance') + store.bounty
        hunter.save()
        store.balance = F('balance') - store.bounty
        store.save()
        return checkin
class SendBitcoinSerializer(serializers.Serializer):
    """Validates an outgoing bitcoin transfer request (address + amount)."""
    address = serializers.CharField()
    amount = serializers.DecimalField(max_digits=12, decimal_places=8)
    def validate_amount(self, amount):
        # A hunter may only send up to their current balance, and the
        # amount must be strictly positive.
        hunter = self.context['request'].user
        if amount > hunter.balance:
            raise serializers.ValidationError('You don\'t own that many bitcoins.')
        if amount <= 0:
            raise serializers.ValidationError('You cannot send that many bitcoins.')
        return amount
| mit | 3,758,222,307,163,068,400 | 33 | 121 | 0.630782 | false |
sdague/home-assistant | homeassistant/components/google_translate/tts.py | 3 | 4669 | """Support for the Google speech service."""
import asyncio
import logging
import re
import aiohttp
from aiohttp.hdrs import REFERER, USER_AGENT
import async_timeout
from gtts_token import gtts_token
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
GOOGLE_SPEECH_URL = "https://translate.google.com/translate_tts"
MESSAGE_SIZE = 148
SUPPORT_LANGUAGES = [
"af",
"sq",
"ar",
"hy",
"bn",
"ca",
"zh",
"zh-cn",
"zh-tw",
"zh-yue",
"hr",
"cs",
"da",
"nl",
"en",
"en-au",
"en-uk",
"en-us",
"eo",
"fi",
"fr",
"de",
"el",
"hi",
"hu",
"is",
"id",
"it",
"ja",
"ko",
"la",
"lv",
"mk",
"no",
"pl",
"pt",
"pt-br",
"ro",
"ru",
"sr",
"sk",
"es",
"es-es",
"es-mx",
"es-us",
"sw",
"sv",
"ta",
"th",
"tr",
"vi",
"cy",
"uk",
"bg-BG",
]
DEFAULT_LANG = "en"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES)}
)
async def async_get_engine(hass, config, discovery_info=None):
    """Set up Google speech component.

    Returns a GoogleProvider configured with the language from the platform
    config (validated against SUPPORT_LANGUAGES by PLATFORM_SCHEMA).
    """
    return GoogleProvider(hass, config[CONF_LANG])
class GoogleProvider(Provider):
    """The Google speech API provider."""
    def __init__(self, hass, lang):
        """Init Google TTS service."""
        self.hass = hass
        self._lang = lang
        # Spoof a browser so the unofficial translate_tts endpoint responds.
        self.headers = {
            REFERER: "http://translate.google.com/",
            USER_AGENT: (
                "Mozilla/5.0 (Windows NT 10.0; WOW64) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/47.0.2526.106 Safari/537.36"
            ),
        }
        self.name = "Google"
    @property
    def default_language(self):
        """Return the default language."""
        return self._lang
    @property
    def supported_languages(self):
        """Return list of supported languages."""
        return SUPPORT_LANGUAGES
    async def async_get_tts_audio(self, message, language, options=None):
        """Load TTS from google.

        Splits the message into <= MESSAGE_SIZE chunks, fetches each chunk
        sequentially and concatenates the MP3 bytes. Returns (None, None)
        on any token, HTTP or timeout failure.
        """
        token = gtts_token.Token()
        websession = async_get_clientsession(self.hass)
        message_parts = self._split_message_to_parts(message)
        data = b""
        for idx, part in enumerate(message_parts):
            try:
                # Token calculation is blocking; run it in the executor.
                part_token = await self.hass.async_add_executor_job(
                    token.calculate_token, part
                )
            except ValueError as err:
                # If token seed fetching fails.
                _LOGGER.warning(err)
                return None, None
            url_param = {
                "ie": "UTF-8",
                "tl": language,
                "q": part,
                "tk": part_token,
                "total": len(message_parts),
                "idx": idx,
                "client": "tw-ob",
                "textlen": len(part),
            }
            try:
                # 10 second budget per chunk request.
                with async_timeout.timeout(10):
                    request = await websession.get(
                        GOOGLE_SPEECH_URL, params=url_param, headers=self.headers
                    )
                    if request.status != HTTP_OK:
                        _LOGGER.error(
                            "Error %d on load URL %s", request.status, request.url
                        )
                        return None, None
                    data += await request.read()
            except (asyncio.TimeoutError, aiohttp.ClientError):
                _LOGGER.error("Timeout for google speech")
                return None, None
        return "mp3", data
    @staticmethod
    def _split_message_to_parts(message):
        """Split message into single parts.

        Splits first at punctuation, then recursively at spaces so each
        part fits within MESSAGE_SIZE characters; empty parts are dropped.
        """
        if len(message) <= MESSAGE_SIZE:
            return [message]
        punc = "!()[]?.,;:"
        punc_list = [re.escape(c) for c in punc]
        pattern = "|".join(punc_list)
        parts = re.split(pattern, message)
        def split_by_space(fullstring):
            """Split a string by space."""
            if len(fullstring) > MESSAGE_SIZE:
                idx = fullstring.rfind(" ", 0, MESSAGE_SIZE)
                return [fullstring[:idx]] + split_by_space(fullstring[idx:])
            return [fullstring]
        msg_parts = []
        for part in parts:
            msg_parts += split_by_space(part)
        return [msg for msg in msg_parts if len(msg) > 0]
| apache-2.0 | 7,522,089,321,566,281,000 | 23.967914 | 82 | 0.51103 | false |
delphinus1024/opencv30hdr | make_list.py | 1 | 1140 | #!/usr/bin/env python
import subprocess
import os.path
import sys
lines = []
def call_exiv2(fullpath,fn):
    """Run exiv2 on one image and collect its exposure time (Python 2).

    Appends [fn, denominator/numerator] pairs for each
    Exif.Photo.ExposureTime tag found to the module-level `lines` list.
    NOTE(review): the locals `denom`/`num` are swapped relative to the
    "num/denom" rational in EXIF — the appended value is the reciprocal
    of the exposure time (e.g. 250.0 for "1/250"); confirm this is the
    intended sort key.
    """
    global lines
    # NOTE(review): fullpath is interpolated into a shell command with
    # shell=True — paths with spaces or shell metacharacters will break.
    cmd = "exiv2 " + "-Pkv " + fullpath
    p = subprocess.Popen(cmd,shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE)
    for i in p.stdout.read().split('\n'):
        lst = i.split()
        if len(lst) > 0:
            if lst[0] == "Exif.Photo.ExposureTime":
                line = []
                # lst[1] is a rational like "1/250".
                ss = lst[1].split('/')
                denom = float(ss[0])
                num = float(ss[1])
                line.append(fn)
                line.append(num / denom)
                lines.append(line)
if __name__ == "__main__":
    # Expect exactly one argument: the folder holding the .tif images.
    argvs = sys.argv
    argc = len(argvs)
    if (argc != 2):
        print "Usage python make_list [image folder]"
        quit()
    srcfolder = argvs[1]
    if (not os.path.exists(srcfolder)) :
        print srcfolder, " does not exists."
        quit()
    filelst = os.listdir(srcfolder)
    #print "file list=", filelst
    # Collect exposure data for every .tif file in the folder.
    for fn in filelst:
        name = os.path.splitext(fn)
        base = name[0]
        ext = name[1]
        if (ext != ".tif"):
            continue
        srcfn = os.path.join(srcfolder,fn)
        call_exiv2(srcfn,fn)
    # Sort by the exposure value (second column), fastest shutter first,
    # and print "filename value" lines for the HDR tool.
    lines = sorted(lines,reverse=True,key=lambda x: x[1])
    for line in lines:
        print line[0]," ",line[1]
| mit | 4,486,225,379,823,488,000 | 18.655172 | 82 | 0.615789 | false |
hothHowler/lda | lda/utils.py | 4 | 5181 | from __future__ import absolute_import, unicode_literals # noqa
import logging
import numbers
import sys
import numpy as np
PY2 = sys.version_info[0] == 2
if PY2:
import itertools
zip = itertools.izip
logger = logging.getLogger('lda')
def check_random_state(seed):
    """Normalize *seed* into a ``np.random.RandomState`` instance.

    ``None`` yields numpy's global RandomState singleton; an integer seeds
    a fresh RandomState; an existing RandomState is returned unchanged.
    Anything else raises ``ValueError``.
    """
    if seed is None:
        # i.e., use existing RandomState
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    raise ValueError("{} cannot be used as a random seed.".format(seed))
def matrix_to_lists(doc_word):
"""Convert a (sparse) matrix of counts into arrays of word and doc indices
Parameters
----------
doc_word : array or sparse matrix (D, V)
document-term matrix of counts
Returns
-------
(WS, DS) : tuple of two arrays
WS[k] contains the kth word in the corpus
DS[k] contains the document index for the kth word
"""
if np.count_nonzero(doc_word.sum(axis=1)) != doc_word.shape[0]:
logger.warning("all zero row in document-term matrix found")
if np.count_nonzero(doc_word.sum(axis=0)) != doc_word.shape[1]:
logger.warning("all zero column in document-term matrix found")
sparse = True
try:
# if doc_word is a scipy sparse matrix
doc_word = doc_word.copy().tolil()
except AttributeError:
sparse = False
if sparse and not np.issubdtype(doc_word.dtype, int):
raise ValueError("expected sparse matrix with integer values, found float values")
ii, jj = np.nonzero(doc_word)
if sparse:
ss = tuple(doc_word[i, j] for i, j in zip(ii, jj))
else:
ss = doc_word[ii, jj]
n_tokens = int(doc_word.sum())
DS = np.repeat(ii, ss).astype(np.intc)
WS = np.empty(n_tokens, dtype=np.intc)
startidx = 0
for i, cnt in enumerate(ss):
cnt = int(cnt)
WS[startidx:startidx + cnt] = jj[i]
startidx += cnt
return WS, DS
def lists_to_matrix(WS, DS):
"""Convert array of word (or topic) and document indices to doc-term array
Parameters
-----------
(WS, DS) : tuple of two arrays
WS[k] contains the kth word in the corpus
DS[k] contains the document index for the kth word
Returns
-------
doc_word : array (D, V)
document-term array of counts
"""
D = max(DS) + 1
V = max(WS) + 1
doc_word = np.empty((D, V), dtype=np.intc)
for d in range(D):
for v in range(V):
doc_word[d, v] = np.count_nonzero(WS[DS == d] == v)
return doc_word
def dtm2ldac(dtm, offset=0):
    """Convert a document-term matrix into an LDA-C formatted file
    Parameters
    ----------
    dtm : array of shape N,V
    Returns
    -------
    doclines : iterable of LDA-C lines suitable for writing to file
    Notes
    -----
    If a format similar to SVMLight is desired, `offset` of 1 may be used.
    """
    try:
        dtm = dtm.tocsr()
    except AttributeError:
        pass
    # np.integer accepts every integer dtype (int32, int64, ...), unlike
    # plain `int` which only matches the platform default integer.
    assert np.issubdtype(dtm.dtype, np.integer)
    n_rows = dtm.shape[0]
    for i, row in enumerate(dtm):
        try:
            row = row.toarray().squeeze()
        except AttributeError:
            pass
        unique_terms = np.count_nonzero(row)
        if unique_terms == 0:
            raise ValueError("dtm row {} has all zero entries.".format(i))
        # Use `j` for the term index: the previous code reused `i`, which
        # shadowed the row index (and leaked out of the comprehension under
        # Python 2, corrupting the progress log and error messages).
        term_cnt_pairs = [(j + offset, cnt) for j, cnt in enumerate(row) if cnt > 0]
        docline = str(unique_terms) + ' '
        docline += ' '.join(["{}:{}".format(j, cnt) for j, cnt in term_cnt_pairs])
        if (i + 1) % 1000 == 0:
            logger.info("dtm2ldac: on row {} of {}".format(i + 1, n_rows))
        yield docline
def ldac2dtm(stream, offset=0):
    """Convert an LDA-C formatted file to a document-term array
    Parameters
    ----------
    stream: file object
        File yielding unicode strings in LDA-C format.
    Returns
    -------
    dtm : array of shape N,V
    Notes
    -----
    If a format similar to SVMLight is the source, an `offset` of 1 may be used.
    """
    doclines = stream
    # We need to figure out the dimensions of the dtm.
    N = 0
    V = -1
    data = []
    for l in doclines:
        l = l.strip()
        # skip empty lines
        if not l:
            continue
        # Each line is "<unique_terms> term:count term:count ...".
        unique_terms = int(l.split(' ')[0])
        term_cnt_pairs = [s.split(':') for s in l.split(' ')[1:]]
        for v, _ in term_cnt_pairs:
            # check that format is indeed LDA-C with the appropriate offset
            if int(v) == 0 and offset == 1:
                raise ValueError("Indexes in LDA-C are offset 1")
        term_cnt_pairs = tuple((int(v) - offset, int(cnt)) for v, cnt in term_cnt_pairs)
        # The declared unique-term count must match what we parsed.
        np.testing.assert_equal(unique_terms, len(term_cnt_pairs))
        # Track the largest term index seen to size the vocabulary.
        # NOTE(review): a line declaring zero terms would make this
        # max(V, *[]) call raise TypeError.
        V = max(V, *[v for v, cnt in term_cnt_pairs])
        data.append(term_cnt_pairs)
        N += 1
    V = V + 1
    dtm = np.zeros((N, V), dtype=np.intc)
    for i, doc in enumerate(data):
        for v, cnt in doc:
            # Guard against duplicate term entries within one document.
            np.testing.assert_equal(dtm[i, v], 0)
            dtm[i, v] = cnt
    return dtm
| mpl-2.0 | 2,622,867,175,849,145,300 | 27.783333 | 90 | 0.579618 | false |
darktears/crosswalk | tools/reflection_generator/java_method.py | 2 | 36000 | # Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from collections import OrderedDict
from string import Template
def ConvertClassExpressionToClassType(class_name):
  """Map a Java type expression to its ".class" literal.

  "Map<String, String>" becomes "Map.class"; for non-generic expressions
  the last whitespace-separated word is used, so "final HashMap<String>"
  becomes "HashMap.class".
  """
  generic_pattern = re.compile('[a-zA-z0-9]+(\<[a-zA-Z0-9]+,\s[a-zA-z0-9]+\>)')
  if re.match(generic_pattern, class_name):
    base_name = class_name.split('<')[0]
  else:
    base_name = class_name.split()[-1].split('<')[0]
  return base_name + '.class'
def ConvertPrimitiveTypeToObject(class_name):
  """Map a Java primitive type name to its boxed wrapper type.

  Names that are not primitives are returned unchanged.
  """
  primitives = ('byte', 'short', 'int', 'long',
                'float', 'double', 'char', 'boolean')
  boxed = ('Byte', 'Short', 'Integer', 'Long',
           'Float', 'Double', 'Character', 'Boolean')
  boxed_by_primitive = dict(zip(primitives, boxed))
  return boxed_by_primitive.get(class_name, class_name)
def GetPrimitiveTypeDefaultValue(class_name):
  """Return the Java default-value literal for a primitive type name.

  Non-primitive (reference) types default to 'null'.
  """
  primitive_map = {
    'byte': '0',
    'short': '0',
    'int': '0',
    'long': '0L',
    'float': '0.0f',
    'double': '0.0d',
    # The backslash must be escaped: under Python 3 the previous
    # "'\u0000'" literal evaluated to a quoted NUL character instead of
    # the six-character Java escape sequence. Under Python 2 both
    # spellings are identical.
    'char': "'\\u0000'",
    'boolean': 'false',
  }
  return primitive_map.get(class_name, 'null')
class ParamType(object):
  """Internal representation of the type of a single method parameter.

  Decomposes an expression such as "final Map<String, String>" into a
  modifier ("final "), a generic type ("Map") and recursively parsed
  type parameters, and records whether any part of the type refers to
  an internal class (queried through |class_loader|).
  """
  def __init__(self, expression, class_loader):
    self._expression = expression
    self._modifier = ''
    self._generic_type = ''
    self._generic_type_parameters = []
    self._contains_internal_class = False
    self.ParseType(class_loader)
    self._contains_internal_class = self._contains_internal_class or\
        class_loader.IsInternalClass(self._generic_type)
  def ParseType(self, class_loader):
    """Split the expression into modifier, generic type and type params."""
    # Raw strings so the regex escapes survive unchanged on Python 3.
    param_type_re = re.compile(r'(?P<modifier>(\w+ )*)'
                               r'(?P<generic>(\w+))(?P<type_params>(<.*>)?)')
    for match in re.finditer(param_type_re, self._expression):
      self._modifier = match.group('modifier')
      self._generic_type = match.group('generic')
      type_params = match.group('type_params')
      if len(type_params) > 1:
        type_params = type_params[1:-1]
        self._generic_type_parameters = [ParamType(param.strip(),
            class_loader) for param in type_params.split(',')]
    for type_param in self._generic_type_parameters:
      # NOTE: a leftover Python 2 debug print statement (dumping every
      # ValueCallback type parameter to stdout) was removed here; besides
      # the noise, it prevented the module from parsing under Python 3.
      if type_param.contains_internal_class:
        self._contains_internal_class = True
        break
  @property
  def expression(self):
    return self._expression
  @property
  def modifier(self):
    return self._modifier
  @property
  def generic_type(self):
    return self._generic_type
  @property
  def generic_type_parameters(self):
    return self._generic_type_parameters
  @property
  def contains_internal_class(self):
    return self._contains_internal_class
class ParamStringType(object):
  # Enumerates the textual renderings a method parameter can take in the
  # generated internal/bridge/wrapper code. Consumed by
  # Method.FormatSingleParam (via Method.GetFormattedParamArray); see the
  # per-branch comments there for concrete examples of each form.
  INTERNAL_DECLARE = 1             # param as declared by the internal class
  BRIDGE_DECLARE = 2               # param as declared by the bridge class
  BRIDGE_DECLARE_FOR_WRAPPER = 3   # param as a Class<?> for reflection
  BRIDGE_PASS_TO_SUPER = 4         # argument form for the bridge's super call
  BRIDGE_PASS_TO_WRAPPER = 5       # argument form for the bridge->wrapper call
  INTERNAL_PASS_TO_BRIDGE = 6      # argument form accepted from internal
  BRIDGE_OVERRIDE_CONDITION = 7    # instanceof guard used in the override
  WRAPPER_DECLARE = 8              # param as declared by the wrapper class
  WRAPPER_DECLARE_FOR_BRIDGE = 9   # param as a Class<?> for wrapper reflection
  WRAPPER_PASS_TO_BRIDGE = 10      # argument form for the wrapper->bridge call
class MethodStringType(object):
  # Enumerates the kinds of method bodies the generator can emit.
  # NOTE(review): not referenced anywhere in this module — presumably used
  # by a sibling generator module or dead; confirm before removing.
  BRIDGE_CONSTRUCTOR = 1
  BRIDGE_STATIC = 2
  BRIDGE_SUPER = 3
  BRIDGE_OVERRIDE = 4
  BRIDGE_WRAPPER = 5
  WRAPPER_CONSTRUCTOR = 6
  WRAPPER_STATIC = 7
  WRAPPER_BRIDGE = 8
  WRAPPER_INTERFACE = 9
class Method(object):
"""Internal representaion of a method."""
ANNOTATION_PRE_WRAPLINE = 'preWrapperLines'
ANNOTATION_POST_WRAPLINE = 'postWrapperLines'
ANNOTATION_POST_BRIDGELINE = 'postBridgeLines'
  def __init__(self, class_name, class_loader,
      is_constructor, is_static, is_abstract,
      method_name, method_return, params, annotation, doc=''):
    """Build the internal representation of one parsed Java method.

    |class_name|/|class_loader| identify the declaring internal class;
    |params| is the raw parameter-list text and |annotation| the raw
    annotation text, both parsed eagerly at the end of __init__.
    """
    self._class_name = class_name
    self._class_loader = class_loader
    self._is_constructor = is_constructor
    self._is_static = is_static
    self._is_abstract = is_abstract
    self._is_delegate = False
    self._disable_reflect_method = False
    self._method_name = method_name
    self._method_return = method_return
    self._params = OrderedDict() # Use OrderedDict to avoid parameter misorder.
    self._typed_params = OrderedDict()
    self._method_annotations = {}
    self._method_doc = doc
    self._class_java_data = ''
    self._method_declare_name = ''
    # The pre-rendered parameter strings below stay empty until
    # PrepareStrings() fills them in.
    self._internal_params_declare = ''
    self._bridge_params_declare = ''
    self._bridge_params_declare_for_wrapper = ''
    self._bridge_params_pass_to_super = ''
    self._bridge_params_pass_to_wrapper = ''
    self._internal_params_pass_to_bridge = ''
    self._bridge_override_condition = ''
    self._wrapper_params_declare = ''
    self._wrapper_params_declare_for_bridge = ''
    self._wrapper_params_pass_to_bridge = ''
    self._is_reservable = False
    self.ParseMethodParams(params)
    self.ParseMethodAnnotation(annotation)
  def IsInternalClass(self, clazz):
    # Delegates to the class loader: True if |clazz| is an internal class.
    return self._class_loader.IsInternalClass(clazz)
  def GetJavaData(self, clazz):
    # Delegates to the class loader: parsed java data for |clazz|.
    return self._class_loader.GetJavaData(clazz)
  def GenerateDoc(self, doc):
    # Delegates doc-comment formatting to the class loader.
    return self._class_loader.GenerateDoc(doc)
  # Read-only views over the parsed state.
  @property
  def is_constructor(self):
    return self._is_constructor
  @property
  def is_static(self):
    return self._is_static
  @property
  def is_abstract(self):
    return self._is_abstract
  @property
  def is_reservable(self):
    return self._is_reservable
  @property
  def is_delegate(self):
    return self._is_delegate
  @property
  def disable_reflect_method(self):
    return self._disable_reflect_method
  @property
  def method_name(self):
    return self._method_name
  @property
  def method_return(self):
    return self._method_return
  @property
  def params(self):
    return self._params
  @property
  def typed_params(self):
    return self._typed_params
  @property
  def method_annotations(self):
    return self._method_annotations
  @property
  def method_doc(self):
    return self._method_doc
  def ParseMethodParams(self, params):
    """Split the raw parameter-list text into _params and _typed_params."""
    # TODO(shouqun): Currently, generic parameters are not supported.
    # The support of generic types should be added if such cases happen.
    if not params or params == '':
      return
    subparams = re.findall("<.*?>", params) # To handle Map type
    # Temporarily rewrite ", " inside <...> to "-" so the top-level split
    # on ',' does not break apart generic argument lists such as
    # Map<String, String>; the substitution is undone per-type below.
    for index in range(len(subparams)):
      params = params.replace(subparams[index], subparams[index].replace(", ", "-"))
    for param in params.split(','):
      param = param.strip()
      param_list = param.split()
      param_type = ' '.join(param_list[:-1]) # To handle modifiers
      if re.search("<.*?>", param_type):
        param_type = param_type.replace("-", ", ")
      param_name = param_list[-1]
      self._params[param_name] = param_type
      self._typed_params[param_name] = ParamType(param_type, self._class_loader)
def ParseMethodAnnotation(self, annotation):
if annotation.find('reservable = true') >= 0:
self._is_reservable = True
delegate_re = re.compile('delegate\s*=\s*'
'(?P<delegate>(true|false))')
for match in re.finditer(delegate_re, annotation):
delegate = match.group('delegate')
if delegate == 'true':
self._is_delegate = True
elif delegate == 'false':
self._is_delegate = False
disable_reflect_method_re = re.compile('disableReflectMethod\s*=\s*'
'(?P<disableReflectMethod>(true|false))')
for match in re.finditer(disable_reflect_method_re, annotation):
disable_reflect_method = match.group('disableReflectMethod')
if disable_reflect_method == 'true':
self._disable_reflect_method = True
else:
self._disable_reflect_method = False
pre_wrapline_re = re.compile('preWrapperLines\s*=\s*\{\s*('
'?P<pre_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(pre_wrapline_re, annotation):
pre_wrapline = self.FormatWrapperLine(match.group('pre_wrapline'))
self._method_annotations[self.ANNOTATION_PRE_WRAPLINE] = pre_wrapline
post_wrapline_re = re.compile('postWrapperLines\s*=\s*\{\s*('
'?P<post_wrapline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(post_wrapline_re, annotation):
post_wrapline = self.FormatWrapperLine(match.group('post_wrapline'))
self._method_annotations[self.ANNOTATION_POST_WRAPLINE] = post_wrapline
post_bridgeline_re = re.compile('postBridgeLines\s*=\s*\{\s*('
'?P<post_bridgeline>(".*")(,\s*".*")*)\s*\}')
for match in re.finditer(post_bridgeline_re, annotation):
post_bridgeline = self.FormatWrapperLine(match.group('post_bridgeline'))
self._method_annotations[self.ANNOTATION_POST_BRIDGELINE] = post_bridgeline
def FormatWrapperLine(self, annotation_value):
""" annotaion_value is a java string array which each element is an
individual line. Probably like: ' "line1",\n "line2"'
This method is turnning it to ' line1\n line2'
"""
lines = []
exec('lines = [%s]' % annotation_value.replace('\n', ''))
template = Template('\n'.join(lines))
values = {}
for arg in range(1, len(self.params.keys())+1):
values['param%d' % arg] = self.params.keys()[arg-1]
return template.substitute(values)
  def PrepareStrings(self):
    """Pre-render every parameter-string variant used by the generators.

    __init__ leaves all of these fields as empty strings; callers are
    expected to invoke this once the class loader can resolve
    self._class_name (GetJavaData must succeed).
    """
    self._class_java_data = self.GetJavaData(self._class_name)
    self._method_declare_name = self.GenerateMethodDeclareName()
    self._internal_params_declare = ', '.join(
        self.GetFormattedParamArray(ParamStringType.INTERNAL_DECLARE))
    self._bridge_params_declare = ', '.join(
        self.GetFormattedParamArray(ParamStringType.BRIDGE_DECLARE))
    # insert_empty=True yields a leading ", " when joined, so these lists
    # can be appended directly after another argument.
    self._bridge_params_declare_for_wrapper = ', '.join(
        self.GetFormattedParamArray(
            ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER, insert_empty=True))
    self._bridge_params_pass_to_super = ', '.join(
        self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_SUPER))
    self._bridge_params_pass_to_wrapper = ', '.join(
        self.GetFormattedParamArray(ParamStringType.BRIDGE_PASS_TO_WRAPPER))
    self._internal_params_pass_to_bridge = ', '.join(
        self.GetFormattedParamArray(ParamStringType.INTERNAL_PASS_TO_BRIDGE))
    self._bridge_override_condition = ' && '.join(
        self.GetFormattedParamArray(ParamStringType.BRIDGE_OVERRIDE_CONDITION))
    self._wrapper_params_declare = ', '.join(
        self.GetFormattedParamArray(ParamStringType.WRAPPER_DECLARE))
    self._wrapper_params_declare_for_bridge = ', '.join(
        self.GetFormattedParamArray(
            ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE, insert_empty=True))
    self._wrapper_params_pass_to_bridge = ', '.join(
        self.GetFormattedParamArray(ParamStringType.WRAPPER_PASS_TO_BRIDGE))
def GetFormattedParamArray(self, param_string_type,
append_empty=False, insert_empty=False):
""" Return the array of params with specified format.
append or insert an empty string on demand for cases
that need extra splitter when using the array.
"""
formatted_params = []
for param_name in self._params:
param_type = self._params[param_name]
formatted_param = self.FormatSingleParam(
param_type, param_name, param_string_type)
if formatted_param:
formatted_params.append(formatted_param)
if append_empty:
formatted_params.append('')
if insert_empty:
formatted_params.insert(0, '')
return formatted_params
  def FormatSingleParam(self, param_type, param_name, param_string_type):
    """Render one parameter in the form selected by |param_string_type|.

    Returns a code fragment string, or None for the override-condition
    form when the parameter contributes no instanceof guard (callers
    filter falsy entries out). See the per-branch comments for examples.
    """
    is_internal_class = self.IsInternalClass(param_type)
    if is_internal_class:
      java_data = self.GetJavaData(param_type)
    typed_param = self._typed_params[param_name]
    if param_string_type == ParamStringType.INTERNAL_DECLARE:
      # the way internal declares its params, will be used in bridge's override
      # call.
      # XWalkViewInternal view => XWalkViewInternal view
      return '%s %s' % (param_type, param_name)
    elif param_string_type == ParamStringType.BRIDGE_DECLARE:
      # the way bridge declares its params, will be used in bridge's wrapper
      # call and super call.
      # XWalkViewInternal view => XWalkViewBridge view
      if is_internal_class:
        return '%s %s'% (java_data.GetBridgeName(), param_name)
      else:
        return '%s %s' % (param_type, param_name)
    elif param_string_type == ParamStringType.BRIDGE_DECLARE_FOR_WRAPPER:
      # the way bridge declares its params for wrapper, will turn the param
      # type to class<?> value for reflection to use.
      # XWalkViewInternal view => coreBridge.getWrapperClass("XWalkView")
      # DirectionInternal direnction =>
      #     coreBridge.getWrapperClass("XWalkView$Direction")
      # String name => String.class
      if is_internal_class:
        return 'coreBridge.getWrapperClass("%s")' % java_data.GetWrapperName()
      else:
        # TODO(wang16): Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        if param_type in self._class_java_data.enums:
          return ('coreBridge.getWrapperClass("%s")' %
              self._class_java_data.GetWrapperName(param_type))
        else:
          return ConvertClassExpressionToClassType(param_type)
    elif param_string_type == ParamStringType.BRIDGE_PASS_TO_SUPER:
      # the way bridge passes the param to super
      # XWalkViewInternal view => view
      if is_internal_class:
        return java_data.UseAsInstanceInBridgeSuperCall(param_name)
      else:
        return param_name
    elif param_string_type == ParamStringType.BRIDGE_PASS_TO_WRAPPER:
      # the way bridge passes the param to wrapper
      # XWalkViewInternal view => view.getWrapper()
      # DirectionInternal direction => ConvertDirectionInternal(direction)
      if is_internal_class:
        return java_data.UseAsInstanceInBridgeCall(param_name)
      elif (typed_param.generic_type == 'ValueCallback' and
            typed_param.contains_internal_class):
        # Wrap the callback so the wrapper-side object is translated back
        # to its bridge object before being handed to the original
        # callback (which GenerateBridgeWrapperMethod aliases as
        # <name>Final).
        assert len(typed_param.generic_type_parameters) == 1
        internal_generic_type_param = typed_param.generic_type_parameters[0]
        internal_generic_type_class = self.GetJavaData(
            internal_generic_type_param.generic_type)
        return ('new ValueCallback<Object>() {\n' +
                '                @Override\n' +
                '                public void onReceiveValue(Object value) {\n' +
                '                    %sFinal.onReceiveValue((%s) ' % (
                    param_name, internal_generic_type_class.bridge_name) +
                'coreBridge.getBridgeObject(value));\n' +
                '                }\n' +
                '            }')
      else:
        # TODO(wang16): Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        if param_type in self._class_java_data.enums:
          return 'Convert%s(%s)' % (param_type, param_name)
        else:
          return param_name
    elif param_string_type == ParamStringType.INTERNAL_PASS_TO_BRIDGE:
      # the way bridge accepts param from internal
      # XWalkViewInternal view => (XWalkViewBridge) view
      if is_internal_class:
        return java_data.UseAsInstanceInBridgeOverrideCall(param_name)
      else:
        return param_name
    elif param_string_type == ParamStringType.BRIDGE_OVERRIDE_CONDITION:
      # the way bridge uses as the condition for whether call super or
      # call wrapper in override call
      # XWalkViewInternal view => (view instanceof XWalkViewBridge)
      if (is_internal_class and
          not java_data.HasInstanceCreateInternallyAnnotation()):
        return'(%s instanceof %s)' % (param_name, java_data.GetBridgeName())
      else:
        return None
    elif param_string_type == ParamStringType.WRAPPER_DECLARE:
      # the way wrapper declare the param
      # XWalkViewInternal view => XWalkView view
      # DirectionInternal direction => Direction direction
      if is_internal_class:
        return '%s %s' % (java_data.UseAsTypeInWrapperCall(), param_name)
      elif param_type in self._class_java_data.enums:
        # TODO(wang16): Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        return '%s %s' % (param_type.replace('Internal', ''), param_name)
      else:
        return '%s %s' % (param_type, param_name)
    elif param_string_type == ParamStringType.WRAPPER_DECLARE_FOR_BRIDGE:
      # the way wrapper declares its params for bridge, will turn the param
      # type to class<?> value for reflection to use.
      # XWalkViewInternal view =>
      #     coreWrapper.getBridgeClass("XWalkViewBridge")
      # DirectionInternal direction => enumDirectionClass
      # String name => String.class
      # TODO(wang16): Currently there is no internal classes for static method.
      # Need to support it in future.
      if is_internal_class:
        return 'coreWrapper.getBridgeClass("%s")' % java_data.GetBridgeName()
      else:
        # TODO(wang16): Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        enums = self._class_java_data.enums
        if param_type in enums:
          return ('coreWrapper.getBridgeClass("%s")' %
              self._class_java_data.GetBridgeName(param_type))
        else:
          return ConvertClassExpressionToClassType(param_type)
    elif param_string_type == ParamStringType.WRAPPER_PASS_TO_BRIDGE:
      # the way wrapper passes param to bridge
      # XWalkViewInternal view => view.getBridge()
      # DirectionInternal direction => ConvertDirection(direction)
      if is_internal_class:
        return java_data.UseAsInstanceInWrapperCall(param_name)
      elif param_type in self._class_java_data.enums:
        # TODO(wang16): Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        return 'Convert%s(%s)' % (param_type.replace('Internal', ''),
            param_name)
      else:
        return param_name
    else:
      pass
def GenerateMethodDeclareName(self):
name = self.method_name
for param_name in self.params:
# Remove modifier and generic type.
name += ConvertClassExpressionToClassType(
self.params[param_name]).replace('.class', '')
name = name.replace('[]', 'Array');
if self._is_constructor:
return '%sConstructor' % name
else:
return '%sMethod' % name
def GenerateBridgeConstructor(self):
if (self._bridge_params_declare != ''):
template = Template("""\
public ${NAME}(${PARAMS}, Object wrapper) {
super(${PARAMS_PASSING});
this.wrapper = wrapper;
reflectionInit();
${POST_BRIDGE_LINES}
}
""")
post_bridge_string = self._method_annotations.get(
self.ANNOTATION_POST_BRIDGELINE, '')
value = {'NAME': self._class_java_data.bridge_name,
'PARAMS': self._bridge_params_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_super,
'POST_BRIDGE_LINES': post_bridge_string}
return template.substitute(value)
else:
template = Template("""\
public ${NAME}(Object wrapper) {
super();
this.wrapper = wrapper;
reflectionInit();
}
""")
value = {'NAME': self._class_java_data.bridge_name,
'PARAMS': self._bridge_params_declare,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
def GenerateBridgeStaticMethod(self):
template = Template("""\
public static ${RETURN_TYPE} ${NAME}($PARAMS) {
${RETURN}${CLASS_NAME}.${NAME}(${PARAMS_PASSING});
}
""")
value = {'RETURN_TYPE': self.method_return,
'NAME': self.method_name,
'PARAMS': self._bridge_params_declare,
'RETURN': '' if self._method_return == 'void' else 'return ',
'CLASS_NAME': self._class_name,
'PARAMS_PASSING': self._bridge_params_pass_to_super}
return template.substitute(value)
  def GenerateBridgeOverrideMethod(self):
    """Emit the bridge's @Override of the internal method.

    When the override condition holds (every internal-class argument is
    actually a bridge instance) the call is re-dispatched to the bridge
    overload; otherwise it falls through to super. With no condition at
    all, only the bare '@Override' line is emitted (the following method
    generated by GenerateBridgeWrapperMethod becomes the override).
    """
    if not self._bridge_override_condition:
      return '    @Override'
    template = Template("""\
    @Override
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        if (${IF_CONDITION}) {
            ${RETURN}${NAME}(${BRIDGE_PARAMS_PASSING});
        } else {
            ${RETURN}super.${NAME}(${PARAMS_PASSING});
        }
    }
""")
    value = {'NAME': self.method_name,
             'RETURN_TYPE': self.method_return,
             'PARAMS': self._internal_params_declare,
             'RETURN': '' if self._method_return == 'void' else 'return ',
             'IF_CONDITION': self._bridge_override_condition,
             'PARAMS_PASSING': self._bridge_params_pass_to_super,
             'BRIDGE_PARAMS_PASSING': self._internal_params_pass_to_bridge}
    return template.substitute(value)
  def GenerateBridgeWrapperMethod(self):
    """Emit the bridge method that reflects into the wrapper layer.

    Falls back to <name>Super(...) when the wrapper-side ReflectMethod is
    not bound (except for abstract methods, which have no super to call).
    Internal return values coming back from the wrapper are translated
    with coreBridge.getBridgeObject().
    """
    return_is_internal = self.IsInternalClass(self._method_return)
    if return_is_internal:
      return_type_java_data = self.GetJavaData(self._method_return)
    if return_is_internal:
      template = Template("""\
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        if (${METHOD_DECLARE_NAME}.isNull()) {
            ${RETURN_SUPER}${NAME}Super(${PARAMS_PASSING_SUPER});
        } else {
            ${GENERIC_TYPE_DECLARE}${RETURN}coreBridge.getBridgeObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
        }
    }
""")
    elif self._is_abstract:
      template = Template("""\
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        ${GENERIC_TYPE_DECLARE}${RETURN}${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING});
    }
""")
    else :
      template = Template("""\
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        if (${METHOD_DECLARE_NAME}.isNull()) {
            ${RETURN_SUPER}${NAME}Super(${PARAMS_PASSING_SUPER});
        } else {
            ${GENERIC_TYPE_DECLARE}${RETURN}${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING});
        }
    }
""")
    if self._method_return == 'void':
      return_statement = ''
      return_statement_super = ''
    elif return_is_internal:
      return_statement = 'return (%s)' % return_type_java_data.bridge_name
      return_statement_super = 'return '
    else:
      return_statement = ('return (%s)' %
          ConvertPrimitiveTypeToObject(self.method_return))
      return_statement_super = 'return '
    # Handling generic types, current only ValueCallback will be handled.
    # Each qualifying callback parameter is aliased to a final local
    # (<name>Final) so the anonymous class built by FormatSingleParam can
    # capture it.
    generic_type_declare = ''
    for param_name in self._typed_params:
      typed_param = self._typed_params[param_name]
      if typed_param.generic_type != 'ValueCallback':
        continue
      if typed_param.contains_internal_class:
        generic_type_declare += 'final %s %sFinal = %s;\n        ' % (
            typed_param.expression, param_name, param_name)
    value = {'RETURN_TYPE': self.method_return,
             'NAME': self.method_name,
             'METHOD_DECLARE_NAME': self._method_declare_name,
             'PARAMS': self._bridge_params_declare,
             'RETURN': return_statement,
             'RETURN_SUPER': return_statement_super,
             'GENERIC_TYPE_DECLARE': generic_type_declare,
             'PARAMS_PASSING_SUPER': self._bridge_params_pass_to_super,
             'PARAMS_PASSING': self._bridge_params_pass_to_wrapper}
    return template.substitute(value)
  def GenerateBridgeSuperMethod(self):
    """Emit the <name>Super(...) helper used when no wrapper is bound.

    Abstract methods get no helper (empty string). Classes annotated as
    created internally route through the 'internal' delegate field when
    it is set; otherwise the helper simply calls super. Internal return
    values are converted to their bridge type, with an explicit null
    check for reference returns.
    """
    no_return_value = self._method_return == 'void'
    return_is_internal = self.IsInternalClass(self._method_return)
    if return_is_internal:
      return_type_java_data = self.GetJavaData(self._method_return)
    if self._is_abstract:
      return ''
    if self._class_java_data.HasCreateInternallyAnnotation():
      if no_return_value:
        template = Template("""\
    public void ${NAME}Super(${PARAMS}) {
        if (internal == null) {
            super.${NAME}(${PARAM_PASSING});
        } else {
            internal.${NAME}(${PARAM_PASSING});
        }
    }
""")
      else:
        template = Template("""\
    public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
        ${INTERNAL_RETURN_TYPE} ret;
        if (internal == null) {
            ret = super.${NAME}(${PARAM_PASSING});
        } else {
            ret = internal.${NAME}(${PARAM_PASSING});
        }
        ${IF_NULL_RETURN_NULL}
        return ${RETURN_VALUE};
    }
""")
    else:
      if no_return_value:
        template = Template("""\
    public void ${NAME}Super(${PARAMS}) {
        super.${NAME}(${PARAM_PASSING});
    }
""")
      else:
        template = Template("""\
    public ${RETURN_TYPE} ${NAME}Super(${PARAMS}) {
        ${INTERNAL_RETURN_TYPE} ret;
        ret = super.${NAME}(${PARAM_PASSING});
        ${IF_NULL_RETURN_NULL}
        return ${RETURN_VALUE};
    }
""")
    if return_is_internal:
      return_value = return_type_java_data.UseAsReturnInBridgeSuperCall('ret')
      method_return = return_type_java_data.bridge_name
    else:
      return_value = 'ret'
      method_return = self._method_return
    if ConvertPrimitiveTypeToObject(method_return) != method_return:
      # it's returning prmitive type, so it can't be null.
      if_null_return_null = ''
    else:
      if_null_return_null = 'if (ret == null) return null;'
    value = {
        'RETURN_TYPE': method_return,
        'INTERNAL_RETURN_TYPE': self.method_return,
        'NAME': self.method_name,
        'PARAM_PASSING': self._bridge_params_pass_to_super,
        'PARAMS': self._bridge_params_declare,
        'IF_NULL_RETURN_NULL': if_null_return_null,
        'RETURN_VALUE': return_value
    }
    return template.substitute(value)
  def GenerateWrapperConstructor(self):
    """Emit the wrapper-class constructor (plus optional post-wrap hook).

    The preWrapperLines annotation lines run first; then the constructor
    records the bridge constructor's parameter classes and values so the
    core wrapper can instantiate the bridge object later. When
    postWrapperLines are present, a post<DeclareName>() method carrying
    them is appended and registered as postWrapperMethod.
    """
    # TODO(wang16): Currently, only support pre/post wrapper lines for
    # Constructors.
    template = Template("""\
${DOC}
    public ${CLASS_NAME}(${PARAMS}) {
${PRE_WRAP_LINES}
        reflectionInit();
    }
""")
    pre_wrap_string = self._method_annotations.get(
        self.ANNOTATION_PRE_WRAPLINE, '')
    post_wrap_string = self._method_annotations.get(
        self.ANNOTATION_POST_WRAPLINE, '')
    if (pre_wrap_string != ''):
      pre_wrap_string += "\n\n"
      pre_wrap_string += "        constructorTypes = new ArrayList<Object>();\n"
      for param_type in self._wrapper_params_declare_for_bridge.split(', '):
        if (param_type != ''):
          # Strip the getBridgeClass(...) wrapper so only the class name
          # (or Foo.class literal) is stored in constructorTypes.
          param_type = param_type.replace('coreWrapper.getBridgeClass(', '')
          param_type = param_type.replace(')', '')
          pre_wrap_string += ("        constructorTypes.add(%s);\n" % param_type)
      pre_wrap_string += "\n"
      pre_wrap_string += "        constructorParams = new ArrayList<Object>();\n"
      for param_name in self._wrapper_params_pass_to_bridge.split(', '):
        if (param_name != ''):
          param_name = param_name.replace('.getBridge()', '')
          pre_wrap_string += "        constructorParams.add(%s);\n" % param_name
    if (post_wrap_string != ''):
      pre_wrap_string += ("""
        postWrapperMethod = new ReflectMethod(this,
                \"post%s\");\n""" % self._method_declare_name)
    value = {'DOC': self.GenerateDoc(self.method_doc),
            'CLASS_NAME': self._class_java_data.wrapper_name,
            'PARAMS': self._wrapper_params_declare,
            'PRE_WRAP_LINES': pre_wrap_string}
    ret = template.substitute(value)
    if (post_wrap_string != ''):
      template = Template("""\
    public void post${POST_WRAP_METHOD}() {
${POST_WRAP_LINES}
    }
""")
      value = {'POST_WRAP_METHOD': self._method_declare_name,
               'POST_WRAP_LINES': post_wrap_string}
      ret += template.substitute(value)
    return ret
  def GenerateWrapperStaticMethod(self):
    """Emit a static wrapper method that reflects into the bridge.

    If XWalk is not ready yet, reservable methods queue themselves (with
    their arguments) for replay via XWalkCoreWrapper; non-reservable
    methods fail an assertion instead. Non-void methods fall back to the
    type's default value when the invoke did not return.
    """
    if self.is_reservable:
      template = Template("""\
${DOC}
    public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        reflectionInit();
        try {
            ${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
        } catch (UnsupportedOperationException e) {
            if (coreWrapper == null) {
                ${METHOD_DECLARE_NAME}.setArguments(${PARAMS_PASSING});
                XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
            } else {
                XWalkCoreWrapper.handleRuntimeError(e);
            }
        }
        ${RETURN_NULL}
    }
""")
    else:
      template = Template("""\
${DOC}
    public static ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        reflectionInit();
        try {
            ${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
        } catch (UnsupportedOperationException e) {
            if (coreWrapper == null) {
                Assert.fail("Cannot call this method before xwalk is ready");
            } else {
                XWalkCoreWrapper.handleRuntimeError(e);
            }
        }
        ${RETURN_NULL}
    }
""")
    return_type = self.method_return
    if self._method_return == 'void':
      return_state = ''
      return_null = ''
    else:
      return_state = 'return (%s) ' % ConvertPrimitiveTypeToObject(return_type)
      return_null = 'return %s;' % GetPrimitiveTypeDefaultValue(return_type)
    value = {'RETURN_TYPE': self.method_return,
             'RETURN': return_state,
             'RETURN_NULL': return_null,
             'DOC': self.GenerateDoc(self.method_doc),
             'NAME': self.method_name,
             'PARAMS': self._wrapper_params_declare,
             'METHOD_DECLARE_NAME': self._method_declare_name,
             'PARAMS_PASSING': self._wrapper_params_pass_to_bridge}
    return template.substitute(value)
  def GenerateWrapperBridgeMethod(self):
    """Emit the wrapper-side method body for a bridge-backed method.

    Chooses between five templates: abstract declaration, internal-return
    translation, reservable invoke, delegate (private, preWrapperLines
    body only), disabled-reflection (public, preWrapperLines body), and
    the default reflective invoke. NOTE: the same-class-enum branch
    rewrites self._method_return in place (strips 'Internal').
    """
    return_is_internal = self.IsInternalClass(self._method_return)
    if return_is_internal:
      return_type_java_data = self.GetJavaData(self._method_return)
    if self.is_abstract:
      template = Template(
          '${DOC}\n' +
          '    public abstract ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
    elif return_is_internal:
      template = Template("""\
${DOC}
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        try {
            return (${RETURN_TYPE}) coreWrapper.getWrapperObject(\
${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING}));
        } catch (UnsupportedOperationException e) {
            if (coreWrapper == null) {
                Assert.fail("Cannot call this method before xwalk is ready");
            } else {
                XWalkCoreWrapper.handleRuntimeError(e);
            }
        }
        ${RETURN_NULL}
    }
""")
    elif self.is_reservable:
      template = Template("""\
${DOC}
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        try {
            ${RETURN}${METHOD_DECLARE_NAME}.invoke(${PARAMS_PASSING});
        } catch (UnsupportedOperationException e) {
            if (coreWrapper == null) {
                ${METHOD_DECLARE_NAME}.setArguments(${PARAMS_RESERVING});
                XWalkCoreWrapper.reserveReflectMethod(${METHOD_DECLARE_NAME});
            } else {
                XWalkCoreWrapper.handleRuntimeError(e);
            }
        }
        ${RETURN_NULL}
    }
""")
    elif self._is_delegate:
      template = Template("""\
    private ${RETURN_TYPE} ${NAME}(${PARAMS}){
${PRE_WRAP_LINES}
    }
""")
    elif self._disable_reflect_method:
      template = Template("""\
${DOC}
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
${PRE_WRAP_LINES}
    }
""")
    else:
      prefix_str = """\
${DOC}
    public ${RETURN_TYPE} ${NAME}(${PARAMS}) {
        try {\n"""
      suffix_str = """\n        } catch (UnsupportedOperationException e) {
            if (coreWrapper == null) {
                Assert.fail("Cannot call this method before xwalk is ready");
            } else {
                XWalkCoreWrapper.handleRuntimeError(e);
            }
        }
        ${RETURN_NULL}
    }
"""
      return_str = """            ${RETURN}${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING});"""
      if self._method_return in self._class_java_data.enums:
        # Here only detects enum declared in the same class as
        # the method itself. Using enum across class is not supported.
        self._method_return = self._method_return.replace('Internal', '')
        return_str = """            ${RETURN} %s.valueOf(\
${METHOD_DECLARE_NAME}.invoke(\
${PARAMS_PASSING}).toString());""" % self._method_return
      template = Template(prefix_str + return_str + suffix_str)
    if return_is_internal:
      return_type = return_type_java_data.wrapper_name
    else:
      return_type = self.method_return
    if self._method_return == 'void':
      return_state = ''
      return_null = ''
    else:
      return_state = 'return (%s)' % ConvertPrimitiveTypeToObject(return_type)
      return_null = 'return %s;' % GetPrimitiveTypeDefaultValue(return_type)
    # Arguments recorded for a reserved (deferred) invoke: bridge objects
    # are re-fetched lazily through a getBridge ReflectMethod.
    params_reserving = []
    for param in self._wrapper_params_pass_to_bridge.split(', '):
      if (param.find("getBridge()") > 0):
        param = param.replace('.getBridge()', '')
        params_reserving.append(
            'new ReflectMethod(%s, "getBridge")' % param)
      else:
        params_reserving.append(param)
    pre_wrap_string = self._method_annotations.get(
        self.ANNOTATION_PRE_WRAPLINE, '')
    value = {'RETURN_TYPE': return_type,
             'RETURN': return_state,
             'RETURN_NULL': return_null,
             'DOC': self.GenerateDoc(self.method_doc),
             'NAME': self.method_name,
             'PARAMS': re.sub(r'ValueCallback<([A-Za-z]+)Internal>',
                 r'ValueCallback<\1>',self._wrapper_params_declare),
             'METHOD_DECLARE_NAME': self._method_declare_name,
             'PARAMS_RESERVING': ', '.join(params_reserving),
             'PARAMS_PASSING': self._wrapper_params_pass_to_bridge,
             'PRE_WRAP_LINES': pre_wrap_string}
    return template.substitute(value)
def GenerateWrapperInterface(self):
return_is_internal = self.IsInternalClass(self._method_return)
if return_is_internal:
return_type_java_data = self.GetJavaData(self._method_return)
template = Template(
'${DOC}\n' +
' public ${RETURN_TYPE} ${NAME}(${PARAMS});\n\n')
if return_is_internal:
return_type = return_type_java_data.wrapper_name
else:
return_type = self.method_return
value = {'RETURN_TYPE': return_type,
'DOC': self.GenerateDoc(self.method_doc),
'NAME': self.method_name,
'PARAMS': self._wrapper_params_declare}
return template.substitute(value)
  def GenerateMethodsStringForBridge(self):
    # Top-level dispatcher for the bridge class: constructor, plain static
    # forwarder, or the override/wrapper/super trio plus the ReflectMethod
    # field that backs the wrapper call.
    if self._is_constructor:
      return self.GenerateBridgeConstructor()
    elif self._is_static:
      return self.GenerateBridgeStaticMethod()
    else:
      return '%s\n%s\n%s\n%s\n' % (
          self.GenerateBridgeOverrideMethod(),
          self.GenerateBridgeWrapperMethod(),
          self.GenerateBridgeSuperMethod(),
          '    private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
              (self._method_declare_name, self._method_name))
  def GenerateMethodsStringForWrapper(self):
    # Top-level dispatcher for the wrapper class. Abstract/delegate/
    # reflection-disabled methods emit only the bridge method (no backing
    # ReflectMethod field).
    if self._is_constructor:
      return self.GenerateWrapperConstructor()
    elif self._is_static:
      return '%s\n%s\n' % (
          self.GenerateWrapperStaticMethod(), """\
    private static ReflectMethod %s = new ReflectMethod(null, "%s");\n""" %
              (self._method_declare_name, self._method_name))
    elif self._is_abstract or self._is_delegate or self._disable_reflect_method:
      return self.GenerateWrapperBridgeMethod()
    else:
      return '%s\n%s\n' % (
          self.GenerateWrapperBridgeMethod(),
          '    private ReflectMethod %s = new ReflectMethod(null, "%s");\n' %
              (self._method_declare_name, self._method_name))
  def GenerateMethodsStringForInterface(self):
    # Interfaces only need the declaration line.
    return self.GenerateWrapperInterface()
| bsd-3-clause | 2,949,512,605,676,438,000 | 35.734694 | 84 | 0.620889 | false |
Funtimezzhou/TradeBuildTools | SAT eBook/chapter15/strategy.py | 2 | 1077 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# strategy.py
from __future__ import print_function
from abc import ABCMeta, abstractmethod
import datetime
try:
import Queue as queue
except ImportError:
import queue
import numpy as np
import pandas as pd
from event import SignalEvent
class Strategy(ABCMeta(str('AbstractStrategy'), (object,), {})):
    """
    Strategy is an abstract base class providing an interface for
    all subsequent (inherited) strategy handling objects.

    The goal of a (derived) Strategy object is to generate Signal
    objects for particular symbols based on the inputs of Bars
    (OHLCV) generated by a DataHandler object.

    This is designed to work both with historic and live data as
    the Strategy object is agnostic to where the data came from,
    since it obtains the bar tuples from a queue object.
    """
    # NOTE: deriving from ABCMeta(...) makes the abstract-method machinery
    # work on BOTH Python 2 and 3. The original `__metaclass__ = ABCMeta`
    # attribute is silently ignored by Python 3, so @abstractmethod was
    # never actually enforced there despite the file's py2/py3-compat
    # imports.

    @abstractmethod
    def calculate_signals(self):
        """
        Provides the mechanisms to calculate the list of signals.
        """
        raise NotImplementedError("Should implement calculate_signals()")
| gpl-3.0 | 5,644,252,694,532,509,000 | 24.642857 | 73 | 0.704735 | false |
BrandonY/python-docs-samples | appengine/standard/mail/handle_bounced_email.py | 9 | 1136 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.appengine.ext.webapp.mail_handlers import BounceNotificationHandler
import webapp2
# [START bounce_handler]
class LogBounceHandler(BounceNotificationHandler):
    """Logs every bounce notification App Engine POSTs to this handler."""
    def receive(self, bounce_message):
        # bounce_message wraps the POSTed payload; .original and
        # .notification presumably carry the original message and the
        # bounce details — see the BounceNotificationHandler docs.
        logging.info('Received bounce post ... [%s]', self.request)
        logging.info('Bounce original: %s', bounce_message.original)
        logging.info('Bounce notification: %s', bounce_message.notification)
# [END bounce_handler]
app = webapp2.WSGIApplication([LogBounceHandler.mapping()], debug=True)
| apache-2.0 | -6,149,980,936,640,265,000 | 36.866667 | 79 | 0.756162 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/image_disk_reference_py3.py | 3 | 1378 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImageDiskReference(Model):
    """The source image used for creating the disk.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. A relative URI pointing at either a Platform Image
     Repository image or a user image.
    :type id: str
    :param lun: When the disk is created from an image's data disk, the index
     of that data disk within the image. For OS disks this field is null.
    :type lun: int
    """

    _validation = {'id': {'required': True}}

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'lun': {'key': 'lun', 'type': 'int'},
    }

    def __init__(self, *, id: str, lun: int=None, **kwargs) -> None:
        super(ImageDiskReference, self).__init__(**kwargs)
        # The two assignments are independent of one another.
        self.lun = lun
        self.id = id
| mit | 7,268,603,149,453,334,000 | 32.609756 | 77 | 0.576923 | false |
nutztherookie/wagtail | wagtail/wagtailsearch/index.py | 4 | 9027 | from __future__ import absolute_import, unicode_literals
import inspect
import logging
from django.apps import apps
from django.core import checks
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel, OneToOneRel, RelatedField
from wagtail.wagtailsearch.backends import get_search_backends_with_name
# Module-level logger; used below to report (and swallow) search backend failures.
logger = logging.getLogger('wagtail.search.index')
class Indexed(object):
    """Mixin that makes a Django model indexable by the search backends.

    Subclasses describe what to index via the ``search_fields`` list
    (``SearchField`` / ``FilterField`` / ``RelatedFields`` instances); the
    classmethods below are the accessors used by the search machinery.
    """
    @classmethod
    def indexed_get_parent(cls, require_model=True):
        """Return the first base class that is also ``Indexed``, or ``None``.

        With ``require_model=True`` (the default), the base must additionally
        subclass ``models.Model``.
        """
        for base in cls.__bases__:
            if issubclass(base, Indexed) and (issubclass(base, models.Model) or require_model is False):
                return base
    @classmethod
    def indexed_get_content_type(cls):
        """Return this class's content type string (eg ``app_parent_app_child``).

        Built from ``app_label`` plus the class name, lowercased, prefixed
        recursively with the indexed parent's content type when one exists.
        """
        # Work out content type
        content_type = (cls._meta.app_label + '_' + cls.__name__).lower()
        # Get parent content type
        parent = cls.indexed_get_parent()
        if parent:
            parent_content_type = parent.indexed_get_content_type()
            return parent_content_type + '_' + content_type
        else:
            return content_type
    @classmethod
    def indexed_get_toplevel_content_type(cls):
        """Return the indexed parent's content type, or this class's own
        ``app_label_classname`` string when it has no indexed parent."""
        # Get parent content type
        parent = cls.indexed_get_parent()
        if parent:
            return parent.indexed_get_content_type()
        else:
            # At toplevel, return this content type
            return (cls._meta.app_label + '_' + cls.__name__).lower()
    @classmethod
    def get_search_fields(cls):
        """Return ``cls.search_fields`` de-duplicated on (field class, name).

        Later declarations override earlier ones with the same type and name.
        """
        search_fields = {}
        for field in cls.search_fields:
            search_fields[(type(field), field.field_name)] = field
        return list(search_fields.values())
    @classmethod
    def get_searchable_search_fields(cls):
        """Return only the ``SearchField`` entries from ``get_search_fields()``."""
        return [
            field for field in cls.get_search_fields()
            if isinstance(field, SearchField)
        ]
    @classmethod
    def get_filterable_search_fields(cls):
        """Return only the ``FilterField`` entries from ``get_search_fields()``."""
        return [
            field for field in cls.get_search_fields()
            if isinstance(field, FilterField)
        ]
    @classmethod
    def get_indexed_objects(cls):
        """Return the queryset of objects to index, with relation lookups
        applied (via ``RelatedFields.select_on_queryset``) for efficiency."""
        queryset = cls.objects.all()
        # Add prefetch/select related for RelatedFields
        for field in cls.get_search_fields():
            if isinstance(field, RelatedFields):
                queryset = field.select_on_queryset(queryset)
        return queryset
    def get_indexed_instance(self):
        """
        If the indexed model uses multi table inheritance, override this method
        to return the instance in its most specific class so it reindexes properly.
        """
        return self
    @classmethod
    def _has_field(cls, name):
        """True if ``name`` is a model field or, failing that, any class attribute."""
        try:
            cls._meta.get_field(name)
            return True
        except models.fields.FieldDoesNotExist:
            return hasattr(cls, name)
    @classmethod
    def check(cls, **kwargs):
        """Django system-check hook: append warnings about bad ``search_fields``."""
        errors = super(Indexed, cls).check(**kwargs)
        errors.extend(cls._check_search_fields(**kwargs))
        return errors
    @classmethod
    def _check_search_fields(cls, **kwargs):
        """Emit a ``checks.Warning`` for each declared field that doesn't exist."""
        errors = []
        for field in cls.get_search_fields():
            message = "{model}.search_fields contains field '{name}' but it doesn't exist"
            if not cls._has_field(field.field_name):
                errors.append(
                    checks.Warning(
                        message.format(model=cls.__name__, name=field.field_name),
                        obj=cls,
                    )
                )
        return errors
    # Overridden by subclasses with their SearchField / FilterField /
    # RelatedFields declarations; empty by default.
    search_fields = []
def get_indexed_models():
    """Return every installed, concrete model class that mixes in ``Indexed``."""
    indexed_models = []
    for model in apps.get_models():
        if issubclass(model, Indexed) and not model._meta.abstract:
            indexed_models.append(model)
    return indexed_models
def class_is_indexed(cls):
    """True when ``cls`` is a concrete Django model that opts into indexing."""
    if not issubclass(cls, Indexed):
        return False
    if not issubclass(cls, models.Model):
        return False
    # Same short-circuit as the conjunction: _meta is only touched for models.
    return not cls._meta.abstract
def get_indexed_instance(instance, check_exists=True):
    """Return the specific object that should be written to the search index.

    ``instance.get_indexed_instance()`` may redirect to a more specific object
    (multi-table inheritance). Returns ``None`` when there is nothing to index
    or, when ``check_exists`` is true, when the object is not present in its
    class's indexed queryset.
    """
    target = instance.get_indexed_instance()
    if target is None:
        return None
    if check_exists:
        # The object must still be part of its class's indexed objects.
        queryset = type(target).get_indexed_objects()
        if not queryset.filter(pk=target.pk).exists():
            return None
    return target
def insert_or_update_object(instance):
    """Push ``instance`` into every search backend configured for auto update.

    Backend failures are logged and swallowed so that a broken search backend
    never breaks the operation that triggered the reindex.
    """
    indexed_instance = get_indexed_instance(instance)
    if not indexed_instance:
        return
    for backend_name, backend in get_search_backends_with_name(with_auto_update=True):
        try:
            backend.add(indexed_instance)
        except Exception:
            # Catch and log all errors
            logger.exception("Exception raised while adding %r into the '%s' search backend", indexed_instance, backend_name)
def remove_object(instance):
    """Delete ``instance`` from every search backend configured for auto update.

    Uses ``check_exists=False`` so deletion proceeds even when the object is
    no longer present in its class's indexed queryset. Backend errors are
    logged, never raised.
    """
    indexed_instance = get_indexed_instance(instance, check_exists=False)
    if not indexed_instance:
        return
    for backend_name, backend in get_search_backends_with_name(with_auto_update=True):
        try:
            backend.delete(indexed_instance)
        except Exception:
            # Catch and log all errors
            logger.exception("Exception raised while deleting %r from the '%s' search backend", indexed_instance, backend_name)
class BaseField(object):
    """Base class for search field declarations (``SearchField``/``FilterField``).

    Resolves how a declared ``field_name`` maps onto a Django model: the
    concrete model field (if any), its attribute name, its index type and its
    indexable value. Every lookup falls back gracefully when ``field_name``
    refers to a plain attribute or method rather than a model field.

    Uses the module-level ``FieldDoesNotExist`` import for consistency with
    ``RelatedFields.select_on_queryset`` (it is the same class as
    ``models.fields.FieldDoesNotExist``).
    """

    def __init__(self, field_name, **kwargs):
        self.field_name = field_name
        self.kwargs = kwargs

    def get_field(self, cls):
        """Return the Django model field named ``field_name`` on ``cls``.

        Raises ``FieldDoesNotExist`` when it is not a model field.
        """
        return cls._meta.get_field(self.field_name)

    def get_attname(self, cls):
        """Return the attribute name backing the model field (eg ``author_id``)."""
        try:
            field = self.get_field(cls)
            return field.attname
        except FieldDoesNotExist:
            # Not a model field: the declared name is used verbatim.
            return self.field_name

    def get_definition_model(self, cls):
        """Return the class in ``cls``'s hierarchy where the field is defined."""
        try:
            field = self.get_field(cls)
            return field.model
        except FieldDoesNotExist:
            # Find where it was defined by walking the inheritance tree
            for base_cls in inspect.getmro(cls):
                if self.field_name in base_cls.__dict__:
                    return base_cls

    def get_type(self, cls):
        """Return the index type: an explicit ``type`` kwarg, the model field's
        internal type, or ``'CharField'`` for non-model attributes."""
        if 'type' in self.kwargs:
            return self.kwargs['type']
        try:
            field = self.get_field(cls)
            return field.get_internal_type()
        except FieldDoesNotExist:
            return 'CharField'

    def get_value(self, obj):
        """Return the value to index for this field on ``obj``.

        Model fields may customise their output via ``get_searchable_content``;
        plain attributes that are callables (eg methods) are called with no
        arguments.
        """
        try:
            field = self.get_field(obj.__class__)
            value = field.value_from_object(obj)
            if hasattr(field, 'get_searchable_content'):
                value = field.get_searchable_content(value)
            return value
        except FieldDoesNotExist:
            value = getattr(obj, self.field_name, None)
            if callable(value):
                value = value()
            return value

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.field_name)
class SearchField(BaseField):
    """Field declaration for full-text search content.

    Stores the ``boost`` and ``partial_match`` options alongside the base
    field settings; the remaining kwargs are handled by ``BaseField``.
    """

    def __init__(self, field_name, boost=None, partial_match=False, **kwargs):
        self.boost = boost
        self.partial_match = partial_match
        # Independent of the assignments above; BaseField only stores
        # field_name and the remaining kwargs.
        super(SearchField, self).__init__(field_name, **kwargs)
class FilterField(BaseField):
    """Marker subclass distinguishing filterable fields from full-text ones
    (see ``Indexed.get_filterable_search_fields``); adds no behaviour."""
class RelatedFields(object):
    """Declares a group of search field definitions indexed on a relation.

    ``field_name`` names the relation on the model; ``fields`` is the list of
    field declarations to index on the related object(s).
    """
    def __init__(self, field_name, fields):
        self.field_name = field_name
        self.fields = fields
    def get_field(self, cls):
        """Return the relation field named ``field_name`` on ``cls``.

        Raises ``FieldDoesNotExist`` when no such field exists.
        """
        return cls._meta.get_field(self.field_name)
    def get_definition_model(self, cls):
        """Return the model class on which the relation field is defined."""
        field = self.get_field(cls)
        return field.model
    def get_value(self, obj):
        """Return the related value for ``obj``.

        Only forward relations (``RelatedField``) are handled here; implicitly
        returns ``None`` for anything else (eg reverse relations).
        """
        field = self.get_field(obj.__class__)
        if isinstance(field, RelatedField):
            return getattr(obj, self.field_name)
    def select_on_queryset(self, queryset):
        """
        This method runs either prefetch_related or select_related on the queryset
        to improve indexing speed of the relation.
        It decides which method to call based on the number of related objects:
        - single (eg ForeignKey, OneToOne), it runs select_related
        - multiple (eg ManyToMany, reverse ForeignKey) it runs prefetch_related
        """
        try:
            field = self.get_field(queryset.model)
        except FieldDoesNotExist:
            # Unknown relation name: leave the queryset untouched.
            return queryset
        # NOTE: branch order matters — OneToOneRel is a ForeignObjectRel
        # subclass, so forward RelatedFields must be checked first and the
        # OneToOneRel special case before the generic reverse case.
        if isinstance(field, RelatedField):
            if field.many_to_one or field.one_to_one:
                queryset = queryset.select_related(self.field_name)
            elif field.one_to_many or field.many_to_many:
                queryset = queryset.prefetch_related(self.field_name)
        elif isinstance(field, ForeignObjectRel):
            # Reverse relation
            if isinstance(field, OneToOneRel):
                # select_related for reverse OneToOneField
                queryset = queryset.select_related(self.field_name)
            else:
                # prefetch_related for anything else (reverse ForeignKey/ManyToManyField)
                queryset = queryset.prefetch_related(self.field_name)
        return queryset
| bsd-3-clause | -870,381,035,167,821,400 | 31.588448 | 131 | 0.611942 | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.